/*	$OpenBSD: if_iwx.c,v 1.119 2021/10/15 13:38:10 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWX_DEBUG
#define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
int iwx_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwxreg.h>
#include <dev/pci/if_iwxvar.h>

const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS	14

const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
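
/*
 * Note: the 'rate' values in iwx_rates[] are in units of 500 kbit/s,
 * following the net80211 rate-set convention; e.g. 2 is 1 Mbit/s CCK and
 * 12 is 6 Mbit/s OFDM. This is why IWX_RVAL_IS_OFDM() treats values >= 12
 * as OFDM but must exclude 22, the 11 Mbit/s CCK rate.
 */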

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_is_mimo_ht_plcp(uint8_t);
int	iwx_is_mimo_mcs(int);
int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
int	iwx_apply_debug_destination(struct iwx_softc *);
int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
void	iwx_ctxt_info_free_paging(struct iwx_softc *);
int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
	    struct iwx_context_info_dram *);
void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
	    uint8_t *, size_t);
int	iwx_set_default_calib(struct iwx_softc *, const void *);
void	iwx_fw_info_free(struct iwx_fw_info *);
int	iwx_read_firmware(struct iwx_softc *);
uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
int	iwx_nic_lock(struct iwx_softc *);
void	iwx_nic_assert_locked(struct iwx_softc *);
void	iwx_nic_unlock(struct iwx_softc *);
int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
	    bus_size_t);
void	iwx_dma_contig_free(struct iwx_dma_info *);
int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_disable_rx_dma(struct iwx_softc *);
void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_enable_rfkill_int(struct iwx_softc *);
int	iwx_check_rfkill(struct iwx_softc *);
void	iwx_enable_interrupts(struct iwx_softc *);
void	iwx_enable_fwload_interrupt(struct iwx_softc *);
void	iwx_restore_interrupts(struct iwx_softc *);
void	iwx_disable_interrupts(struct iwx_softc *);
void	iwx_ict_reset(struct iwx_softc *);
int	iwx_set_hw_ready(struct iwx_softc *);
int	iwx_prepare_card_hw(struct iwx_softc *);
int	iwx_force_power_gating(struct iwx_softc *);
void	iwx_apm_config(struct iwx_softc *);
int	iwx_apm_init(struct iwx_softc *);
void	iwx_apm_stop(struct iwx_softc *);
int	iwx_allow_mcast(struct iwx_softc *);
void	iwx_init_msix_hw(struct iwx_softc *);
void	iwx_conf_msix_hw(struct iwx_softc *, int);
int	iwx_clear_persistence_bit(struct iwx_softc *);
int	iwx_start_hw(struct iwx_softc *);
void	iwx_stop_device(struct iwx_softc *);
void	iwx_nic_config(struct iwx_softc *);
int	iwx_nic_rx_init(struct iwx_softc *);
int	iwx_nic_init(struct iwx_softc *);
int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
void	iwx_post_alive(struct iwx_softc *);
int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
	    uint32_t);
void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
void	iwx_setup_ht_rates(struct iwx_softc *);
int	iwx_mimo_enabled(struct iwx_softc *);
void	iwx_mac_ctxt_task(void *);
void	iwx_phy_ctxt_task(void *);
void	iwx_updatechan(struct ieee80211com *);
void	iwx_updateprot(struct ieee80211com *);
void	iwx_updateslot(struct ieee80211com *);
void	iwx_updateedca(struct ieee80211com *);
void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_rx_ba_session_expired(void *);
void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *, struct mbuf_list *);
void	iwx_reorder_timer_expired(void *);
void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ba_task(void *);

int	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
int	iwx_is_valid_mac_addr(const uint8_t *);
int	iwx_nvm_get(struct iwx_softc *);
int	iwx_load_firmware(struct iwx_softc *);
int	iwx_start_fw(struct iwx_softc *);
int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
int	iwx_load_ucode_wait_alive(struct iwx_softc *);
int	iwx_send_dqa_cmd(struct iwx_softc *);
int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
int	iwx_config_ltr(struct iwx_softc *);
void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
int	iwx_rx_addbuf(struct iwx_softc *, int, int);
int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
int	iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint8_t);
int	iwx_phy_ctxt_cmd_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint8_t);
int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t);
int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
	    uint32_t *);
int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
void	iwx_cmd_done(struct iwx_softc *, int, int, int);
const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
	    struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
void	iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
int	iwx_wait_tx_queues_empty(struct iwx_softc *);
int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
	    struct iwx_beacon_filter_cmd *);
int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_power_cmd *);
int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
int	iwx_power_update_device(struct iwx_softc *);
int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
int	iwx_disable_beacon_filter(struct iwx_softc *);
int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
int	iwx_config_umac_scan_reduced(struct iwx_softc *);
uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, int);
void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, uint16_t, int);
void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
	    struct iwx_scan_channel_params_v6 *, uint32_t, int, int);
int	iwx_umac_scan_v14(struct iwx_softc *, int);
void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
int	iwx_rval2ridx(int);
void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_ctx_cmd *, uint32_t);
void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_data_sta *, int);
int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
int	iwx_clear_statistics(struct iwx_softc *);
void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
int	iwx_scan(struct iwx_softc *);
int	iwx_bgscan(struct ieee80211com *);
int	iwx_umac_scan_abort(struct iwx_softc *);
int	iwx_scan_abort(struct iwx_softc *);
int	iwx_enable_mgmt_queue(struct iwx_softc *);
int	iwx_rs_rval2idx(uint8_t);
uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
int	iwx_enable_data_tx_queues(struct iwx_softc *);
int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t);
int	iwx_auth(struct iwx_softc *);
int	iwx_deauth(struct iwx_softc *);
int	iwx_run(struct iwx_softc *);
int	iwx_run_stop(struct iwx_softc *);
struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwx_setkey_task(void *);
void	iwx_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
int	iwx_media_change(struct ifnet *);
void	iwx_newstate_task(void *);
int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwx_endscan(struct iwx_softc *);
void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwx_sf_config(struct iwx_softc *, int);
int	iwx_send_bt_init_conf(struct iwx_softc *);
int	iwx_send_soc_conf(struct iwx_softc *);
int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
int	iwx_init_hw(struct iwx_softc *);
int	iwx_init(struct ifnet *);
void	iwx_start(struct ifnet *);
void	iwx_stop(struct ifnet *);
void	iwx_watchdog(struct ifnet *);
int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwx_desc_lookup(uint32_t);
void	iwx_nic_error(struct iwx_softc *);
void	iwx_dump_driver_status(struct iwx_softc *);
void	iwx_nic_umac_error(struct iwx_softc *);
int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
	    struct mbuf_list *);
void	iwx_notif_intr(struct iwx_softc *);
int	iwx_intr(void *);
int	iwx_intr_msix(void *);
int	iwx_match(struct device *, void *, void *);
int	iwx_preinit(struct iwx_softc *);
void	iwx_attach_hook(struct device *);
void	iwx_attach(struct device *, struct device *, void *);
void	iwx_init_task(void *);
int	iwx_activate(struct device *, int);
void	iwx_resume(struct iwx_softc *);
int	iwx_wakeup(struct iwx_softc *);

#if NBPFILTER > 0
void	iwx_radiotap_attach(struct iwx_softc *);
#endif

uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}
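
/*
 * Illustrative use of the version lookup (a sketch, not a verbatim call
 * site from this file): callers pick a command layout based on the
 * version the firmware advertises, e.g.
 *
 *	uint8_t cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, cmd_id);
 *	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 2)
 *		(build the newer command structure)
 */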

uint8_t
iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->notif_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

int
iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
}

int
iwx_is_mimo_mcs(int mcs)
{
	int ridx = iwx_mcs2ridx[mcs];
	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
}

int
iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwx_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

int
iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
{
	int i = 0;

	while (start < fws->fw_count &&
	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL);

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);
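
	/*
	 * Sketch of the section layout assumed by the counting above;
	 * a "separator" is a section whose device offset equals
	 * IWX_CPU1_CPU2_SEPARATOR_SECTION or IWX_PAGING_SEPARATOR_SECTION:
	 *
	 *	[ lmac sections ][ separator ][ umac sections ]
	 *	[ separator ][ paging sections ]
	 */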

	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
			(unsigned long long)dram->fw[fw_cnt].paddr,
			(unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}
	/*
	 * Initialize paging.
	 * Unlike the umac and lmac sections, paging memory is not stored
	 * in dram->fw; it is kept separately because its release timing
	 * differs: fw memory can be released upon "alive", while paging
	 * memory can only be freed when the device goes down.
	 * Given that, the logic here for accessing the fw image is a bit
	 * different: fw_cnt does not change, so the loop counter is added
	 * to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
	}

	return 0;
}

void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}
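
/*
 * Examples (hypothetical version numbers): major 63, minor 0x8f2a, api 29
 * formats as "63.00008f2a.29"; major 34, minor 3, api 1 formats as "34.3.1".
 */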

int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size));
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10))));

	return 0;
}

int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		DPRINTF(("%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power));
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}

int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		/* base_reg and end_reg were already byte-swapped above. */
		iwx_write_prph(sc, base_reg,
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
}

int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0, rb_size;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
	else
		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw)
		return;

	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF,
	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
	dram->lmac_cnt = 0;
	dram->umac_cnt = 0;
	dram->fw = NULL;
}

int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */

struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;

int
iwx_set_default_calib(struct iwx_softc *sc, const void *data)
{
	const struct iwx_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwx_fw_info_free(struct iwx_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000

int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
	fw->fw_status = IWX_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwx_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);
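
	/*
	 * What follows is a stream of TLV records, each consisting of a
	 * struct iwx_ucode_tlv header (little-endian type and length)
	 * followed by 'length' payload bytes. Records are padded to a
	 * 4-byte boundary, hence the roundup(tlv_len, 4) at the bottom
	 * of this loop.
	 */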

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			fw->dbg_dest_ver = (uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;

			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			DPRINTF(("Found debug configuration: %d\n", conf->id));
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			struct iwx_umac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
				le32toh(dbg_ptrs->error_info_addr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			struct iwx_lmac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
				le32toh(dbg_ptrs->error_event_table_ptr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

uint32_t
iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
{
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}

uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}

void
iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}

void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}

void
iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
{
	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwx_nic_lock(sc)) {
		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
		iwx_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwx_nic_lock(sc)) {
		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
		}
		iwx_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	return iwx_write_mem(sc, addr, &val, 1);
}

int
iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	DELAY(2);

	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
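
/*
 * The NIC lock is a counting lock; the usage pattern seen at call sites
 * throughout this file is:
 *
 *	if (!iwx_nic_lock(sc))
 *		return EBUSY;
 *	iwx_write_prph(sc, reg, val);
 *	iwx_nic_unlock(sc);
 */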
1534 
1535 void
1536 iwx_nic_assert_locked(struct iwx_softc *sc)
1537 {
1538 	if (sc->sc_nic_locks <= 0)
1539 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1540 }
1541 
1542 void
1543 iwx_nic_unlock(struct iwx_softc *sc)
1544 {
1545 	if (sc->sc_nic_locks > 0) {
1546 		if (--sc->sc_nic_locks == 0)
1547 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1548 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1549 	} else
1550 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1551 }
1552 
1553 int
1554 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1555     uint32_t mask)
1556 {
1557 	uint32_t val;
1558 
1559 	if (iwx_nic_lock(sc)) {
1560 		val = iwx_read_prph(sc, reg) & mask;
1561 		val |= bits;
1562 		iwx_write_prph(sc, reg, val);
1563 		iwx_nic_unlock(sc);
1564 		return 0;
1565 	}
1566 	return EBUSY;
1567 }
1568 
1569 int
1570 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1571 {
1572 	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1573 }
1574 
1575 int
1576 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1577 {
1578 	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1579 }
1580 
1581 int
1582 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1583     bus_size_t size, bus_size_t alignment)
1584 {
1585 	int nsegs, err;
1586 	caddr_t va;
1587 
1588 	dma->tag = tag;
1589 	dma->size = size;
1590 
1591 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1592 	    &dma->map);
1593 	if (err)
1594 		goto fail;
1595 
1596 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1597 	    BUS_DMA_NOWAIT);
1598 	if (err)
1599 		goto fail;
1600 
1601 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1602 	    BUS_DMA_NOWAIT);
1603 	if (err)
1604 		goto fail;
1605 	dma->vaddr = va;
1606 
1607 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1608 	    BUS_DMA_NOWAIT);
1609 	if (err)
1610 		goto fail;
1611 
1612 	memset(dma->vaddr, 0, size);
1613 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1614 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1615 
1616 	return 0;
1617 
1618 fail:	iwx_dma_contig_free(dma);
1619 	return err;
1620 }
1621 
1622 void
1623 iwx_dma_contig_free(struct iwx_dma_info *dma)
1624 {
1625 	if (dma->map != NULL) {
1626 		if (dma->vaddr != NULL) {
1627 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1628 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1629 			bus_dmamap_unload(dma->tag, dma->map);
1630 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1631 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1632 			dma->vaddr = NULL;
1633 		}
1634 		bus_dmamap_destroy(dma->tag, dma->map);
1635 		dma->map = NULL;
1636 	}
1637 }
1638 
1639 int
1640 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1641 {
1642 	bus_size_t size;
1643 	int i, err;
1644 
1645 	ring->cur = 0;
1646 
1647 	/* Allocate RX descriptors (256-byte aligned). */
1648 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
1649 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1650 	if (err) {
1651 		printf("%s: could not allocate RX ring DMA memory\n",
1652 		    DEVNAME(sc));
1653 		goto fail;
1654 	}
1655 	ring->desc = ring->free_desc_dma.vaddr;
1656 
1657 	/* Allocate RX status area (16-byte aligned). */
1658 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1659 	    sizeof(*ring->stat), 16);
1660 	if (err) {
1661 		printf("%s: could not allocate RX status DMA memory\n",
1662 		    DEVNAME(sc));
1663 		goto fail;
1664 	}
1665 	ring->stat = ring->stat_dma.vaddr;
1666 
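	/*
	 * Allocate the "used" descriptor ring, which the hardware
	 * presumably uses to report consumed RX buffers back to the
	 * driver (256-byte aligned).
	 */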
1667 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
1668 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1669 	    size, 256);
1670 	if (err) {
1671 		printf("%s: could not allocate RX ring DMA memory\n",
1672 		    DEVNAME(sc));
1673 		goto fail;
1674 	}
1675 
1676 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1677 		struct iwx_rx_data *data = &ring->data[i];
1678 
1679 		memset(data, 0, sizeof(*data));
1680 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1681 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1682 		    &data->map);
1683 		if (err) {
1684 			printf("%s: could not create RX buf DMA map\n",
1685 			    DEVNAME(sc));
1686 			goto fail;
1687 		}
1688 
1689 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1690 		if (err)
1691 			goto fail;
1692 	}
1693 	return 0;
1694 
1695 fail:	iwx_free_rx_ring(sc, ring);
1696 	return err;
1697 }
1698 
1699 void
1700 iwx_disable_rx_dma(struct iwx_softc *sc)
1701 {
1702 	int ntries;
1703 
1704 	if (iwx_nic_lock(sc)) {
1705 		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1706 		for (ntries = 0; ntries < 1000; ntries++) {
1707 			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1708 			    IWX_RXF_DMA_IDLE)
1709 				break;
1710 			DELAY(10);
1711 		}
1712 		iwx_nic_unlock(sc);
1713 	}
1714 }
1715 
1716 void
1717 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1718 {
1719 	ring->cur = 0;
1720 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1721 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1722 	memset(ring->stat, 0, sizeof(*ring->stat));
1723 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1724 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1726 }
1727 
1728 void
1729 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1730 {
1731 	int i;
1732 
1733 	iwx_dma_contig_free(&ring->free_desc_dma);
1734 	iwx_dma_contig_free(&ring->stat_dma);
1735 	iwx_dma_contig_free(&ring->used_desc_dma);
1736 
1737 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1738 		struct iwx_rx_data *data = &ring->data[i];
1739 
1740 		if (data->m != NULL) {
1741 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1742 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1743 			bus_dmamap_unload(sc->sc_dmat, data->map);
1744 			m_freem(data->m);
1745 			data->m = NULL;
1746 		}
1747 		if (data->map != NULL)
1748 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1749 	}
1750 }
1751 
1752 int
1753 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
1754 {
1755 	bus_addr_t paddr;
1756 	bus_size_t size;
1757 	int i, err;
1758 
1759 	ring->qid = qid;
1760 	ring->queued = 0;
1761 	ring->cur = 0;
1762 	ring->tail = 0;
1763 
1764 	/* Allocate TX descriptors (256-byte aligned). */
1765 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
1766 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1767 	if (err) {
1768 		printf("%s: could not allocate TX ring DMA memory\n",
1769 		    DEVNAME(sc));
1770 		goto fail;
1771 	}
1772 	ring->desc = ring->desc_dma.vaddr;
1773 
1774 	/*
1775 	 * The hardware supports up to 512 Tx rings which is more
1776 	 * than we currently need.
1777 	 *
1778 	 * In DQA mode we use 1 command queue + 1 default queue for
1779 	 * management, control, and non-QoS data frames.
1780 	 * The command queue is sc->txq[0], our default queue is sc->txq[1].
1781 	 *
1782 	 * Tx aggregation requires additional queues, one queue per TID for
1783 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
1784 	 * Firmware may assign its own internal IDs for these queues
1785 	 * depending on which TID gets aggregation enabled first.
1786 	 * The driver maintains a table mapping driver-side queue IDs
1787 	 * to firmware-side queue IDs.
1788 	 */
1789 
1790 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
1791 	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
1792 	if (err) {
1793 		printf("%s: could not allocate byte count table DMA memory\n",
1794 		    DEVNAME(sc));
1795 		goto fail;
1796 	}
1797 
1798 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
1799 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
1800 	    IWX_FIRST_TB_SIZE_ALIGN);
1801 	if (err) {
1802 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1803 		goto fail;
1804 	}
1805 	ring->cmd = ring->cmd_dma.vaddr;
1806 
1807 	paddr = ring->cmd_dma.paddr;
1808 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1809 		struct iwx_tx_data *data = &ring->data[i];
1810 		size_t mapsize;
1811 
1812 		data->cmd_paddr = paddr;
1813 		paddr += sizeof(struct iwx_device_cmd);
1814 
1815 		/* FW commands may require more mapped space than packets. */
1816 		if (qid == IWX_DQA_CMD_QUEUE)
1817 			mapsize = (sizeof(struct iwx_cmd_header) +
1818 			    IWX_MAX_CMD_PAYLOAD_SIZE);
1819 		else
1820 			mapsize = MCLBYTES;
1821 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1822 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1823 		    &data->map);
1824 		if (err) {
1825 			printf("%s: could not create TX buf DMA map\n",
1826 			    DEVNAME(sc));
1827 			goto fail;
1828 		}
1829 	}
1830 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1831 	return 0;
1832 
1833 fail:	iwx_free_tx_ring(sc, ring);
1834 	return err;
1835 }
1836 
1837 void
1838 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1839 {
1840 	int i;
1841 
1842 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1843 		struct iwx_tx_data *data = &ring->data[i];
1844 
1845 		if (data->m != NULL) {
1846 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1847 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1848 			bus_dmamap_unload(sc->sc_dmat, data->map);
1849 			m_freem(data->m);
1850 			data->m = NULL;
1851 		}
1852 	}
1853 
1854 	/* Clear byte count table. */
1855 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1856 
1857 	/* Clear TX descriptors. */
1858 	memset(ring->desc, 0, ring->desc_dma.size);
1859 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1860 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1861 	sc->qfullmsk &= ~(1 << ring->qid);
1862 	sc->qenablemsk &= ~(1 << ring->qid);
1863 	for (i = 0; i < nitems(sc->aggqid); i++) {
1864 		if (sc->aggqid[i] == ring->qid) {
1865 			sc->aggqid[i] = 0;
1866 			break;
1867 		}
1868 	}
1869 	ring->queued = 0;
1870 	ring->cur = 0;
1871 	ring->tail = 0;
1872 	ring->tid = 0;
1873 }
1874 
1875 void
1876 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1877 {
1878 	int i;
1879 
1880 	iwx_dma_contig_free(&ring->desc_dma);
1881 	iwx_dma_contig_free(&ring->cmd_dma);
1882 	iwx_dma_contig_free(&ring->bc_tbl);
1883 
1884 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1885 		struct iwx_tx_data *data = &ring->data[i];
1886 
1887 		if (data->m != NULL) {
1888 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1889 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1890 			bus_dmamap_unload(sc->sc_dmat, data->map);
1891 			m_freem(data->m);
1892 			data->m = NULL;
1893 		}
1894 		if (data->map != NULL)
1895 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1896 	}
1897 }
1898 
1899 void
1900 iwx_enable_rfkill_int(struct iwx_softc *sc)
1901 {
1902 	if (!sc->sc_msix) {
1903 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1904 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1905 	} else {
1906 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1907 		    sc->sc_fh_init_mask);
1908 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1909 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1910 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1911 	}
1912 
1913 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1914 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1915 }
1916 
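/*
 * Sample the hardware RF-kill switch state and update IWX_FLAG_RFKILL.
 * Returns non-zero when the radio is disabled by the switch.
 */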
1917 int
1918 iwx_check_rfkill(struct iwx_softc *sc)
1919 {
1920 	uint32_t v;
1921 	int rv;
1922 
1923 	/*
1924 	 * "documentation" is not really helpful here:
1925 	 *  27:	HW_RF_KILL_SW
1926 	 *	Indicates state of (platform's) hardware RF-Kill switch
1927 	 *
1928 	 * But apparently when it's off, it's on ...
1929 	 */
1930 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1931 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1932 	if (rv) {
1933 		sc->sc_flags |= IWX_FLAG_RFKILL;
1934 	} else {
1935 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1936 	}
1937 
1938 	return rv;
1939 }
1940 
1941 void
1942 iwx_enable_interrupts(struct iwx_softc *sc)
1943 {
1944 	if (!sc->sc_msix) {
1945 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1946 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1947 	} else {
1948 		/*
1949 		 * fh/hw_mask keep all the unmasked causes.
1950 		 * Unlike MSI, an MSI-X cause is enabled while its bit is unset.
1951 		 */
1952 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1953 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1954 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1955 		    ~sc->sc_fh_mask);
1956 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1957 		    ~sc->sc_hw_mask);
1958 	}
1959 }
1960 
1961 void
1962 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1963 {
1964 	if (!sc->sc_msix) {
1965 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1966 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1967 	} else {
1968 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1969 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1970 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1971 		/*
1972 		 * Leave all the FH causes enabled to get the ALIVE
1973 		 * notification.
1974 		 */
1975 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1976 		    ~sc->sc_fh_init_mask);
1977 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1978 	}
1979 }
1980 
1981 void
1982 iwx_restore_interrupts(struct iwx_softc *sc)
1983 {
1984 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1985 }
1986 
1987 void
1988 iwx_disable_interrupts(struct iwx_softc *sc)
1989 {
1990 	if (!sc->sc_msix) {
1991 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
1992 
1993 		/* acknowledge all interrupts */
1994 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
1995 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
1996 	} else {
1997 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1998 		    sc->sc_fh_init_mask);
1999 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2000 		    sc->sc_hw_init_mask);
2001 	}
2002 }
2003 
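/*
 * Reset and re-enable the interrupt cause table (ICT), a DMA table
 * through which the device can post interrupt causes to the host.
 */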
2004 void
2005 iwx_ict_reset(struct iwx_softc *sc)
2006 {
2007 	iwx_disable_interrupts(sc);
2008 
2009 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2010 	sc->ict_cur = 0;
2011 
2012 	/* Set physical address of ICT (4KB aligned). */
2013 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2014 	    IWX_CSR_DRAM_INT_TBL_ENABLE
2015 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2016 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2017 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2018 
2019 	/* Switch to ICT interrupt mode in driver. */
2020 	sc->sc_flags |= IWX_FLAG_USE_ICT;
2021 
2022 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2023 	iwx_enable_interrupts(sc);
2024 }
2025 
2026 #define IWX_HW_READY_TIMEOUT 50
2027 int
2028 iwx_set_hw_ready(struct iwx_softc *sc)
2029 {
2030 	int ready;
2031 
2032 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2033 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2034 
2035 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2036 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2037 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2038 	    IWX_HW_READY_TIMEOUT);
2039 	if (ready)
2040 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2041 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2042 
2043 	return ready;
2044 }
2045 #undef IWX_HW_READY_TIMEOUT
2046 
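/*
 * Perform the "prepare" handshake with the device, polling until the
 * hardware reports that it is ready for host access.
 */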
2047 int
2048 iwx_prepare_card_hw(struct iwx_softc *sc)
2049 {
2050 	int t = 0;
2051 	int ntries;
2052 
2053 	if (iwx_set_hw_ready(sc))
2054 		return 0;
2055 
2056 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2057 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2058 	DELAY(1000);
2059 
2060 	for (ntries = 0; ntries < 10; ntries++) {
2061 		/* If HW is not ready, prepare the conditions to check again */
2062 		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2063 		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2064 
2065 		do {
2066 			if (iwx_set_hw_ready(sc))
2067 				return 0;
2068 			DELAY(200);
2069 			t += 200;
2070 		} while (t < 150000);
2071 		DELAY(25000);
2072 	}
2073 
2074 	return ETIMEDOUT;
2075 }
2076 
2077 int
2078 iwx_force_power_gating(struct iwx_softc *sc)
2079 {
2080 	int err;
2081 
2082 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2083 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2084 	if (err)
2085 		return err;
2086 	DELAY(20);
2087 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2088 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2089 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2090 	if (err)
2091 		return err;
2092 	DELAY(20);
2093 	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2094 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2095 	return err;
2096 }
2097 
2098 void
2099 iwx_apm_config(struct iwx_softc *sc)
2100 {
2101 	pcireg_t lctl, cap;
2102 
2103 	/*
2104 	 * L0S states have been found to be unstable with our devices
2105 	 * and in newer hardware they are not officially supported at
2106 	 * all, so we must always set the L0S_DISABLED bit.
2107 	 */
2108 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2109 
2110 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2111 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2112 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2113 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2114 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2115 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2116 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2117 	    DEVNAME(sc),
2118 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2119 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2120 }
2121 
2122 /*
2123  * Start up NIC's basic functionality after it has been reset
2124  * e.g. after platform boot or shutdown.
2125  * NOTE:  This does not load uCode nor start the embedded processor.
2126  */
2127 int
2128 iwx_apm_init(struct iwx_softc *sc)
2129 {
2130 	int err = 0;
2131 
2132 	/*
2133 	 * Disable L0s without affecting L1;
2134 	 *  don't wait for ICH L0s (ICH bug W/A)
2135 	 */
2136 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2137 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2138 
2139 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2140 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2141 
2142 	/*
2143 	 * Enable HAP INTA (interrupt from management bus) to
2144 	 * wake device's PCI Express link L1a -> L0s
2145 	 */
2146 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2147 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2148 
2149 	iwx_apm_config(sc);
2150 
2151 	/*
2152 	 * Set "initialization complete" bit to move adapter from
2153 	 * D0U* --> D0A* (powered-up active) state.
2154 	 */
2155 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2156 
2157 	/*
2158 	 * Wait for clock stabilization; once stabilized, access to
2159 	 * device-internal resources is supported, e.g. iwx_write_prph()
2160 	 * and accesses to uCode SRAM.
2161 	 */
2162 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2163 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2164 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2165 		printf("%s: timeout waiting for clock stabilization\n",
2166 		    DEVNAME(sc));
2167 		err = ETIMEDOUT;
2168 		goto out;
2169 	}
2170  out:
2171 	if (err)
2172 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2173 	return err;
2174 }
2175 
2176 void
2177 iwx_apm_stop(struct iwx_softc *sc)
2178 {
2179 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2180 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2181 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2182 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2183 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2184 	DELAY(1000);
2185 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2186 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2187 	DELAY(5000);
2188 
2189 	/* stop device's busmaster DMA activity */
2190 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2191 
2192 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2193 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2194 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2195 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2196 
2197 	/*
2198 	 * Clear "initialization complete" bit to move adapter from
2199 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2200 	 */
2201 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2202 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2203 }
2204 
2205 void
2206 iwx_init_msix_hw(struct iwx_softc *sc)
2207 {
2208 	iwx_conf_msix_hw(sc, 0);
2209 
2210 	if (!sc->sc_msix)
2211 		return;
2212 
2213 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2214 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2215 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2216 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2217 }
2218 
2219 void
2220 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2221 {
2222 	int vector = 0;
2223 
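	/* All interrupt causes are routed to a single MSI-X vector (0). */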
2224 	if (!sc->sc_msix) {
2225 		/* Newer chips default to MSIX. */
2226 		if (!stopped && iwx_nic_lock(sc)) {
2227 			iwx_write_prph(sc, IWX_UREG_CHICK,
2228 			    IWX_UREG_CHICK_MSI_ENABLE);
2229 			iwx_nic_unlock(sc);
2230 		}
2231 		return;
2232 	}
2233 
2234 	if (!stopped && iwx_nic_lock(sc)) {
2235 		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
2236 		iwx_nic_unlock(sc);
2237 	}
2238 
2239 	/* Disable all interrupts */
2240 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2241 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2242 
2243 	/* Map fallback-queue (command/mgmt) to a single vector */
2244 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2245 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2246 	/* Map RSS queue (data) to the same vector */
2247 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2248 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2249 
2250 	/* Enable the interrupt causes for the RX queues */
2251 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2252 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2253 
2254 	/* Map non-RX causes to the same vector */
2255 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2256 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2257 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2258 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2259 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2260 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2261 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2262 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2263 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2264 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2265 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2266 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2267 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
2268 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2269 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2270 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2271 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2272 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2273 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2274 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2275 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2276 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2277 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2278 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2279 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2280 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2281 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2282 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2283 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2284 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2285 
2286 	/* Enable the non-RX interrupt causes */
2287 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2288 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2289 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2290 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2291 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2292 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2293 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2294 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2295 	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
2296 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2297 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2298 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2299 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2300 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2301 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2302 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2303 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2304 }
2305 
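/*
 * Clear the HPM "persistence" bit, which apparently keeps parts of
 * the device alive across resets.  This fails if the WFPM write
 * protection is enabled.
 */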
2306 int
2307 iwx_clear_persistence_bit(struct iwx_softc *sc)
2308 {
2309 	uint32_t hpm, wprot;
2310 
2311 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2312 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2313 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2314 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2315 			printf("%s: cannot clear persistence bit\n",
2316 			    DEVNAME(sc));
2317 			return EPERM;
2318 		}
2319 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2320 		    hpm & ~IWX_PERSISTENCE_BIT);
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 int
2327 iwx_start_hw(struct iwx_softc *sc)
2328 {
2329 	int err;
2330 
2331 	err = iwx_prepare_card_hw(sc);
2332 	if (err)
2333 		return err;
2334 
2335 	err = iwx_clear_persistence_bit(sc);
2336 	if (err)
2337 		return err;
2338 
2339 	/* Reset the entire device */
2340 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2341 	DELAY(5000);
2342 
2343 	if (sc->sc_integrated) {
2344 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2345 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2346 		DELAY(20);
2347 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2348 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2349 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2350 			printf("%s: timeout waiting for clock stabilization\n",
2351 			    DEVNAME(sc));
2352 			return ETIMEDOUT;
2353 		}
2354 
2355 		err = iwx_force_power_gating(sc);
2356 		if (err)
2357 			return err;
2358 
2359 		/* Reset the entire device */
2360 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2361 		DELAY(5000);
2362 	}
2363 
2364 	err = iwx_apm_init(sc);
2365 	if (err)
2366 		return err;
2367 
2368 	iwx_init_msix_hw(sc);
2369 
2370 	iwx_enable_rfkill_int(sc);
2371 	iwx_check_rfkill(sc);
2372 
2373 	return 0;
2374 }
2375 
2376 void
2377 iwx_stop_device(struct iwx_softc *sc)
2378 {
2379 	struct ieee80211com *ic = &sc->sc_ic;
2380 	struct ieee80211_node *ni = ic->ic_bss;
2381 	int i;
2382 
2383 	iwx_disable_interrupts(sc);
2384 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2385 
2386 	iwx_disable_rx_dma(sc);
2387 	iwx_reset_rx_ring(sc, &sc->rxq);
2388 	for (i = 0; i < nitems(sc->txq); i++)
2389 		iwx_reset_tx_ring(sc, &sc->txq[i]);
2390 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2391 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2392 		if (ba->ba_state != IEEE80211_BA_AGREED)
2393 			continue;
2394 		ieee80211_delba_request(ic, ni, 0, 1, i);
2395 	}
2396 
2397 	/* Make sure we have released our request to stay awake (redundant) */
2398 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2399 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2400 	if (sc->sc_nic_locks > 0)
2401 		printf("%s: %d active NIC locks forcefully cleared\n",
2402 		    DEVNAME(sc), sc->sc_nic_locks);
2403 	sc->sc_nic_locks = 0;
2404 
2405 	/* Stop the device, and put it in low power state */
2406 	iwx_apm_stop(sc);
2407 
2408 	/* Reset the on-board processor. */
2409 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2410 	DELAY(5000);
2411 
2412 	/*
2413 	 * Upon stop, the IVAR table gets erased, so MSI-X won't
2414 	 * work. This causes a bug in RF-kill flows, since the interrupt
2415 	 * that enables the radio won't fire on the correct irq, and the
2416 	 * driver won't be able to handle the interrupt.
2417 	 * Configure the IVAR table again after reset.
2418 	 */
2419 	iwx_conf_msix_hw(sc, 1);
2420 
2421 	/*
2422 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2423 	 * Clear the interrupt again.
2424 	 */
2425 	iwx_disable_interrupts(sc);
2426 
2427 	/* Even though we stop the HW we still want the RF kill interrupt. */
2428 	iwx_enable_rfkill_int(sc);
2429 	iwx_check_rfkill(sc);
2430 
2431 	iwx_prepare_card_hw(sc);
2432 
2433 	iwx_ctxt_info_free_paging(sc);
2434 }
2435 
2436 void
2437 iwx_nic_config(struct iwx_softc *sc)
2438 {
2439 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2440 	uint32_t mask, val, reg_val = 0;
2441 
2442 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2443 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2444 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2445 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2446 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2447 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2448 
2449 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2450 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2451 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2452 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2453 
2454 	/* radio configuration */
2455 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2456 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2457 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2458 
2459 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2460 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2461 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2462 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2463 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2464 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2465 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2466 
2467 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2468 	val &= ~mask;
2469 	val |= reg_val;
2470 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2471 }
2472 
2473 int
2474 iwx_nic_rx_init(struct iwx_softc *sc)
2475 {
2476 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2477 
2478 	/*
2479 	 * We don't configure the RFH; the firmware will do that.
2480 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2481 	 */
2482 	return 0;
2483 }
2484 
2485 int
2486 iwx_nic_init(struct iwx_softc *sc)
2487 {
2488 	int err;
2489 
2490 	iwx_apm_init(sc);
2491 	iwx_nic_config(sc);
2492 
2493 	err = iwx_nic_rx_init(sc);
2494 	if (err)
2495 		return err;
2496 
2497 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2498 
2499 	return 0;
2500 }
2501 
2502 /* Map a TID to an ieee80211_edca_ac category. */
2503 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2504 	EDCA_AC_BE,
2505 	EDCA_AC_BK,
2506 	EDCA_AC_BK,
2507 	EDCA_AC_BE,
2508 	EDCA_AC_VI,
2509 	EDCA_AC_VI,
2510 	EDCA_AC_VO,
2511 	EDCA_AC_VO,
2512 };
2513 
2514 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2515 const uint8_t iwx_ac_to_tx_fifo[] = {
2516 	IWX_GEN2_EDCA_TX_FIFO_BE,
2517 	IWX_GEN2_EDCA_TX_FIFO_BK,
2518 	IWX_GEN2_EDCA_TX_FIFO_VI,
2519 	IWX_GEN2_EDCA_TX_FIFO_VO,
2520 };
2521 
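/*
 * Enable a Tx queue by sending an SCD_QUEUE_CFG command, then verify
 * that the queue ID and write pointer chosen by the firmware match
 * what the driver expects.
 */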
2522 int
2523 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2524     int num_slots)
2525 {
2526 	struct iwx_tx_queue_cfg_cmd cmd;
2527 	struct iwx_rx_packet *pkt;
2528 	struct iwx_tx_queue_cfg_rsp *resp;
2529 	struct iwx_host_cmd hcmd = {
2530 		.id = IWX_SCD_QUEUE_CFG,
2531 		.flags = IWX_CMD_WANT_RESP,
2532 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2533 	};
2534 	struct iwx_tx_ring *ring = &sc->txq[qid];
2535 	int err, fwqid;
2536 	uint32_t wr_idx;
2537 	size_t resp_len;
2538 
2539 	iwx_reset_tx_ring(sc, ring);
2540 
2541 	memset(&cmd, 0, sizeof(cmd));
2542 	cmd.sta_id = sta_id;
2543 	cmd.tid = tid;
2544 	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2545 	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2546 	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2547 	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2548 
2549 	hcmd.data[0] = &cmd;
2550 	hcmd.len[0] = sizeof(cmd);
2551 
2552 	err = iwx_send_cmd(sc, &hcmd);
2553 	if (err)
2554 		return err;
2555 
2556 	pkt = hcmd.resp_pkt;
2557 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2558 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2559 		err = EIO;
2560 		goto out;
2561 	}
2562 
2563 	resp_len = iwx_rx_packet_payload_len(pkt);
2564 	if (resp_len != sizeof(*resp)) {
2565 		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2566 		err = EIO;
2567 		goto out;
2568 	}
2569 
2570 	resp = (void *)pkt->data;
2571 	fwqid = le16toh(resp->queue_number);
2572 	wr_idx = le16toh(resp->write_pointer);
2573 
2574 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2575 	if (fwqid != qid) {
2576 		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2577 		err = EIO;
2578 		goto out;
2579 	}
2580 
2581 	if (wr_idx != ring->cur) {
2582 		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
2583 		err = EIO;
2584 		goto out;
2585 	}
2586 
2587 	sc->qenablemsk |= (1 << qid);
2588 	ring->tid = tid;
2589 out:
2590 	iwx_free_resp(sc, &hcmd);
2591 	return err;
2592 }
2593 
2594 void
2595 iwx_post_alive(struct iwx_softc *sc)
2596 {
2597 	iwx_ict_reset(sc);
2598 }
2599 
2600 /*
2601  * For the high priority TE use a time event type that has similar priority to
2602  * the FW's action scan priority.
2603  */
2604 #define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2605 #define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2606 
2607 int
2608 iwx_send_time_event_cmd(struct iwx_softc *sc,
2609     const struct iwx_time_event_cmd *cmd)
2610 {
2611 	struct iwx_rx_packet *pkt;
2612 	struct iwx_time_event_resp *resp;
2613 	struct iwx_host_cmd hcmd = {
2614 		.id = IWX_TIME_EVENT_CMD,
2615 		.flags = IWX_CMD_WANT_RESP,
2616 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2617 	};
2618 	uint32_t resp_len;
2619 	int err;
2620 
2621 	hcmd.data[0] = cmd;
2622 	hcmd.len[0] = sizeof(*cmd);
2623 	err = iwx_send_cmd(sc, &hcmd);
2624 	if (err)
2625 		return err;
2626 
2627 	pkt = hcmd.resp_pkt;
2628 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2629 		err = EIO;
2630 		goto out;
2631 	}
2632 
2633 	resp_len = iwx_rx_packet_payload_len(pkt);
2634 	if (resp_len != sizeof(*resp)) {
2635 		err = EIO;
2636 		goto out;
2637 	}
2638 
2639 	resp = (void *)pkt->data;
2640 	if (le32toh(resp->status) == 0)
2641 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2642 	else
2643 		err = EIO;
2644 out:
2645 	iwx_free_resp(sc, &hcmd);
2646 	return err;
2647 }
2648 
2649 int
2650 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2651     uint32_t duration)
2652 {
2653 	struct iwx_session_prot_cmd cmd = {
2654 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2655 		    in->in_color)),
2656 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2657 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2658 		.duration_tu = htole32(duration * IEEE80211_DUR_TU),
2659 	};
2660 	uint32_t cmd_id;
2661 
2662 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2663 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2664 }
2665 
2666 /*
2667  * NVM read access and content parsing.  We do not support
2668  * external NVM or writing NVM.
2669  */
2670 
2671 uint8_t
2672 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2673 {
2674 	uint8_t tx_ant;
2675 
2676 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2677 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2678 
2679 	if (sc->sc_nvm.valid_tx_ant)
2680 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2681 
2682 	return tx_ant;
2683 }
2684 
2685 uint8_t
2686 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2687 {
2688 	uint8_t rx_ant;
2689 
2690 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2691 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2692 
2693 	if (sc->sc_nvm.valid_rx_ant)
2694 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2695 
2696 	return rx_ant;
2697 }
2698 
2699 void
2700 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
2701     uint32_t *channel_profile_v4, int nchan_profile)
2702 {
2703 	struct ieee80211com *ic = &sc->sc_ic;
2704 	struct iwx_nvm_data *data = &sc->sc_nvm;
2705 	int ch_idx;
2706 	struct ieee80211_channel *channel;
2707 	uint32_t ch_flags;
2708 	int is_5ghz;
2709 	int flags, hw_value;
2710 	int nchan;
2711 	const uint8_t *nvm_channels;
2712 
2713 	if (sc->sc_uhb_supported) {
2714 		nchan = nitems(iwx_nvm_channels_uhb);
2715 		nvm_channels = iwx_nvm_channels_uhb;
2716 	} else {
2717 		nchan = nitems(iwx_nvm_channels_8000);
2718 		nvm_channels = iwx_nvm_channels_8000;
2719 	}
2720 
2721 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
2722 		if (channel_profile_v4)
2723 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
2724 		else
2725 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
2726 
2727 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
2728 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
2729 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
2730 
2731 		hw_value = nvm_channels[ch_idx];
2732 		channel = &ic->ic_channels[hw_value];
2733 
2734 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
2735 			channel->ic_freq = 0;
2736 			channel->ic_flags = 0;
2737 			continue;
2738 		}
2739 
2740 		if (!is_5ghz) {
2741 			flags = IEEE80211_CHAN_2GHZ;
2742 			channel->ic_flags
2743 			    = IEEE80211_CHAN_CCK
2744 			    | IEEE80211_CHAN_OFDM
2745 			    | IEEE80211_CHAN_DYN
2746 			    | IEEE80211_CHAN_2GHZ;
2747 		} else {
2748 			flags = IEEE80211_CHAN_5GHZ;
2749 			channel->ic_flags =
2750 			    IEEE80211_CHAN_A;
2751 		}
2752 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2753 
2754 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
2755 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2756 
2757 		if (data->sku_cap_11n_enable) {
2758 			channel->ic_flags |= IEEE80211_CHAN_HT;
2759 			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
2760 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
2761 		}
2762 	}
2763 }
2764 
2765 int
2766 iwx_mimo_enabled(struct iwx_softc *sc)
2767 {
2768 	struct ieee80211com *ic = &sc->sc_ic;
2769 
2770 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2771 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2772 }
2773 
2774 void
2775 iwx_setup_ht_rates(struct iwx_softc *sc)
2776 {
2777 	struct ieee80211com *ic = &sc->sc_ic;
2778 	uint8_t rx_ant;
2779 
2780 	/* TX is supported with the same MCS as RX. */
2781 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2782 
2783 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2784 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2785 
2786 	if (!iwx_mimo_enabled(sc))
2787 		return;
2788 
2789 	rx_ant = iwx_fw_valid_rx_ant(sc);
2790 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2791 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2792 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2793 }
2794 
2795 void
2796 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
2797     uint16_t ssn, uint16_t buf_size)
2798 {
2799 	reorder_buf->head_sn = ssn;
2800 	reorder_buf->num_stored = 0;
2801 	reorder_buf->buf_size = buf_size;
2802 	reorder_buf->last_amsdu = 0;
2803 	reorder_buf->last_sub_index = 0;
2804 	reorder_buf->removed = 0;
2805 	reorder_buf->valid = 0;
2806 	reorder_buf->consec_oldsn_drops = 0;
2807 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2808 	reorder_buf->consec_oldsn_prev_drop = 0;
2809 }
2810 
2811 void
2812 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
2813 {
2814 	int i;
2815 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2816 	struct iwx_reorder_buf_entry *entry;
2817 
2818 	for (i = 0; i < reorder_buf->buf_size; i++) {
2819 		entry = &rxba->entries[i];
2820 		ml_purge(&entry->frames);
2821 		timerclear(&entry->reorder_time);
2822 	}
2823 
2824 	reorder_buf->removed = 1;
2825 	timeout_del(&reorder_buf->reorder_timer);
2826 	timerclear(&rxba->last_rx);
2827 	timeout_del(&rxba->session_timer);
2828 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
2829 }
2830 
2831 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
2832 
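/*
 * RX block ack session timer.  If the session has been idle past its
 * timeout, request teardown of the agreement; otherwise re-arm.
 */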
2833 void
2834 iwx_rx_ba_session_expired(void *arg)
2835 {
2836 	struct iwx_rxba_data *rxba = arg;
2837 	struct iwx_softc *sc = rxba->sc;
2838 	struct ieee80211com *ic = &sc->sc_ic;
2839 	struct ieee80211_node *ni = ic->ic_bss;
2840 	struct timeval now, timeout, expiry;
2841 	int s;
2842 
2843 	s = splnet();
2844 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
2845 	    ic->ic_state == IEEE80211_S_RUN &&
2846 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
2847 		getmicrouptime(&now);
2848 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2849 		timeradd(&rxba->last_rx, &timeout, &expiry);
2850 		if (timercmp(&now, &expiry, <)) {
2851 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
2852 		} else {
2853 			ic->ic_stats.is_ht_rx_ba_timeout++;
2854 			ieee80211_delba_request(ic, ni,
2855 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
2856 		}
2857 	}
2858 	splx(s);
2859 }
2860 
2861 void
2862 iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
2863     struct iwx_rx_data *data, struct mbuf_list *ml)
2864 {
2865 	struct ieee80211com *ic = &sc->sc_ic;
2866 	struct ieee80211_node *ni = ic->ic_bss;
2867 	struct iwx_bar_frame_release *release = (void *)pkt->data;
2868 	struct iwx_reorder_buffer *buf;
2869 	struct iwx_rxba_data *rxba;
2870 	unsigned int baid, nssn, sta_id, tid;
2871 
2872 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
2873 		return;
2874 
2875 	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
2876 	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
2877 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
2878 	    baid >= nitems(sc->sc_rxba_data))
2879 		return;
2880 
2881 	rxba = &sc->sc_rxba_data[baid];
2882 	if (rxba == NULL || rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
2883 		return;
2884 
2885 	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
2886 	sta_id = (le32toh(release->sta_tid) &
2887 	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
2888 	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
2889 		return;
2890 
2891 	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
2892 	buf = &rxba->reorder_buf;
2893 	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
2894 }
2895 
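/*
 * Reorder buffer timer: release buffered frames whose reorder timeout
 * has expired, advancing the window up to the first unexpired hole.
 */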
2896 void
2897 iwx_reorder_timer_expired(void *arg)
2898 {
2899 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2900 	struct iwx_reorder_buffer *buf = arg;
2901 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
2902 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
2903 	struct iwx_softc *sc = rxba->sc;
2904 	struct ieee80211com *ic = &sc->sc_ic;
2905 	struct ieee80211_node *ni = ic->ic_bss;
2906 	int i, s;
2907 	uint16_t sn = 0, index = 0;
2908 	int expired = 0;
2909 	int cont = 0;
2910 	struct timeval now, timeout, expiry;
2911 
2912 	if (!buf->num_stored || buf->removed)
2913 		return;
2914 
2915 	s = splnet();
2916 	getmicrouptime(&now);
2917 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2918 
2919 	for (i = 0; i < buf->buf_size; i++) {
2920 		index = (buf->head_sn + i) % buf->buf_size;
2921 
2922 		if (ml_empty(&entries[index].frames)) {
2923 			/*
2924 			 * If there is a hole and the next frame didn't expire
2925 			 * we want to break and not advance SN.
2926 			 */
2927 			cont = 0;
2928 			continue;
2929 		}
2930 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
2931 		if (!cont && timercmp(&now, &expiry, <))
2932 			break;
2933 
2934 		expired = 1;
2935 		/* continue until next hole after this expired frame */
2936 		cont = 1;
2937 		sn = (buf->head_sn + (i + 1)) & 0xfff;
2938 	}
2939 
2940 	if (expired) {
2941 		/* SN is set to the last expired frame + 1 */
2942 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
2943 		if_input(&sc->sc_ic.ic_if, &ml);
2944 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
2945 	} else {
2946 		/*
2947 		 * If no frame expired and there are stored frames, index now
2948 		 * points to the first unexpired frame; re-arm the reorder
2949 		 * timer accordingly.
2950 		 */
2951 		timeout_add_usec(&buf->reorder_timer,
2952 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
2953 	}
2954 
2955 	splx(s);
2956 }
2957 
2958 #define IWX_MAX_RX_BA_SESSIONS 16
2959 
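/*
 * Start or stop an RX block ack session for a TID by modifying the
 * station entry in firmware.  On start, firmware returns a block ack
 * ID (BAID) which indexes the sc_rxba_data[] reorder state.
 */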
2960 void
2961 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2962     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
2963 {
2964 	struct ieee80211com *ic = &sc->sc_ic;
2965 	struct iwx_add_sta_cmd cmd;
2966 	struct iwx_node *in = (void *)ni;
2967 	int err, s;
2968 	uint32_t status;
2969 	struct iwx_rxba_data *rxba = NULL;
2970 	uint8_t baid = 0;
2971 
2972 	s = splnet();
2973 
2974 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
2975 		ieee80211_addba_req_refuse(ic, ni, tid);
2976 		splx(s);
2977 		return;
2978 	}
2979 
2980 	memset(&cmd, 0, sizeof(cmd));
2981 
2982 	cmd.sta_id = IWX_STATION_ID;
2983 	cmd.mac_id_n_color
2984 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2985 	cmd.add_modify = IWX_STA_MODE_MODIFY;
2986 
2987 	if (start) {
2988 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2989 		cmd.add_immediate_ba_ssn = htole16(ssn);
2990 		cmd.rx_ba_window = htole16(winsize);
2991 	} else {
2992 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2993 	}
2994 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
2995 	    IWX_STA_MODIFY_REMOVE_BA_TID;
2996 
2997 	status = IWX_ADD_STA_SUCCESS;
2998 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
2999 	    &status);
3000 
3001 	if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
3002 		if (start)
3003 			ieee80211_addba_req_refuse(ic, ni, tid);
3004 		splx(s);
3005 		return;
3006 	}
3007 
3008 	/* Deaggregation is done in hardware. */
3009 	if (start) {
3010 		if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
3011 			ieee80211_addba_req_refuse(ic, ni, tid);
3012 			splx(s);
3013 			return;
3014 		}
3015 		baid = (status & IWX_ADD_STA_BAID_MASK) >>
3016 		    IWX_ADD_STA_BAID_SHIFT;
3017 		if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3018 		    baid >= nitems(sc->sc_rxba_data)) {
3019 			ieee80211_addba_req_refuse(ic, ni, tid);
3020 			splx(s);
3021 			return;
3022 		}
3023 		rxba = &sc->sc_rxba_data[baid];
3024 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3025 			ieee80211_addba_req_refuse(ic, ni, tid);
3026 			splx(s);
3027 			return;
3028 		}
3029 		rxba->sta_id = IWX_STATION_ID;
3030 		rxba->tid = tid;
3031 		rxba->baid = baid;
3032 		rxba->timeout = timeout_val;
3033 		getmicrouptime(&rxba->last_rx);
3034 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3035 		    winsize);
3036 		if (timeout_val != 0) {
3037 			struct ieee80211_rx_ba *ba;
3038 			timeout_add_usec(&rxba->session_timer,
3039 			    timeout_val);
3040 			/* XXX disable net80211's BA timeout handler */
3041 			ba = &ni->ni_rx_ba[tid];
3042 			ba->ba_timeout_val = 0;
3043 		}
3044 	} else {
3045 		int i;
3046 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3047 			rxba = &sc->sc_rxba_data[i];
3048 			if (rxba->baid ==
3049 			    IWX_RX_REORDER_DATA_INVALID_BAID)
3050 				continue;
3051 			if (rxba->tid != tid)
3052 				continue;
3053 			iwx_clear_reorder_buffer(sc, rxba);
3054 			break;
3055 		}
3056 	}
3057 
3058 	if (start) {
3059 		sc->sc_rx_ba_sessions++;
3060 		ieee80211_addba_req_accept(ic, ni, tid);
3061 	} else if (sc->sc_rx_ba_sessions > 0)
3062 		sc->sc_rx_ba_sessions--;
3063 
3064 	splx(s);
3065 }
3066 
3067 void
3068 iwx_mac_ctxt_task(void *arg)
3069 {
3070 	struct iwx_softc *sc = arg;
3071 	struct ieee80211com *ic = &sc->sc_ic;
3072 	struct iwx_node *in = (void *)ic->ic_bss;
3073 	int err, s = splnet();
3074 
3075 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3076 	    ic->ic_state != IEEE80211_S_RUN) {
3077 		refcnt_rele_wake(&sc->task_refs);
3078 		splx(s);
3079 		return;
3080 	}
3081 
3082 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3083 	if (err)
3084 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3085 
3086 	refcnt_rele_wake(&sc->task_refs);
3087 	splx(s);
3088 }
3089 
3090 void
3091 iwx_phy_ctxt_task(void *arg)
3092 {
3093 	struct iwx_softc *sc = arg;
3094 	struct ieee80211com *ic = &sc->sc_ic;
3095 	struct iwx_node *in = (void *)ic->ic_bss;
3096 	struct ieee80211_node *ni = &in->in_ni;
3097 	uint8_t chains, sco;
3098 	int err, s = splnet();
3099 
3100 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3101 	    ic->ic_state != IEEE80211_S_RUN ||
3102 	    in->in_phyctxt == NULL) {
3103 		refcnt_rele_wake(&sc->task_refs);
3104 		splx(s);
3105 		return;
3106 	}
3107 
3108 	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3109 	if (ieee80211_node_supports_ht_chan40(ni))
3110 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3111 	else
3112 		sco = IEEE80211_HTOP0_SCO_SCN;
3113 	if (in->in_phyctxt->sco != sco) {
3114 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3115 		    in->in_phyctxt->channel, chains, chains, 0, sco);
3116 		if (err)
3117 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3118 	}
3119 
3120 	refcnt_rele_wake(&sc->task_refs);
3121 	splx(s);
3122 }
3123 
3124 void
3125 iwx_updatechan(struct ieee80211com *ic)
3126 {
3127 	struct iwx_softc *sc = ic->ic_softc;
3128 
3129 	if (ic->ic_state == IEEE80211_S_RUN &&
3130 	    !task_pending(&sc->newstate_task))
3131 		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3132 }
3133 
3134 void
3135 iwx_updateprot(struct ieee80211com *ic)
3136 {
3137 	struct iwx_softc *sc = ic->ic_softc;
3138 
3139 	if (ic->ic_state == IEEE80211_S_RUN &&
3140 	    !task_pending(&sc->newstate_task))
3141 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3142 }
3143 
3144 void
3145 iwx_updateslot(struct ieee80211com *ic)
3146 {
3147 	struct iwx_softc *sc = ic->ic_softc;
3148 
3149 	if (ic->ic_state == IEEE80211_S_RUN &&
3150 	    !task_pending(&sc->newstate_task))
3151 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3152 }
3153 
3154 void
3155 iwx_updateedca(struct ieee80211com *ic)
3156 {
3157 	struct iwx_softc *sc = ic->ic_softc;
3158 
3159 	if (ic->ic_state == IEEE80211_S_RUN &&
3160 	    !task_pending(&sc->newstate_task))
3161 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3162 }
3163 
3164 void
3165 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3166     uint8_t tid)
3167 {
3168 	struct ieee80211com *ic = &sc->sc_ic;
3169 	struct ieee80211_tx_ba *ba;
3170 	int err, qid;
3171 	struct iwx_tx_ring *ring;
3172 
3173 	/* Ensure we can map this TID to an aggregation queue. */
3174 	if (tid >= IWX_MAX_TID_COUNT)
3175 		return;
3176 
3177 	ba = &ni->ni_tx_ba[tid];
3178 	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3179 		return;
3180 
3181 	qid = sc->aggqid[tid];
3182 	if (qid == 0) {
3183 		/* Firmware should pick the next unused Tx queue. */
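		/* Queues are enabled in order; fls() predicts the next ID. */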
3184 		qid = fls(sc->qenablemsk);
3185 	}
3186 
3187 	/*
3188 	 * Simply enable the queue.
3189 	 * Firmware handles Tx BA session setup and teardown.
3190 	 */
3191 	if ((sc->qenablemsk & (1 << qid)) == 0) {
3192 		if (!iwx_nic_lock(sc)) {
3193 			ieee80211_addba_resp_refuse(ic, ni, tid,
3194 			    IEEE80211_STATUS_UNSPECIFIED);
3195 			return;
3196 		}
3197 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3198 		    IWX_TX_RING_COUNT);
3199 		iwx_nic_unlock(sc);
3200 		if (err) {
3201 			printf("%s: could not enable Tx queue %d "
3202 			    "(error %d)\n", DEVNAME(sc), qid, err);
3203 			ieee80211_addba_resp_refuse(ic, ni, tid,
3204 			    IEEE80211_STATUS_UNSPECIFIED);
3205 			return;
3206 		}
3207 
3208 		ba->ba_winstart = 0;
3209 	} else
3210 		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3211 
3212 	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3213 
3214 	ring = &sc->txq[qid];
3215 	ba->ba_timeout_val = 0;
3216 	ieee80211_addba_resp_accept(ic, ni, tid);
3217 	sc->aggqid[tid] = qid;
3218 }
3219 
3220 void
3221 iwx_ba_task(void *arg)
3222 {
3223 	struct iwx_softc *sc = arg;
3224 	struct ieee80211com *ic = &sc->sc_ic;
3225 	struct ieee80211_node *ni = ic->ic_bss;
3226 	int s = splnet();
3227 	int tid;
3228 
3229 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3230 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3231 			break;
3232 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3233 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3234 			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3235 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3236 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3237 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3238 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3239 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3240 		}
3241 	}
3242 
3243 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3244 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3245 			break;
3246 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3247 			iwx_sta_tx_agg_start(sc, ni, tid);
3248 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3249 		}
3250 	}
3251 
3252 	refcnt_rele_wake(&sc->task_refs);
3253 	splx(s);
3254 }
3255 
3256 /*
3257  * This function is called by upper layer when an ADDBA request is received
3258  * from another STA and before the ADDBA response is sent.
3259  */
3260 int
3261 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3262     uint8_t tid)
3263 {
3264 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3265 
3266 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3267 	    tid > IWX_MAX_TID_COUNT)
3268 		return ENOSPC;
3269 
3270 	if (sc->ba_rx.start_tidmask & (1 << tid))
3271 		return EBUSY;
3272 
3273 	sc->ba_rx.start_tidmask |= (1 << tid);
3274 	iwx_add_task(sc, systq, &sc->ba_task);
3275 
3276 	return EBUSY;
3277 }
3278 
3279 /*
3280  * This function is called by upper layer on teardown of an HT-immediate
3281  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3282  */
3283 void
3284 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3285     uint8_t tid)
3286 {
3287 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3288 
3289 	if (tid > IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3290 		return;
3291 
3292 	sc->ba_rx.stop_tidmask |= (1 << tid);
3293 	iwx_add_task(sc, systq, &sc->ba_task);
3294 }
3295 
3296 int
3297 iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3298     uint8_t tid)
3299 {
3300 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3301 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3302 
3303 	/*
3304 	 * Require a firmware version which uses an internal AUX queue.
3305 	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3306 	 */
3307 	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3308 		return ENOTSUP;
3309 
3310 	/* Ensure we can map this TID to an aggregation queue. */
3311 	if (tid >= IWX_MAX_TID_COUNT)
3312 		return EINVAL;
3313 
3314 	/* We only support a fixed Tx aggregation window size, for now. */
3315 	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3316 		return ENOTSUP;
3317 
3318 	/* Is firmware already using an agg queue with this TID? */
3319 	if (sc->aggqid[tid] != 0)
3320 		return ENOSPC;
3321 
3322 	/* Are we already processing an ADDBA request? */
3323 	if (sc->ba_tx.start_tidmask & (1 << tid))
3324 		return EBUSY;
3325 
3326 	sc->ba_tx.start_tidmask |= (1 << tid);
3327 	iwx_add_task(sc, systq, &sc->ba_task);
3328 
3329 	return EBUSY;
3330 }
3331 
3332 /* Read the MAC address from WFMP registers. */
3333 int
3334 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3335 {
3336 	const uint8_t *hw_addr;
3337 	uint32_t mac_addr0, mac_addr1;
3338 
3339 	if (!iwx_nic_lock(sc))
3340 		return EBUSY;
3341 
3342 	mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
3343 	mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
3344 
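	/* Each register holds its part of the MAC address byte-swapped. */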
3345 	hw_addr = (const uint8_t *)&mac_addr0;
3346 	data->hw_addr[0] = hw_addr[3];
3347 	data->hw_addr[1] = hw_addr[2];
3348 	data->hw_addr[2] = hw_addr[1];
3349 	data->hw_addr[3] = hw_addr[0];
3350 
3351 	hw_addr = (const uint8_t *)&mac_addr1;
3352 	data->hw_addr[4] = hw_addr[1];
3353 	data->hw_addr[5] = hw_addr[0];
3354 
3355 	iwx_nic_unlock(sc);
3356 	return 0;
3357 }
3358 
3359 int
3360 iwx_is_valid_mac_addr(const uint8_t *addr)
3361 {
3362 	static const uint8_t reserved_mac[] = {
3363 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3364 	};
3365 
3366 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3367 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3368 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3369 	    !ETHER_IS_MULTICAST(addr));
3370 }
3371 
3372 int
3373 iwx_nvm_get(struct iwx_softc *sc)
3374 {
3375 	struct iwx_nvm_get_info cmd = {};
3376 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3377 	struct iwx_host_cmd hcmd = {
3378 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3379 		.data = { &cmd, },
3380 		.len = { sizeof(cmd) },
3381 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3382 		    IWX_NVM_GET_INFO)
3383 	};
3384 	int err;
3385 	uint32_t mac_flags;
3386 	/*
3387 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3388 	 * in v3, except for the channel profile part of the
3389 	 * regulatory data.  So we can access the new struct throughout,
3390 	 * except when parsing the channel profile.
3391 	 */
3392 	struct iwx_nvm_get_info_rsp *rsp;
3393 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3394 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3395 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3396 
3397 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3398 	err = iwx_send_cmd(sc, &hcmd);
3399 	if (err)
3400 		return err;
3401 
3402 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3403 		err = EIO;
3404 		goto out;
3405 	}
3406 
3407 	memset(nvm, 0, sizeof(*nvm));
3408 
3409 	iwx_set_mac_addr_from_csr(sc, nvm);
3410 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3411 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3412 		err = EINVAL;
3413 		goto out;
3414 	}
3415 
3416 	rsp = (void *)hcmd.resp_pkt->data;
3417 
3418 	/* Initialize general data */
3419 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3420 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3421 
3422 	/* Initialize MAC sku data */
3423 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3424 	nvm->sku_cap_11ac_enable =
3425 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3426 	nvm->sku_cap_11n_enable =
3427 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3428 	nvm->sku_cap_11ax_enable =
3429 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3430 	nvm->sku_cap_band_24GHz_enable =
3431 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3432 	nvm->sku_cap_band_52GHz_enable =
3433 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3434 	nvm->sku_cap_mimo_disable =
3435 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3436 
3437 	/* Initialize PHY sku data */
3438 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3439 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3440 
3441 	if (le32toh(rsp->regulatory.lar_enabled) &&
3442 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3443 		nvm->lar_enabled = 1;
3444 	}
3445 
3446 	if (v4) {
3447 		iwx_init_channel_map(sc, NULL,
3448 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3449 	} else {
3450 		rsp_v3 = (void *)rsp;
3451 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3452 		    NULL, IWX_NUM_CHANNELS_V1);
3453 	}
3454 out:
3455 	iwx_free_resp(sc, &hcmd);
3456 	return err;
3457 }
3458 
3459 int
3460 iwx_load_firmware(struct iwx_softc *sc)
3461 {
3462 	struct iwx_fw_sects *fws;
3463 	int err;
3464 
3465 	splassert(IPL_NET);
3466 
3467 	sc->sc_uc.uc_intr = 0;
3468 	sc->sc_uc.uc_ok = 0;
3469 
3470 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3471 	err = iwx_ctxt_info_init(sc, fws);
3472 	if (err) {
3473 		printf("%s: could not init context info\n", DEVNAME(sc));
3474 		return err;
3475 	}
3476 
3477 	/* wait for the firmware to load */
3478 	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3479 	if (err || !sc->sc_uc.uc_ok) {
3480 		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
3481 		iwx_ctxt_info_free_paging(sc);
3482 	}
3483 
3484 	iwx_ctxt_info_free_fw_img(sc);
3485 
3486 	if (!sc->sc_uc.uc_ok)
3487 		return EINVAL;
3488 
3489 	return err;
3490 }
3491 
3492 int
3493 iwx_start_fw(struct iwx_softc *sc)
3494 {
3495 	int err;
3496 
3497 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3498 
3499 	iwx_disable_interrupts(sc);
3500 
3501 	/* make sure rfkill handshake bits are cleared */
3502 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3503 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3504 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3505 
3506 	/* clear (again), then enable firmware load interrupt */
3507 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3508 
3509 	err = iwx_nic_init(sc);
3510 	if (err) {
3511 		printf("%s: unable to init nic\n", DEVNAME(sc));
3512 		return err;
3513 	}
3514 
3515 	iwx_enable_fwload_interrupt(sc);
3516 
3517 	return iwx_load_firmware(sc);
3518 }
3519 
3520 int
3521 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3522 {
3523 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3524 		.valid = htole32(valid_tx_ant),
3525 	};
3526 
3527 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3528 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3529 }
3530 
3531 int
3532 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3533 {
3534 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3535 
3536 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3537 	phy_cfg_cmd.calib_control.event_trigger =
3538 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3539 	phy_cfg_cmd.calib_control.flow_trigger =
3540 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3541 
3542 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3543 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3544 }
3545 
3546 int
3547 iwx_send_dqa_cmd(struct iwx_softc *sc)
3548 {
3549 	struct iwx_dqa_enable_cmd dqa_cmd = {
3550 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3551 	};
3552 	uint32_t cmd_id;
3553 
3554 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3555 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3556 }
3557 
3558 int
3559 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
3560 {
3561 	int err;
3562 
3563 	err = iwx_read_firmware(sc);
3564 	if (err)
3565 		return err;
3566 
3567 	err = iwx_start_fw(sc);
3568 	if (err)
3569 		return err;
3570 
3571 	iwx_post_alive(sc);
3572 
3573 	return 0;
3574 }
3575 
3576 int
3577 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
3578 {
3579 	const int wait_flags = IWX_INIT_COMPLETE;
3580 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
3581 	struct iwx_init_extended_cfg_cmd init_cfg = {
3582 		.init_flags = htole32(IWX_INIT_NVM),
3583 	};
3584 	int err, s;
3585 
3586 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
3587 		printf("%s: radio is disabled by hardware switch\n",
3588 		    DEVNAME(sc));
3589 		return EPERM;
3590 	}
3591 
3592 	s = splnet();
3593 	sc->sc_init_complete = 0;
3594 	err = iwx_load_ucode_wait_alive(sc);
3595 	if (err) {
3596 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3597 		splx(s);
3598 		return err;
3599 	}
3600 
3601 	/*
3602 	 * Send the init config command to mark that we are about to send
3603 	 * NVM access commands.
3604 	 */
3605 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
3606 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
3607 	if (err) {
3608 		splx(s);
3609 		return err;
3610 	}
3611 
3612 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3613 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
3614 	if (err) {
3615 		splx(s);
3616 		return err;
3617 	}
3618 
3619 	/* Wait for the init complete notification from the firmware. */
3620 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3621 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
3622 		    SEC_TO_NSEC(2));
3623 		if (err) {
3624 			splx(s);
3625 			return err;
3626 		}
3627 	}
3628 	splx(s);
3629 	if (readnvm) {
3630 		err = iwx_nvm_get(sc);
3631 		if (err) {
3632 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3633 			return err;
3634 		}
3635 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3636 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3637 			    sc->sc_nvm.hw_addr);
3639 	}
3640 	return 0;
3641 }
3642 
3643 int
3644 iwx_config_ltr(struct iwx_softc *sc)
3645 {
3646 	struct iwx_ltr_config_cmd cmd = {
3647 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3648 	};
3649 
3650 	if (!sc->sc_ltr_enabled)
3651 		return 0;
3652 
3653 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3654 }
3655 
3656 void
3657 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
3658 {
3659 	struct iwx_rx_data *data = &ring->data[idx];
3660 
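	/*
	 * Each free RB descriptor combines the buffer's DMA address with
	 * a 12-bit receive buffer ID in the low bits. RX buffers are
	 * IWX_RBUF_SIZE-sized mbuf clusters (see iwx_rx_addbuf()), whose
	 * DMA addresses are expected to leave those low bits clear.
	 */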
3661 	((uint64_t *)ring->desc)[idx] =
3662 	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
3663 	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3664 	    idx * sizeof(uint64_t), sizeof(uint64_t),
3665 	    BUS_DMASYNC_PREWRITE);
3666 }
3667 
3668 int
3669 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
3670 {
3671 	struct iwx_rx_ring *ring = &sc->rxq;
3672 	struct iwx_rx_data *data = &ring->data[idx];
3673 	struct mbuf *m;
3674 	int err;
3675 	int fatal = 0;
3676 
3677 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3678 	if (m == NULL)
3679 		return ENOBUFS;
3680 
3681 	if (size <= MCLBYTES) {
3682 		MCLGET(m, M_DONTWAIT);
3683 	} else {
3684 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
3685 	}
3686 	if ((m->m_flags & M_EXT) == 0) {
3687 		m_freem(m);
3688 		return ENOBUFS;
3689 	}
3690 
3691 	if (data->m != NULL) {
3692 		bus_dmamap_unload(sc->sc_dmat, data->map);
3693 		fatal = 1;
3694 	}
3695 
3696 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3697 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3698 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3699 	if (err) {
3700 		/* XXX */
3701 		if (fatal)
3702 			panic("%s: could not load RX mbuf", DEVNAME(sc));
3703 		m_freem(m);
3704 		return err;
3705 	}
3706 	data->m = m;
3707 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3708 
3709 	/* Update RX descriptor. */
3710 	iwx_update_rx_desc(sc, ring, idx);
3711 
3712 	return 0;
3713 }
3714 
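/*
 * The Rx MPDU descriptor reports per-antenna energy as a positive
 * value in -dBm units; a value of zero indicates an unused antenna,
 * hence the -256 dBm sentinel below. The reported signal strength is
 * the stronger of the two chains.
 */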
3715 int
3716 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3717     struct iwx_rx_mpdu_desc *desc)
3718 {
3719 	int energy_a, energy_b;
3720 
3721 	energy_a = desc->v1.energy_a;
3722 	energy_b = desc->v1.energy_b;
3723 	energy_a = energy_a ? -energy_a : -256;
3724 	energy_b = energy_b ? -energy_b : -256;
3725 	return MAX(energy_a, energy_b);
3726 }
3727 
3728 void
3729 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3730     struct iwx_rx_data *data)
3731 {
3732 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3733 
3734 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3735 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3736 
3737 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3738 }
3739 
3740 /*
3741  * Retrieve the average noise (in dBm) among receivers.
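 * For example, beacon silence readings of 40, 42, and 0 (third antenna
 * unused) yield ((40 + 42) / 2) - 107 = -66 dBm.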
3742  */
3743 int
3744 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3745 {
3746 	int i, total, nbant, noise;
3747 
3748 	total = nbant = noise = 0;
3749 	for (i = 0; i < 3; i++) {
3750 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3751 		if (noise) {
3752 			total += noise;
3753 			nbant++;
3754 		}
3755 	}
3756 
3757 	/* There should be at least one antenna but check anyway. */
3758 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3759 }
3760 
3761 int
3762 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3763     struct ieee80211_rxinfo *rxi)
3764 {
3765 	struct ieee80211com *ic = &sc->sc_ic;
3766 	struct ieee80211_key *k;
3767 	struct ieee80211_frame *wh;
3768 	uint64_t pn, *prsc;
3769 	uint8_t *ivp;
3770 	uint8_t tid;
3771 	int hdrlen, hasqos;
3772 
3773 	wh = mtod(m, struct ieee80211_frame *);
3774 	hdrlen = ieee80211_get_hdrlen(wh);
3775 	ivp = (uint8_t *)wh + hdrlen;
3776 
3777 	/* find key for decryption */
3778 	k = ieee80211_get_rxkey(ic, m, ni);
3779 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
3780 		return 1;
3781 
3782 	/* Check that the ExtIV bit is set. */
3783 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
3784 		return 1;
3785 
3786 	hasqos = ieee80211_has_qos(wh);
3787 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3788 	prsc = &k->k_rsc[tid];
3789 
3790 	/* Extract the 48-bit PN from the CCMP header. */
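	/*
	 * The CCMP header layout (IEEE 802.11) is: bytes 0-1 carry
	 * PN0-PN1, byte 2 is reserved, byte 3 holds the key ID and the
	 * ExtIV flag, and bytes 4-7 carry PN2-PN5. This explains the
	 * non-contiguous ivp[] indices used here.
	 */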
3791 	pn = (uint64_t)ivp[0]       |
3792 	     (uint64_t)ivp[1] <<  8 |
3793 	     (uint64_t)ivp[4] << 16 |
3794 	     (uint64_t)ivp[5] << 24 |
3795 	     (uint64_t)ivp[6] << 32 |
3796 	     (uint64_t)ivp[7] << 40;
3797 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
3798 		if (pn < *prsc) {
3799 			ic->ic_stats.is_ccmp_replays++;
3800 			return 1;
3801 		}
3802 	} else if (pn <= *prsc) {
3803 		ic->ic_stats.is_ccmp_replays++;
3804 		return 1;
3805 	}
3806 	/* Last seen packet number is updated in ieee80211_inputm(). */
3807 
3808 	/*
3809 	 * Some firmware versions strip the MIC, and some don't. It is not
3810 	 * clear which of the capability flags could tell us what to expect.
3811 	 * For now, keep things simple and just leave the MIC in place if
3812 	 * it is present.
3813 	 *
3814 	 * The IV will be stripped by ieee80211_inputm().
3815 	 */
3816 	return 0;
3817 }
3818 
3819 int
3820 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
3821     struct ieee80211_rxinfo *rxi)
3822 {
3823 	struct ieee80211com *ic = &sc->sc_ic;
3824 	struct ifnet *ifp = IC2IFP(ic);
3825 	struct ieee80211_frame *wh;
3826 	struct ieee80211_node *ni;
3827 	int ret = 0;
3828 	uint8_t type, subtype;
3829 
3830 	wh = mtod(m, struct ieee80211_frame *);
3831 
3832 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3833 	if (type == IEEE80211_FC0_TYPE_CTL)
3834 		return 0;
3835 
3836 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3837 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
3838 		return 0;
3839 
3840 	ni = ieee80211_find_rxnode(ic, wh);
3841 	/* Handle hardware decryption. */
3842 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3843 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3844 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3845 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3846 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
3847 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3848 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
3849 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3850 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3851 			ic->ic_stats.is_ccmp_dec_errs++;
3852 			ret = 1;
3853 			goto out;
3854 		}
3855 		/* Check whether decryption was successful or not. */
3856 		if ((rx_pkt_status &
3857 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3858 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3859 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3860 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3861 			ic->ic_stats.is_ccmp_dec_errs++;
3862 			ret = 1;
3863 			goto out;
3864 		}
3865 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3866 	}
3867 out:
3868 	if (ret)
3869 		ifp->if_ierrors++;
3870 	ieee80211_release_node(ic, ni);
3871 	return ret;
3872 }
3873 
3874 void
3875 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3876     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3877     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3878     struct mbuf_list *ml)
3879 {
3880 	struct ieee80211com *ic = &sc->sc_ic;
3881 	struct ifnet *ifp = IC2IFP(ic);
3882 	struct ieee80211_frame *wh;
3883 	struct ieee80211_node *ni;
3884 	struct ieee80211_channel *bss_chan;
3885 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3886 
3887 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3888 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3889 
3890 	wh = mtod(m, struct ieee80211_frame *);
3891 	ni = ieee80211_find_rxnode(ic, wh);
3892 	if (ni == ic->ic_bss) {
3893 		/*
3894 		 * We may switch ic_bss's channel during scans.
3895 		 * Record the current channel so we can restore it later.
3896 		 */
3897 		bss_chan = ni->ni_chan;
3898 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3899 	}
3900 	ni->ni_chan = &ic->ic_channels[chanidx];
3901 
3902 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
3903 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
3904 		ifp->if_ierrors++;
3905 		m_freem(m);
3906 		ieee80211_release_node(ic, ni);
3907 		return;
3908 	}
3909 
3910 #if NBPFILTER > 0
3911 	if (sc->sc_drvbpf != NULL) {
3912 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3913 		uint16_t chan_flags;
3914 
3915 		tap->wr_flags = 0;
3916 		if (is_shortpre)
3917 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3918 		tap->wr_chan_freq =
3919 		    htole16(ic->ic_channels[chanidx].ic_freq);
3920 		chan_flags = ic->ic_channels[chanidx].ic_flags;
3921 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3922 			chan_flags &= ~IEEE80211_CHAN_HT;
3923 		tap->wr_chan_flags = htole16(chan_flags);
3924 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3925 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3926 		tap->wr_tsft = device_timestamp;
3927 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3928 			uint8_t mcs = (rate_n_flags &
3929 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3930 			    IWX_RATE_HT_MCS_NSS_MSK));
3931 			tap->wr_rate = (0x80 | mcs);
3932 		} else {
3933 			uint8_t rate = (rate_n_flags &
3934 			    IWX_RATE_LEGACY_RATE_MSK);
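			/*
			 * Translate the firmware's legacy rate code
			 * into a radiotap rate in 500 kb/s units.
			 * CCK codes appear as the rate in 100 kb/s
			 * units (e.g. 110 -> 11 Mb/s -> 22) while
			 * OFDM codes match the 802.11 PLCP rate
			 * encoding (e.g. 0xd -> 6 Mb/s -> 12).
			 */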
3935 			switch (rate) {
3936 			/* CCK rates. */
3937 			case  10: tap->wr_rate =   2; break;
3938 			case  20: tap->wr_rate =   4; break;
3939 			case  55: tap->wr_rate =  11; break;
3940 			case 110: tap->wr_rate =  22; break;
3941 			/* OFDM rates. */
3942 			case 0xd: tap->wr_rate =  12; break;
3943 			case 0xf: tap->wr_rate =  18; break;
3944 			case 0x5: tap->wr_rate =  24; break;
3945 			case 0x7: tap->wr_rate =  36; break;
3946 			case 0x9: tap->wr_rate =  48; break;
3947 			case 0xb: tap->wr_rate =  72; break;
3948 			case 0x1: tap->wr_rate =  96; break;
3949 			case 0x3: tap->wr_rate = 108; break;
3950 			/* Unknown rate: should not happen. */
3951 			default:  tap->wr_rate =   0;
3952 			}
3953 		}
3954 
3955 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
3956 		    m, BPF_DIRECTION_IN);
3957 	}
3958 #endif
3959 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
3960 	/*
3961 	 * ieee80211_inputm() might have changed our BSS.
3962 	 * Restore ic_bss's channel if we are still in the same BSS.
3963 	 */
3964 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
3965 		ni->ni_chan = bss_chan;
3966 	ieee80211_release_node(ic, ni);
3967 }
3968 
3969 /*
3970  * Drop duplicate 802.11 retransmissions
3971  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
3972  * and handle pseudo-duplicate frames which result from deaggregation
3973  * of A-MSDU frames in hardware.
3974  */
3975 int
3976 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
3977     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
3978 {
3979 	struct ieee80211com *ic = &sc->sc_ic;
3980 	struct iwx_node *in = (void *)ic->ic_bss;
3981 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
3982 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
3983 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3984 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3985 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3986 	int hasqos = ieee80211_has_qos(wh);
3987 	uint16_t seq;
3988 
3989 	if (type == IEEE80211_FC0_TYPE_CTL ||
3990 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
3991 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
3992 		return 0;
3993 
3994 	if (hasqos) {
3995 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
3996 		if (tid > IWX_MAX_TID_COUNT)
3997 			tid = IWX_MAX_TID_COUNT;
3998 	}
3999 
4000 	/* If this wasn't part of an A-MSDU, the sub-frame index will be 0. */
4001 	subframe_idx = desc->amsdu_info &
4002 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4003 
4004 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4005 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4006 	    dup_data->last_seq[tid] == seq &&
4007 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4008 		return 1;
4009 
4010 	/*
4011 	 * Allow the same frame sequence number for all A-MSDU subframes
4012 	 * following the first subframe.
4013 	 * Otherwise these subframes would be discarded as replays.
4014 	 */
4015 	if (dup_data->last_seq[tid] == seq &&
4016 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4017 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4018 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4019 	}
4020 
4021 	dup_data->last_seq[tid] = seq;
4022 	dup_data->last_sub_frame[tid] = subframe_idx;
4023 
4024 	return 0;
4025 }
4026 
4027 /*
4028  * Returns true if sn2 - buffer_size < sn1 < sn2.
4029  * To be used only in order to compare reorder buffer head with NSSN.
4030  * We fully trust NSSN unless it is behind us due to reorder timeout.
4031  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
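 * For example, with buffer_size 64, iwx_is_sn_less(90, 100, 64) holds
 * since 90 lies within the 64-frame window behind 100, whereas
 * iwx_is_sn_less(20, 100, 64) does not.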
4032  */
4033 int
4034 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4035 {
4036 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4037 }
4038 
4039 void
4040 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4041     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4042     uint16_t nssn, struct mbuf_list *ml)
4043 {
4044 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4045 	uint16_t ssn = reorder_buf->head_sn;
4046 
4047 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4048 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4049 		goto set_timer;
4050 
4051 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4052 		int index = ssn % reorder_buf->buf_size;
4053 		struct mbuf *m;
4054 		int chanidx, is_shortpre;
4055 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4056 		struct ieee80211_rxinfo *rxi;
4057 
4058 		/* This data is the same for all A-MSDU subframes. */
4059 		chanidx = entries[index].chanidx;
4060 		rx_pkt_status = entries[index].rx_pkt_status;
4061 		is_shortpre = entries[index].is_shortpre;
4062 		rate_n_flags = entries[index].rate_n_flags;
4063 		device_timestamp = entries[index].device_timestamp;
4064 		rxi = &entries[index].rxi;
4065 
4066 		/*
4067 		 * Empty the list. It will hold more than one frame if an
4068 		 * A-MSDU was stored. An empty list is valid as well, since
4069 		 * the nssn indicates that frames were received.
4070 		 */
4071 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4072 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4073 			    rate_n_flags, device_timestamp, rxi, ml);
4074 			reorder_buf->num_stored--;
4075 
4076 			/*
4077 			 * Allow the same frame sequence number and CCMP PN for
4078 			 * all A-MSDU subframes following the first subframe.
4079 			 * Otherwise they would be discarded as replays.
4080 			 */
4081 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4082 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4083 		}
4084 
4085 		ssn = (ssn + 1) & 0xfff;
4086 	}
4087 	reorder_buf->head_sn = nssn;
4088 
4089 set_timer:
4090 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4091 		timeout_add_usec(&reorder_buf->reorder_timer,
4092 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4093 	} else
4094 		timeout_del(&reorder_buf->reorder_timer);
4095 }
4096 
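/*
 * Work around a scenario where the firmware keeps flagging MPDUs with
 * an old sequence number across consecutive A-MPDUs (as detected by
 * their GP2 timestamps). Once IWX_AMPDU_CONSEC_DROPS_DELBA such A-MPDUs
 * have been seen, tear down the BA session rather than dropping frames
 * indefinitely. Returns 1 if a teardown was requested.
 */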
4097 int
4098 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4099     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4100 {
4101 	struct ieee80211com *ic = &sc->sc_ic;
4102 
4103 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4104 		/* we have a new (A-)MPDU ... */
4105 
4106 		/*
4107 		 * reset counter to 0 if we didn't have any oldsn in
4108 		 * the last A-MPDU (as detected by GP2 being identical)
4109 		 */
4110 		if (!buffer->consec_oldsn_prev_drop)
4111 			buffer->consec_oldsn_drops = 0;
4112 
4113 		/* either way, update our tracking state */
4114 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4115 	} else if (buffer->consec_oldsn_prev_drop) {
4116 		/*
4117 		 * tracking state didn't change, and we had an old SN
4118 		 * indication before - do nothing in this case, we
4119 		 * already noted this one down and are waiting for the
4120 		 * next A-MPDU (by GP2)
4121 		 */
4122 		return 0;
4123 	}
4124 
4125 	/* return unless this MPDU has old SN */
4126 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4127 		return 0;
4128 
4129 	/* update state */
4130 	buffer->consec_oldsn_prev_drop = 1;
4131 	buffer->consec_oldsn_drops++;
4132 
4133 	/* if limit is reached, send del BA and reset state */
4134 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4135 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4136 		    0, tid);
4137 		buffer->consec_oldsn_prev_drop = 0;
4138 		buffer->consec_oldsn_drops = 0;
4139 		return 1;
4140 	}
4141 
4142 	return 0;
4143 }
4144 
4145 /*
4146  * Handle re-ordering of frames which were de-aggregated in hardware.
4147  * Returns 1 if the MPDU was consumed (buffered or dropped).
4148  * Returns 0 if the MPDU should be passed to upper layer.
4149  */
4150 int
4151 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4152     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4153     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4154     struct mbuf_list *ml)
4155 {
4156 	struct ieee80211com *ic = &sc->sc_ic;
4157 	struct ieee80211_frame *wh;
4158 	struct ieee80211_node *ni;
4159 	struct iwx_rxba_data *rxba;
4160 	struct iwx_reorder_buffer *buffer;
4161 	uint32_t reorder_data = le32toh(desc->reorder_data);
4162 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4163 	int last_subframe =
4164 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4165 	uint8_t tid;
4166 	uint8_t subframe_idx = (desc->amsdu_info &
4167 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4168 	struct iwx_reorder_buf_entry *entries;
4169 	int index;
4170 	uint16_t nssn, sn;
4171 	uint8_t baid, type, subtype;
4172 	int hasqos;
4173 
4174 	wh = mtod(m, struct ieee80211_frame *);
4175 	hasqos = ieee80211_has_qos(wh);
4176 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4177 
4178 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4179 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4180 
4181 	/*
4182 	 * We are only interested in Block Ack requests and unicast QoS data.
4183 	 */
4184 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4185 		return 0;
4186 	if (hasqos) {
4187 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4188 			return 0;
4189 	} else {
4190 		if (type != IEEE80211_FC0_TYPE_CTL ||
4191 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4192 			return 0;
4193 	}
4194 
4195 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4196 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
4197 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4198 	    baid >= nitems(sc->sc_rxba_data))
4199 		return 0;
4200 
4201 	rxba = &sc->sc_rxba_data[baid];
4202 	if (rxba == NULL || tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4203 		return 0;
4204 
4205 	/* Bypass A-MPDU re-ordering in net80211. */
4206 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4207 
4208 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
4209 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
4210 		IWX_RX_MPDU_REORDER_SN_SHIFT;
4211 
4212 	buffer = &rxba->reorder_buf;
4213 	entries = &rxba->entries[0];
4214 
4215 	if (!buffer->valid) {
4216 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
4217 			return 0;
4218 		buffer->valid = 1;
4219 	}
4220 
4221 	ni = ieee80211_find_rxnode(ic, wh);
4222 	if (type == IEEE80211_FC0_TYPE_CTL &&
4223 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
4224 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4225 		goto drop;
4226 	}
4227 
4228 	/*
4229 	 * If there was a significant jump in the nssn, adjust.
4230 	 * If the SN is smaller than the NSSN it might need to first go into
4231 	 * the reorder buffer, in which case we just release up to it and
4232 	 * the rest of the function will take care of storing it and
4233 	 * releasing up to the nssn.
4234 	 */
4235 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4236 	    buffer->buf_size) ||
4237 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
4238 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
4239 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4240 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4241 	}
4242 
4243 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4244 	    device_timestamp)) {
4245 		/* BA session will be torn down. */
4246 		ic->ic_stats.is_ht_rx_ba_window_jump++;
4247 		goto drop;
4249 	}
4250 
4251 	/* drop any outdated packets */
4252 	if (SEQ_LT(sn, buffer->head_sn)) {
4253 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4254 		goto drop;
4255 	}
4256 
4257 	/* release immediately if allowed by nssn and no stored frames */
4258 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
4259 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4260 		   (!is_amsdu || last_subframe))
4261 			buffer->head_sn = nssn;
4262 		ieee80211_release_node(ic, ni);
4263 		return 0;
4264 	}
4265 
4266 	/*
4267 	 * release immediately if there are no stored frames, and the sn is
4268 	 * equal to the head.
4269 	 * This can happen due to reorder timer, where NSSN is behind head_sn.
4270 	 * When we released everything, and we got the next frame in the
4271 	 * sequence, according to the NSSN we can't release immediately,
4272 	 * while technically there is no hole and we can move forward.
4273 	 */
4274 	if (!buffer->num_stored && sn == buffer->head_sn) {
4275 		if (!is_amsdu || last_subframe)
4276 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4277 		ieee80211_release_node(ic, ni);
4278 		return 0;
4279 	}
4280 
4281 	index = sn % buffer->buf_size;
4282 
4283 	/*
4284 	 * Check if we have already stored this frame.
4285 	 * Since an A-MSDU is either received in full or not at all, the
4286 	 * logic is simple: if frames are stored here and the last frame
4287 	 * from an A-MSDU had a different SN, it is a retransmission. If
4288 	 * the SN is the same, it is the same A-MSDU if the subframe index
4289 	 * is incrementing; otherwise it is a retransmission.
4290 	 */
4291 	if (!ml_empty(&entries[index].frames)) {
4292 		if (!is_amsdu) {
4293 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4294 			goto drop;
4295 		} else if (sn != buffer->last_amsdu ||
4296 		    buffer->last_sub_index >= subframe_idx) {
4297 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4298 			goto drop;
4299 		}
4300 	} else {
4301 		/* This data is the same for all A-MSDU subframes. */
4302 		entries[index].chanidx = chanidx;
4303 		entries[index].is_shortpre = is_shortpre;
4304 		entries[index].rate_n_flags = rate_n_flags;
4305 		entries[index].device_timestamp = device_timestamp;
4306 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
4307 	}
4308 
4309 	/* put in reorder buffer */
4310 	ml_enqueue(&entries[index].frames, m);
4311 	buffer->num_stored++;
4312 	getmicrouptime(&entries[index].reorder_time);
4313 
4314 	if (is_amsdu) {
4315 		buffer->last_amsdu = sn;
4316 		buffer->last_sub_index = subframe_idx;
4317 	}
4318 
4319 	/*
4320 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
4321 	 * The reason is that NSSN advances on the first sub-frame, and may
4322 	 * cause the reorder buffer to advance before all the sub-frames arrive.
4323 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
4324 	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
4325 	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
4326 	 * already ahead and it will be dropped.
4327 	 * If the last sub-frame is not on this queue - we will get frame
4328 	 * release notification with up to date NSSN.
4329 	 */
4330 	if (!is_amsdu || last_subframe)
4331 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4332 
4333 	ieee80211_release_node(ic, ni);
4334 	return 1;
4335 
4336 drop:
4337 	m_freem(m);
4338 	ieee80211_release_node(ic, ni);
4339 	return 1;
4340 }
4341 
4342 void
4343 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4344     size_t maxlen, struct mbuf_list *ml)
4345 {
4346 	struct ieee80211com *ic = &sc->sc_ic;
4347 	struct ieee80211_rxinfo rxi;
4348 	struct iwx_rx_mpdu_desc *desc;
4349 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4350 	int rssi;
4351 	uint8_t chanidx;
4352 	uint16_t phy_info;
4353 
4354 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
4355 
4356 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4357 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4358 		m_freem(m);
4359 		return; /* drop */
4360 	}
4361 
4362 	len = le16toh(desc->mpdu_len);
4363 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4364 		/* Allow control frames in monitor mode. */
4365 		if (len < sizeof(struct ieee80211_frame_cts)) {
4366 			ic->ic_stats.is_rx_tooshort++;
4367 			IC2IFP(ic)->if_ierrors++;
4368 			m_freem(m);
4369 			return;
4370 		}
4371 	} else if (len < sizeof(struct ieee80211_frame)) {
4372 		ic->ic_stats.is_rx_tooshort++;
4373 		IC2IFP(ic)->if_ierrors++;
4374 		m_freem(m);
4375 		return;
4376 	}
4377 	if (len > maxlen - sizeof(*desc)) {
4378 		IC2IFP(ic)->if_ierrors++;
4379 		m_freem(m);
4380 		return;
4381 	}
4382 
4383 	m->m_data = pktdata + sizeof(*desc);
4384 	m->m_pkthdr.len = m->m_len = len;
4385 
4386 	/* Account for padding following the frame header. */
4387 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4388 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4389 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4390 		if (type == IEEE80211_FC0_TYPE_CTL) {
4391 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4392 			case IEEE80211_FC0_SUBTYPE_CTS:
4393 				hdrlen = sizeof(struct ieee80211_frame_cts);
4394 				break;
4395 			case IEEE80211_FC0_SUBTYPE_ACK:
4396 				hdrlen = sizeof(struct ieee80211_frame_ack);
4397 				break;
4398 			default:
4399 				hdrlen = sizeof(struct ieee80211_frame_min);
4400 				break;
4401 			}
4402 		} else
4403 			hdrlen = ieee80211_get_hdrlen(wh);
4404 
4405 		if ((le16toh(desc->status) &
4406 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4407 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4408 			/* Padding is inserted after the IV. */
4409 			hdrlen += IEEE80211_CCMP_HDRLEN;
4410 		}
4411 
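		/*
		 * The 2-byte pad sits between the frame header (plus the
		 * IV, if encrypted) and the frame body. Shift the header
		 * forward over the pad, then trim the two stale bytes now
		 * duplicated at the front of the mbuf.
		 */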
4412 		memmove(m->m_data + 2, m->m_data, hdrlen);
4413 		m_adj(m, 2);
4414 	}
4415 
4416 	memset(&rxi, 0, sizeof(rxi));
4417 
4418 	/*
4419 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4420 	 * in place for each subframe. But it leaves the 'A-MSDU present'
4421 	 * bit set in the frame header. We need to clear this bit ourselves.
4422 	 * (XXX This workaround is not required on AX200/AX201 devices that
4423 	 * have been tested by me, but it's unclear when this problem was
4424 	 * fixed in the hardware. It definitely affects the 9k generation.
4425 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
4426 	 * to exist that we may eventually add support for.)
4427 	 *
4428 	 * And we must allow the same CCMP PN for subframes following the
4429 	 * first subframe. Otherwise they would be discarded as replays.
4430 	 */
4431 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4432 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4433 		uint8_t subframe_idx = (desc->amsdu_info &
4434 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4435 		if (subframe_idx > 0)
4436 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4437 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4438 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4439 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4440 			    struct ieee80211_qosframe_addr4 *);
4441 			qwh4->i_qos[0] &= ~IEEE80211_QOS_AMSDU;
4442 		} else if (ieee80211_has_qos(wh) &&
4443 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
4444 			struct ieee80211_qosframe *qwh = mtod(m,
4445 			    struct ieee80211_qosframe *);
4446 			qwh->i_qos[0] &= ~IEEE80211_QOS_AMSDU;
4447 		}
4448 	}
4449 
4450 	/*
4451 	 * Verify decryption before duplicate detection. The latter uses
4452 	 * the TID supplied in QoS frame headers and this TID is implicitly
4453 	 * verified as part of the CCMP nonce.
4454 	 */
4455 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
4456 		m_freem(m);
4457 		return;
4458 	}
4459 
4460 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
4461 		m_freem(m);
4462 		return;
4463 	}
4464 
4465 	phy_info = le16toh(desc->phy_info);
4466 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
4467 	chanidx = desc->v1.channel;
4468 	device_timestamp = desc->v1.gp2_on_air_rise;
4469 
4470 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
4471 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
4472 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4473 
4474 	rxi.rxi_rssi = rssi;
4475 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
4476 
4477 	if (iwx_rx_reorder(sc, m, chanidx, desc,
4478 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4479 	    rate_n_flags, device_timestamp, &rxi, ml))
4480 		return;
4481 
4482 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4483 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4484 	    rate_n_flags, device_timestamp, &rxi, ml);
4485 }
4486 
4487 void
4488 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4489 {
4490 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
4491 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4492 	int i;
4493 
4494 	/* First TB is never cleared - it is bidirectional DMA data. */
4495 	for (i = 1; i < num_tbs; i++) {
4496 		struct iwx_tfh_tb *tb = &desc->tbs[i];
4497 		memset(tb, 0, sizeof(*tb));
4498 	}
4499 	desc->num_tbs = 0;
4500 
4501 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4502 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4503 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
4504 }
4505 
4506 void
4507 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
4508 {
4509 	struct ieee80211com *ic = &sc->sc_ic;
4510 
4511 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4512 	    BUS_DMASYNC_POSTWRITE);
4513 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4514 	m_freem(txd->m);
4515 	txd->m = NULL;
4516 
4517 	KASSERT(txd->in);
4518 	ieee80211_release_node(ic, &txd->in->in_ni);
4519 	txd->in = NULL;
4520 }
4521 
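/*
 * Reclaim Tx descriptors from the ring's tail up to, but not including,
 * the given index, which the firmware reports once the corresponding
 * frames have been processed.
 */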
4522 void
4523 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4524 {
4525 	struct iwx_tx_data *txd;
4526 
4527 	while (ring->tail != idx) {
4528 		txd = &ring->data[ring->tail];
4529 		if (txd->m != NULL) {
4530 			iwx_clear_tx_desc(sc, ring, ring->tail);
4531 			iwx_tx_update_byte_tbl(ring, ring->tail, 0, 0);
4532 			iwx_txd_done(sc, txd);
4533 			ring->queued--;
4534 		}
4535 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4536 	}
4537 }
4538 
4539 void
4540 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4541     struct iwx_rx_data *data)
4542 {
4543 	struct ieee80211com *ic = &sc->sc_ic;
4544 	struct ifnet *ifp = IC2IFP(ic);
4545 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
4546 	int qid = cmd_hdr->qid, status, txfail;
4547 	struct iwx_tx_ring *ring = &sc->txq[qid];
4548 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
4549 	uint32_t ssn;
4550 	uint32_t len = iwx_rx_packet_len(pkt);
4551 
4552 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
4553 	    BUS_DMASYNC_POSTREAD);
4554 
4555 	sc->sc_tx_timer = 0;
4556 
4557 	/* Sanity checks. */
4558 	if (sizeof(*tx_resp) > len)
4559 		return;
4560 	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
4561 		return;
4562 	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
4563 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
4564 		return;
4565 
4566 	if (tx_resp->frame_count > 1) /* A-MPDU */
4567 		return;
4568 
4569 	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
4570 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
4571 	    status != IWX_TX_STATUS_DIRECT_DONE);
4572 
4573 	if (txfail)
4574 		ifp->if_oerrors++;
4575 
4576 	/*
4577 	 * On hardware supported by iwx(4) the SSN counter is only
4578 	 * 8 bit and corresponds to a Tx ring index rather than a
4579 	 * sequence number. Frames up to this index (non-inclusive)
4580 	 * can now be freed.
4581 	 */
4582 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
4583 	ssn = le32toh(ssn) & 0xff;
4584 	iwx_txq_advance(sc, ring, ssn);
4585 	iwx_clear_oactive(sc, ring);
4586 }
4587 
4588 void
4589 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4590 {
4591 	struct ieee80211com *ic = &sc->sc_ic;
4592 	struct ifnet *ifp = IC2IFP(ic);
4593 
4594 	if (ring->queued < IWX_TX_RING_LOMARK) {
4595 		sc->qfullmsk &= ~(1 << ring->qid);
4596 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4597 			ifq_clr_oactive(&ifp->if_snd);
4598 			/*
4599 			 * Well, we're in interrupt context, but then again
4600 			 * I guess net80211 does all sorts of stunts in
4601 			 * interrupt context, so maybe this is no biggie.
4602 			 */
4603 			(*ifp->if_start)(ifp);
4604 		}
4605 	}
4606 }
4607 
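/*
 * Handle a compressed block-ack notification from the firmware by
 * advancing the affected aggregation Tx queues past the frames which
 * have now been acknowledged.
 */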
4608 void
4609 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4610     struct iwx_rx_data *data)
4611 {
4612 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4613 	struct ieee80211com *ic = &sc->sc_ic;
4614 	struct ieee80211_node *ni;
4615 	struct ieee80211_tx_ba *ba;
4616 	struct iwx_node *in;
4617 	struct iwx_tx_ring *ring;
4618 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4619 	int qid;
4620 
4621 	if (ic->ic_state != IEEE80211_S_RUN)
4622 		return;
4623 
4624 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4625 		return;
4626 
4627 	if (ba_res->sta_id != IWX_STATION_ID)
4628 		return;
4629 
4630 	ni = ic->ic_bss;
4631 	in = (void *)ni;
4632 
4633 	tfd_cnt = le16toh(ba_res->tfd_cnt);
4634 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4635 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4636 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4637 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
4638 		return;
4639 
4640 	for (i = 0; i < tfd_cnt; i++) {
4641 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4642 		uint8_t tid;
4643 
4644 		tid = ba_tfd->tid;
4645 		if (tid >= nitems(sc->aggqid))
4646 			continue;
4647 
4648 		qid = sc->aggqid[tid];
4649 		if (qid != htole16(ba_tfd->q_num))
4650 			continue;
4651 
4652 		ring = &sc->txq[qid];
4653 
4654 		ba = &ni->ni_tx_ba[tid];
4655 		if (ba->ba_state != IEEE80211_BA_AGREED)
4656 			continue;
4657 
4658 		idx = le16toh(ba_tfd->tfd_index);
4659 		if (idx >= IWX_TX_RING_COUNT)
4660 			continue;
4661 		sc->sc_tx_timer = 0;
4662 		iwx_txq_advance(sc, ring, idx);
4663 		iwx_clear_oactive(sc, ring);
4664 	}
4665 }
4666 
4667 void
4668 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4669     struct iwx_rx_data *data)
4670 {
4671 	struct ieee80211com *ic = &sc->sc_ic;
4672 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4673 	uint32_t missed;
4674 
4675 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4676 	    (ic->ic_state != IEEE80211_S_RUN))
4677 		return;
4678 
4679 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4680 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4681 
4682 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4683 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4684 		if (ic->ic_if.if_flags & IFF_DEBUG)
4685 			printf("%s: receiving no beacons from %s; checking if "
4686 			    "this AP is still responding to probe requests\n",
4687 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
4688 		/*
4689 		 * Rather than go directly to scan state, try to send a
4690 		 * directed probe request first. If that fails then the
4691 		 * state machine will drop us into scanning after timing
4692 		 * out waiting for a probe response.
4693 		 */
4694 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4695 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4696 	}
4698 }
4699 
4700 int
4701 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4702 {
4703 	struct iwx_binding_cmd cmd;
4704 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
4705 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4706 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4707 	uint32_t status;
4708 
4709 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
4710 		panic("binding already added");
4711 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4712 		panic("binding already removed");
4713 
4714 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
4715 		return EINVAL;
4716 
4717 	memset(&cmd, 0, sizeof(cmd));
4718 
4719 	cmd.id_and_color
4720 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4721 	cmd.action = htole32(action);
4722 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4723 
4724 	cmd.macs[0] = htole32(mac_id);
4725 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4726 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4727 
4728 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4729 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4730 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4731 	else
4732 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4733 
4734 	status = 0;
4735 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4736 	    &cmd, &status);
4737 	if (err == 0 && status != 0)
4738 		err = EIO;
4739 
4740 	return err;
4741 }
4742 
4743 int
4744 iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4745     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4746 {
4747 	struct ieee80211com *ic = &sc->sc_ic;
4748 	struct iwx_phy_context_cmd_uhb cmd;
4749 	uint8_t active_cnt, idle_cnt;
4750 	struct ieee80211_channel *chan = ctxt->channel;
4751 
4752 	memset(&cmd, 0, sizeof(cmd));
4753 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4754 	    ctxt->color));
4755 	cmd.action = htole32(action);
4756 
4757 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4758 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4759 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4760 	else
4761 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4762 
4763 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4764 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4765 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
4766 	if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
4767 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
4768 			/* secondary chan above -> control chan below */
4769 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4770 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4771 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
4772 			/* secondary chan below -> control chan above */
4773 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4774 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4775 		} else {
4776 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4777 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4778 		}
4779 	} else {
4780 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4781 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4782 	}
4783 
4784 	idle_cnt = chains_static;
4785 	active_cnt = chains_dynamic;
4786 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4787 	    IWX_PHY_RX_CHAIN_VALID_POS);
4788 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4789 	cmd.rxchain_info |= htole32(active_cnt <<
4790 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4791 
4792 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4793 }
4794 
4795 int
4796 iwx_phy_ctxt_cmd_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4797     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4798 {
4799 	struct ieee80211com *ic = &sc->sc_ic;
4800 	struct iwx_phy_context_cmd cmd;
4801 	uint8_t active_cnt, idle_cnt;
4802 	struct ieee80211_channel *chan = ctxt->channel;
4803 
4804 	memset(&cmd, 0, sizeof(cmd));
4805 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4806 	    ctxt->color));
4807 	cmd.action = htole32(action);
4808 
4809 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4810 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4811 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4812 	else
4813 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4814 
4815 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4816 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4817 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
4818 	if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
4819 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
4820 			/* secondary chan above -> control chan below */
4821 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4822 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4823 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
4824 			/* secondary chan below -> control chan above */
4825 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4826 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4827 		} else {
4828 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4829 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4830 		}
4831 	} else {
4832 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4833 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4834 	}
4835 
4836 	idle_cnt = chains_static;
4837 	active_cnt = chains_dynamic;
4838 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4839 	    IWX_PHY_RX_CHAIN_VALID_POS);
4840 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4841 	cmd.rxchain_info |= htole32(active_cnt <<
4842 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4843 
4844 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4845 }
4846 
4847 int
4848 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4849     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4850     uint32_t apply_time, uint8_t sco)
4851 {
4852 	int cmdver;
4853 
4854 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
4855 	if (cmdver != 3) {
4856 		printf("%s: firmware does not support phy-context-cmd v3\n",
4857 		    DEVNAME(sc));
4858 		return ENOTSUP;
4859 	}
4860 
4861 	/*
4862 	 * Intel increased the size of the fw_channel_info struct and neglected
4863 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
4864 	 * member in the middle.
4865 	 * To keep things simple we use a separate function to handle the larger
4866 	 * variant of the phy context command.
4867 	 */
4868 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
4869 		return iwx_phy_ctxt_cmd_uhb_v3(sc, ctxt, chains_static,
4870 		    chains_dynamic, action, sco);
4871 	}
4872 
4873 	return iwx_phy_ctxt_cmd_v3(sc, ctxt, chains_static, chains_dynamic,
4874 	    action, sco);
4875 }
4876 
4877 int
4878 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
4879 {
4880 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
4881 	struct iwx_tfh_tfd *desc;
4882 	struct iwx_tx_data *txdata;
4883 	struct iwx_device_cmd *cmd;
4884 	struct mbuf *m;
4885 	bus_addr_t paddr;
4886 	uint64_t addr;
4887 	int err = 0, i, paylen, off, s;
4888 	int idx, code, async, group_id;
4889 	size_t hdrlen, datasz;
4890 	uint8_t *data;
4891 	int generation = sc->sc_generation;
4892 
4893 	code = hcmd->id;
4894 	async = hcmd->flags & IWX_CMD_ASYNC;
4895 	idx = ring->cur;
4896 
4897 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
4898 		paylen += hcmd->len[i];
4899 	}
4900 
4901 	/* If this command waits for a response, allocate response buffer. */
4902 	hcmd->resp_pkt = NULL;
4903 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
4904 		uint8_t *resp_buf;
4905 		KASSERT(!async);
4906 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
4907 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
4908 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
4909 			return ENOSPC;
4910 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
4911 		    M_NOWAIT | M_ZERO);
4912 		if (resp_buf == NULL)
4913 			return ENOMEM;
4914 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
4915 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
4916 	} else {
4917 		sc->sc_cmd_resp_pkt[idx] = NULL;
4918 	}
4919 
4920 	s = splnet();
4921 
4922 	desc = &ring->desc[idx];
4923 	txdata = &ring->data[idx];
4924 
4925 	/*
4926 	 * XXX Intel inside (tm)
4927 	 * Firmware API versions >= 50 reject old-style commands in
4928 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
4929 	 * that such commands were in the LONG_GROUP instead in order
4930 	 * for firmware to accept them.
4931 	 */
4932 	if (iwx_cmd_groupid(code) == 0) {
4933 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
4934 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
4935 	} else
4936 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
4937 
4938 	group_id = iwx_cmd_groupid(code);
4939 
4940 	hdrlen = sizeof(cmd->hdr_wide);
4941 	datasz = sizeof(cmd->data_wide);
4942 
4943 	if (paylen > datasz) {
4944 		/* Command is too large to fit in pre-allocated space. */
4945 		size_t totlen = hdrlen + paylen;
4946 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
4947 			printf("%s: firmware command too long (%zd bytes)\n",
4948 			    DEVNAME(sc), totlen);
4949 			err = EINVAL;
4950 			goto out;
4951 		}
4952 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
4953 		if (m == NULL) {
4954 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
4955 			    DEVNAME(sc), totlen);
4956 			err = ENOMEM;
4957 			goto out;
4958 		}
4959 		cmd = mtod(m, struct iwx_device_cmd *);
4960 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4961 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4962 		if (err) {
4963 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
4964 			    DEVNAME(sc), totlen);
4965 			m_freem(m);
4966 			goto out;
4967 		}
4968 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
4969 		paddr = txdata->map->dm_segs[0].ds_addr;
4970 	} else {
4971 		cmd = &ring->cmd[idx];
4972 		paddr = txdata->cmd_paddr;
4973 	}
4974 
4975 	memset(cmd, 0, sizeof(*cmd));
4976 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
4977 	cmd->hdr_wide.group_id = group_id;
4978 	cmd->hdr_wide.qid = ring->qid;
4979 	cmd->hdr_wide.idx = idx;
4980 	cmd->hdr_wide.length = htole16(paylen);
4981 	cmd->hdr_wide.version = iwx_cmd_version(code);
4982 	data = cmd->data_wide;
4983 
4984 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
4985 		if (hcmd->len[i] == 0)
4986 			continue;
4987 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4988 		off += hcmd->len[i];
4989 	}
4990 	KASSERT(off == paylen);
4991 
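	/*
	 * Point the TFD at the command: the first TB covers at most
	 * IWX_FIRST_TB_SIZE bytes, and a second TB picks up whatever
	 * remains beyond that.
	 */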
4992 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
4993 	addr = htole64(paddr);
4994 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
4995 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
4996 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
4997 		    IWX_FIRST_TB_SIZE);
4998 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
4999 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5000 		desc->num_tbs = htole16(2);
5001 	} else
5002 		desc->num_tbs = htole16(1);
5003 
5004 	if (paylen > datasz) {
5005 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5006 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5007 	} else {
5008 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5009 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5010 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5011 	}
5012 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5013 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5014 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5015 	/* Kick command ring. */
5016 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5017 	ring->queued++;
5018 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5019 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
5020 
5021 	if (!async) {
5022 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5023 		if (err == 0) {
5024 			/* if hardware is no longer up, return error */
5025 			if (generation != sc->sc_generation) {
5026 				err = ENXIO;
5027 				goto out;
5028 			}
5029 
5030 			/* Response buffer will be freed in iwx_free_resp(). */
5031 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5032 			sc->sc_cmd_resp_pkt[idx] = NULL;
5033 		} else if (generation == sc->sc_generation) {
5034 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5035 			    sc->sc_cmd_resp_len[idx]);
5036 			sc->sc_cmd_resp_pkt[idx] = NULL;
5037 		}
5038 	}
5039  out:
5040 	splx(s);
5041 
5042 	return err;
5043 }
5044 
5045 int
5046 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5047     uint16_t len, const void *data)
5048 {
5049 	struct iwx_host_cmd cmd = {
5050 		.id = id,
5051 		.len = { len, },
5052 		.data = { data, },
5053 		.flags = flags,
5054 	};
5055 
5056 	return iwx_send_cmd(sc, &cmd);
5057 }
5058 
5059 int
5060 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5061     uint32_t *status)
5062 {
5063 	struct iwx_rx_packet *pkt;
5064 	struct iwx_cmd_response *resp;
5065 	int err, resp_len;
5066 
5067 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5068 	cmd->flags |= IWX_CMD_WANT_RESP;
5069 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5070 
5071 	err = iwx_send_cmd(sc, cmd);
5072 	if (err)
5073 		return err;
5074 
5075 	pkt = cmd->resp_pkt;
5076 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5077 		return EIO;
5078 
5079 	resp_len = iwx_rx_packet_payload_len(pkt);
5080 	if (resp_len != sizeof(*resp)) {
5081 		iwx_free_resp(sc, cmd);
5082 		return EIO;
5083 	}
5084 
5085 	resp = (void *)pkt->data;
5086 	*status = le32toh(resp->status);
5087 	iwx_free_resp(sc, cmd);
5088 	return err;
5089 }
5090 
5091 int
5092 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5093     const void *data, uint32_t *status)
5094 {
5095 	struct iwx_host_cmd cmd = {
5096 		.id = id,
5097 		.len = { len, },
5098 		.data = { data, },
5099 	};
5100 
5101 	return iwx_send_cmd_status(sc, &cmd, status);
5102 }
5103 
5104 void
5105 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5106 {
5107 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5108 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5109 	hcmd->resp_pkt = NULL;
5110 }
5111 
5112 void
5113 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5114 {
5115 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5116 	struct iwx_tx_data *data;
5117 
5118 	if (qid != IWX_DQA_CMD_QUEUE) {
5119 		return;	/* Not a command ack. */
5120 	}
5121 
5122 	data = &ring->data[idx];
5123 
5124 	if (data->m != NULL) {
5125 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5126 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5127 		bus_dmamap_unload(sc->sc_dmat, data->map);
5128 		m_freem(data->m);
5129 		data->m = NULL;
5130 	}
5131 	wakeup(&ring->desc[idx]);
5132 
5133 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5134 	if (ring->queued == 0) {
5135 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5136 			DEVNAME(sc), code));
5137 	} else if (ring->queued > 0)
5138 		ring->queued--;
5139 }
5140 
5141 /*
5142  * Fill in various bits for management frames, and leave them
5143  * unfilled for data frames (firmware takes care of that).
5144  * Return the selected TX rate.
5145  */
5146 const struct iwx_rate *
5147 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5148     struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
5149 {
5150 	struct ieee80211com *ic = &sc->sc_ic;
5151 	struct ieee80211_node *ni = &in->in_ni;
5152 	struct ieee80211_rateset *rs = &ni->ni_rates;
5153 	const struct iwx_rate *rinfo;
5154 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5155 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
5156 	int ridx, rate_flags;
5157 	uint32_t flags = 0;
5158 
5159 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5160 	    type != IEEE80211_FC0_TYPE_DATA) {
5161 		/* for non-data, use the lowest supported rate */
5162 		ridx = min_ridx;
5163 		flags |= IWX_TX_FLAGS_CMD_RATE;
5164 	} else if (ic->ic_fixed_mcs != -1) {
5165 		ridx = sc->sc_fixed_ridx;
5166 		flags |= IWX_TX_FLAGS_CMD_RATE;
5167 	} else if (ic->ic_fixed_rate != -1) {
5168 		ridx = sc->sc_fixed_ridx;
5169 		flags |= IWX_TX_FLAGS_CMD_RATE;
5170 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5171 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
5172 	} else {
5173 		uint8_t rval;
5174 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
5175 		ridx = iwx_rval2ridx(rval);
5176 		if (ridx < min_ridx)
5177 			ridx = min_ridx;
5178 	}
5179 
5180 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
5181 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
5182 		flags |= IWX_TX_FLAGS_HIGH_PRI;
5183 	tx->flags = htole32(flags);
5184 
5185 	rinfo = &iwx_rates[ridx];
5186 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
5187 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
5188 	else
5189 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5190 	if (IWX_RIDX_IS_CCK(ridx))
5191 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
5192 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5193 	    type == IEEE80211_FC0_TYPE_DATA &&
5194 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
5195 		uint8_t sco;
5196 		if (ieee80211_node_supports_ht_chan40(ni))
5197 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
5198 		else
5199 			sco = IEEE80211_HTOP0_SCO_SCN;
5200 		rate_flags |= IWX_RATE_MCS_HT_MSK;
5201 		if ((sco == IEEE80211_HTOP0_SCO_SCA ||
5202 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
5203 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
5204 			rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_40;
5205 			if (ieee80211_node_supports_ht_sgi40(ni))
5206 				rate_flags |= IWX_RATE_MCS_SGI_MSK;
5207 		} else if (ieee80211_node_supports_ht_sgi20(ni))
5208 			rate_flags |= IWX_RATE_MCS_SGI_MSK;
5209 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
5210 	} else
5211 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
5212 
5213 	return rinfo;
5214 }
5215 
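/*
 * Update the byte count table entry for a TFD. The firmware's
 * scheduler presumably uses this table to learn how many bytes,
 * and how many 64-byte chunks, to fetch from host memory for
 * each queued frame.
 */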
5216 void
5217 iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
5218     uint16_t num_tbs)
5219 {
5220 	uint8_t filled_tfd_size, num_fetch_chunks;
5221 	uint16_t len = byte_cnt;
5222 	uint16_t bc_ent;
5223 	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5224 
5225 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5226 			  num_tbs * sizeof(struct iwx_tfh_tb);
5227 	/*
5228 	 * filled_tfd_size contains the number of filled bytes in the TFD.
5229 	 * Dividing it by 64 gives the number of chunks to fetch
5230 	 * to SRAM: 0 for one chunk, 1 for two, and so on.
5231 	 * If, for example, the TFD contains only 3 TBs, then 32 bytes
5232 	 * of the TFD are used, and only one chunk of 64 bytes should
5233 	 * be fetched.
5234 	 */
5235 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
5236 
5237 	/* Before AX210, the HW expects the byte count in dwords (DW). */
5238 	len = howmany(len, 4);
5239 	bc_ent = htole16(len | (num_fetch_chunks << 12));
5240 	scd_bc_tbl->tfd_offset[idx] = bc_ent;
5241 }
5242 
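/*
 * Transmit one frame. The TFD for a frame uses 2 + N transfer
 * buffers (TBs): TB0 covers the first IWX_FIRST_TB_SIZE bytes of the
 * Tx command, TB1 covers the remainder of the command plus the
 * (padded) 802.11 header, and TB2..TBn map the N DMA segments of the
 * frame payload.
 */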
5243 int
5244 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5245 {
5246 	struct ieee80211com *ic = &sc->sc_ic;
5247 	struct iwx_node *in = (void *)ni;
5248 	struct iwx_tx_ring *ring;
5249 	struct iwx_tx_data *data;
5250 	struct iwx_tfh_tfd *desc;
5251 	struct iwx_device_cmd *cmd;
5252 	struct iwx_tx_cmd_gen2 *tx;
5253 	struct ieee80211_frame *wh;
5254 	struct ieee80211_key *k = NULL;
5255 	const struct iwx_rate *rinfo;
5256 	uint64_t paddr;
5257 	u_int hdrlen;
5258 	bus_dma_segment_t *seg;
5259 	uint16_t num_tbs;
5260 	uint8_t type, subtype;
5261 	int i, totlen, err, pad, qid;
5262 
5263 	wh = mtod(m, struct ieee80211_frame *);
5264 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5265 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5266 	if (type == IEEE80211_FC0_TYPE_CTL)
5267 		hdrlen = sizeof(struct ieee80211_frame_min);
5268 	else
5269 		hdrlen = ieee80211_get_hdrlen(wh);
5270 
5271 	qid = sc->first_data_qid;
5272 
5273 	/* Put QoS frames on the data queue which maps to their TID. */
5274 	if (ieee80211_has_qos(wh)) {
5275 		struct ieee80211_tx_ba *ba;
5276 		uint16_t qos = ieee80211_get_qos(wh);
5277 		uint8_t tid = qos & IEEE80211_QOS_TID;
5278 
5279 		ba = &ni->ni_tx_ba[tid];
5280 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5281 		    type == IEEE80211_FC0_TYPE_DATA &&
5282 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5283 		    sc->aggqid[tid] != 0 &&
5284 		    ba->ba_state == IEEE80211_BA_AGREED) {
5285 			qid = sc->aggqid[tid];
5286 		}
5287 	}
5288 
5289 	ring = &sc->txq[qid];
5290 	desc = &ring->desc[ring->cur];
5291 	memset(desc, 0, sizeof(*desc));
5292 	data = &ring->data[ring->cur];
5293 
5294 	cmd = &ring->cmd[ring->cur];
5295 	cmd->hdr.code = IWX_TX_CMD;
5296 	cmd->hdr.flags = 0;
5297 	cmd->hdr.qid = ring->qid;
5298 	cmd->hdr.idx = ring->cur;
5299 
5300 	tx = (void *)cmd->data;
5301 	memset(tx, 0, sizeof(*tx));
5302 
5303 	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
5304 
5305 #if NBPFILTER > 0
5306 	if (sc->sc_drvbpf != NULL) {
5307 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5308 		uint16_t chan_flags;
5309 
5310 		tap->wt_flags = 0;
5311 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5312 		chan_flags = ni->ni_chan->ic_flags;
5313 		if (ic->ic_curmode != IEEE80211_MODE_11N)
5314 			chan_flags &= ~IEEE80211_CHAN_HT;
5315 		tap->wt_chan_flags = htole16(chan_flags);
5316 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5317 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5318 		    type == IEEE80211_FC0_TYPE_DATA &&
5319 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
5320 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
5321 		} else
5322 			tap->wt_rate = rinfo->rate;
5323 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
5324 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
5325 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5326 
5327 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
5328 		    m, BPF_DIRECTION_OUT);
5329 	}
5330 #endif
5331 
5332 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5333 		k = ieee80211_get_txkey(ic, wh, ni);
5334 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
5335 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
5336 				return ENOBUFS;
5337 			/* 802.11 header may have moved. */
5338 			wh = mtod(m, struct ieee80211_frame *);
5339 			tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
5340 		} else {
5341 			k->k_tsc++;
5342 			/* Hardware increments PN internally and adds IV. */
5343 		}
5344 	} else
5345 		tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
5346 
5347 	totlen = m->m_pkthdr.len;
5348 
5349 	if (hdrlen & 3) {
5350 		/* First segment length must be a multiple of 4. */
5351 		pad = 4 - (hdrlen & 3);
5352 		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
5353 	} else
5354 		pad = 0;
5355 
5356 	tx->len = htole16(totlen);
5357 
5358 	/* Copy the 802.11 header into the TX command. */
5359 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
5360 
5361 	/* Trim 802.11 header. */
5362 	m_adj(m, hdrlen);
5363 
5364 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5365 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5366 	if (err && err != EFBIG) {
5367 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5368 		m_freem(m);
5369 		return err;
5370 	}
5371 	if (err) {
5372 		/* Too many DMA segments, linearize mbuf. */
5373 		if (m_defrag(m, M_DONTWAIT)) {
5374 			m_freem(m);
5375 			return ENOBUFS;
5376 		}
5377 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5378 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5379 		if (err) {
5380 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
5381 			    err);
5382 			m_freem(m);
5383 			return err;
5384 		}
5385 	}
5386 	data->m = m;
5387 	data->in = in;
5388 
5389 	/* Fill TX descriptor. */
5390 	num_tbs = 2 + data->map->dm_nsegs;
5391 	desc->num_tbs = htole16(num_tbs);
5392 
5393 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5394 	paddr = htole64(data->cmd_paddr);
5395 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5396 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[0].tb_len)) >> 32)
5397 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5398 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5399 	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
5400 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5401 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5402 
5403 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[1].tb_len)) >> 32)
5404 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5405 
5406 	/* Other DMA segments are for data payload. */
5407 	seg = data->map->dm_segs;
5408 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5409 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5410 		paddr = htole64(seg->ds_addr);
5411 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5412 		if (seg->ds_addr >> 32 != (seg->ds_addr + le16toh(desc->tbs[i + 2].tb_len)) >> 32)
5413 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5414 	}
5415 
5416 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5417 	    BUS_DMASYNC_PREWRITE);
5418 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5419 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5420 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5421 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5422 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5423 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5424 
5425 	iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);
5426 
5427 	/* Kick TX ring. */
5428 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5429 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
5430 
5431 	/* Mark TX ring as full if we reach a certain threshold. */
5432 	if (++ring->queued > IWX_TX_RING_HIMARK) {
5433 		sc->qfullmsk |= 1 << ring->qid;
5434 	}
5435 
5436 	return 0;
5437 }
5438 
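/*
 * Ask the firmware to flush any frames still queued for the given
 * station and TIDs, and advance our Tx rings past the frames which
 * the firmware reports as flushed.
 */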
5439 int
5440 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
5441 {
5442 	struct iwx_rx_packet *pkt;
5443 	struct iwx_tx_path_flush_cmd_rsp *resp;
5444 	struct iwx_tx_path_flush_cmd flush_cmd = {
5445 		.sta_id = htole32(sta_id),
5446 		.tid_mask = htole16(tids),
5447 	};
5448 	struct iwx_host_cmd hcmd = {
5449 		.id = IWX_TXPATH_FLUSH,
5450 		.len = { sizeof(flush_cmd), },
5451 		.data = { &flush_cmd, },
5452 		.flags = IWX_CMD_WANT_RESP,
5453 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
5454 	};
5455 	int err, resp_len, i, num_flushed_queues;
5456 
5457 	err = iwx_send_cmd(sc, &hcmd);
5458 	if (err)
5459 		return err;
5460 
5461 	pkt = hcmd.resp_pkt;
5462 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
5463 		err = EIO;
5464 		goto out;
5465 	}
5466 
5467 	resp_len = iwx_rx_packet_payload_len(pkt);
5468 	/* Some firmware versions don't provide a response. */
5469 	if (resp_len == 0)
5470 		goto out;
5471 	else if (resp_len != sizeof(*resp)) {
5472 		err = EIO;
5473 		goto out;
5474 	}
5475 
5476 	resp = (void *)pkt->data;
5477 
5478 	if (le16toh(resp->sta_id) != sta_id) {
5479 		err = EIO;
5480 		goto out;
5481 	}
5482 
5483 	num_flushed_queues = le16toh(resp->num_flushed_queues);
5484 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
5485 		err = EIO;
5486 		goto out;
5487 	}
5488 
5489 	for (i = 0; i < num_flushed_queues; i++) {
5490 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
5491 		uint16_t tid = le16toh(queue_info->tid);
5492 		uint16_t read_after = le16toh(queue_info->read_after_flush);
5493 		uint16_t qid = le16toh(queue_info->queue_num);
5494 		struct iwx_tx_ring *txq;
5495 
5496 		if (qid >= nitems(sc->txq))
5497 			continue;
5498 
5499 		txq = &sc->txq[qid];
5500 		if (tid != txq->tid)
5501 			continue;
5502 
5503 		iwx_txq_advance(sc, txq, read_after);
5504 	}
5505 out:
5506 	iwx_free_resp(sc, &hcmd);
5507 	return err;
5508 }
5509 
5510 #define IWX_FLUSH_WAIT_MS	2000
5511 
5512 int
5513 iwx_wait_tx_queues_empty(struct iwx_softc *sc)
5514 {
5515 	int i, err;
5516 
5517 	for (i = 0; i < nitems(sc->txq); i++) {
5518 		struct iwx_tx_ring *ring = &sc->txq[i];
5519 
5520 		if (i == IWX_DQA_CMD_QUEUE)
5521 			continue;
5522 
5523 		while (ring->queued > 0) {
5524 			err = tsleep_nsec(ring, 0, "iwxflush",
5525 			    MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
5526 			if (err)
5527 				return err;
5528 		}
5529 	}
5530 
5531 	return 0;
5532 }
5533 
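/*
 * Enable or disable the firmware's flow-drain mode for our station
 * via the IWX_STA_FLG_DRAIN_FLOW station flag.
 */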
5534 int
5535 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5536 {
5537 	struct iwx_add_sta_cmd cmd;
5538 	int err;
5539 	uint32_t status;
5540 
5541 	memset(&cmd, 0, sizeof(cmd));
5542 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5543 	    in->in_color));
5544 	cmd.sta_id = IWX_STATION_ID;
5545 	cmd.add_modify = IWX_STA_MODE_MODIFY;
5546 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5547 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5548 
5549 	status = IWX_ADD_STA_SUCCESS;
5550 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5551 	    sizeof(cmd), &cmd, &status);
5552 	if (err) {
5553 		printf("%s: could not update sta (error %d)\n",
5554 		    DEVNAME(sc), err);
5555 		return err;
5556 	}
5557 
5558 	switch (status & IWX_ADD_STA_STATUS_MASK) {
5559 	case IWX_ADD_STA_SUCCESS:
5560 		break;
5561 	default:
5562 		err = EIO;
5563 		printf("%s: could not %s draining for station\n",
5564 		    DEVNAME(sc), drain ? "enable" : "disable");
5565 		break;
5566 	}
5567 
5568 	return err;
5569 }
5570 
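/*
 * Flush all frames pending for our station: enable draining, flush
 * all TIDs in the firmware's Tx path, wait until our own Tx rings
 * have emptied, and finally switch draining off again.
 */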
5571 int
5572 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5573 {
5574 	int err;
5575 
5576 	splassert(IPL_NET);
5577 
5578 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
5579 
5580 	err = iwx_drain_sta(sc, in, 1);
5581 	if (err)
5582 		goto done;
5583 
5584 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5585 	if (err) {
5586 		printf("%s: could not flush Tx path (error %d)\n",
5587 		    DEVNAME(sc), err);
5588 		goto done;
5589 	}
5590 
5591 	err = iwx_wait_tx_queues_empty(sc);
5592 	if (err) {
5593 		printf("%s: Could not empty Tx queues (error %d)\n",
5594 		    DEVNAME(sc), err);
5595 		goto done;
5596 	}
5597 
5598 	err = iwx_drain_sta(sc, in, 0);
5599 done:
5600 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5601 	return err;
5602 }
5603 
5604 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
5605 
5606 int
5607 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5608     struct iwx_beacon_filter_cmd *cmd)
5609 {
5610 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5611 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5612 }
5613 
5614 int
5615 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5616 {
5617 	struct iwx_beacon_filter_cmd cmd = {
5618 		IWX_BF_CMD_CONFIG_DEFAULTS,
5619 		.bf_enable_beacon_filter = htole32(1),
5620 		.ba_enable_beacon_abort = htole32(enable),
5621 	};
5622 
5623 	if (!sc->sc_bf.bf_enabled)
5624 		return 0;
5625 
5626 	sc->sc_bf.ba_enabled = enable;
5627 	return iwx_beacon_filter_send_cmd(sc, &cmd);
5628 }
5629 
5630 void
5631 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5632     struct iwx_mac_power_cmd *cmd)
5633 {
5634 	struct ieee80211com *ic = &sc->sc_ic;
5635 	struct ieee80211_node *ni = &in->in_ni;
5636 	int dtim_period, dtim_msec, keep_alive;
5637 
5638 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5639 	    in->in_color));
5640 	if (ni->ni_dtimperiod)
5641 		dtim_period = ni->ni_dtimperiod;
5642 	else
5643 		dtim_period = 1;
5644 
5645 	/*
5646 	 * Regardless of power management state the driver must set
5647 	 * keep alive period. FW will use it for sending keep alive NDPs
5648 	 * immediately after association. Check that keep alive period
5649 	 * is at least 3 * DTIM.
5650 	 */
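	/*
	 * For example, with a DTIM period of 1 and a beacon interval of
	 * 100 (TUs, treated as milliseconds here), 3 * DTIM is 300ms,
	 * so the 25 second floor below wins and keep_alive becomes 25.
	 */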
5651 	dtim_msec = dtim_period * ni->ni_intval;
5652 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
5653 	keep_alive = roundup(keep_alive, 1000) / 1000;
5654 	cmd->keep_alive_seconds = htole16(keep_alive);
5655 
5656 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5657 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5658 }
5659 
5660 int
5661 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
5662 {
5663 	int err;
5664 	int ba_enable;
5665 	struct iwx_mac_power_cmd cmd;
5666 
5667 	memset(&cmd, 0, sizeof(cmd));
5668 
5669 	iwx_power_build_cmd(sc, in, &cmd);
5670 
5671 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
5672 	    sizeof(cmd), &cmd);
5673 	if (err != 0)
5674 		return err;
5675 
5676 	ba_enable = !!(cmd.flags &
5677 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5678 	return iwx_update_beacon_abort(sc, in, ba_enable);
5679 }
5680 
5681 int
5682 iwx_power_update_device(struct iwx_softc *sc)
5683 {
5684 	struct iwx_device_power_cmd cmd = { };
5685 	struct ieee80211com *ic = &sc->sc_ic;
5686 
5687 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5688 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5689 
5690 	return iwx_send_cmd_pdu(sc,
5691 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5692 }
5693 
5694 int
5695 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
5696 {
5697 	struct iwx_beacon_filter_cmd cmd = {
5698 		IWX_BF_CMD_CONFIG_DEFAULTS,
5699 		.bf_enable_beacon_filter = htole32(1),
5700 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
5701 	};
5702 	int err;
5703 
5704 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5705 	if (err == 0)
5706 		sc->sc_bf.bf_enabled = 1;
5707 
5708 	return err;
5709 }
5710 
5711 int
5712 iwx_disable_beacon_filter(struct iwx_softc *sc)
5713 {
5714 	struct iwx_beacon_filter_cmd cmd;
5715 	int err;
5716 
5717 	memset(&cmd, 0, sizeof(cmd));
5718 
5719 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5720 	if (err == 0)
5721 		sc->sc_bf.bf_enabled = 0;
5722 
5723 	return err;
5724 }
5725 
5726 int
5727 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
5728 {
5729 	struct iwx_add_sta_cmd add_sta_cmd;
5730 	int err;
5731 	uint32_t status;
5732 	struct ieee80211com *ic = &sc->sc_ic;
5733 
5734 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
5735 		panic("STA already added");
5736 
5737 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5738 
5739 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5740 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5741 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
5742 	} else {
5743 		add_sta_cmd.sta_id = IWX_STATION_ID;
5744 		add_sta_cmd.station_type = IWX_STA_LINK;
5745 	}
5746 	add_sta_cmd.mac_id_n_color
5747 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5748 	if (!update) {
5749 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
5750 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5751 			    etheranyaddr);
5752 		else
5753 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5754 			    in->in_macaddr);
5755 	}
5756 	add_sta_cmd.add_modify = update ? 1 : 0;
5757 	add_sta_cmd.station_flags_msk
5758 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
5759 
5760 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5761 		add_sta_cmd.station_flags_msk
5762 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
5763 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
5764 
5765 		if (iwx_mimo_enabled(sc)) {
5766 			if (in->in_ni.ni_rxmcs[1] != 0) {
5767 				add_sta_cmd.station_flags |=
5768 				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
5769 			}
5770 			if (in->in_ni.ni_rxmcs[2] != 0) {
5771 				add_sta_cmd.station_flags |=
5772 				    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
5773 			}
5774 		}
5775 
5776 		if (ieee80211_node_supports_ht_chan40(&in->in_ni)) {
5777 			add_sta_cmd.station_flags |= htole32(
5778 			    IWX_STA_FLG_FAT_EN_40MHZ);
5779 		}
5780 
5781 		add_sta_cmd.station_flags
5782 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
5783 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5784 		case IEEE80211_AMPDU_PARAM_SS_2:
5785 			add_sta_cmd.station_flags
5786 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
5787 			break;
5788 		case IEEE80211_AMPDU_PARAM_SS_4:
5789 			add_sta_cmd.station_flags
5790 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
5791 			break;
5792 		case IEEE80211_AMPDU_PARAM_SS_8:
5793 			add_sta_cmd.station_flags
5794 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
5795 			break;
5796 		case IEEE80211_AMPDU_PARAM_SS_16:
5797 			add_sta_cmd.station_flags
5798 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
5799 			break;
5800 		default:
5801 			break;
5802 		}
5803 	}
5804 
5805 	status = IWX_ADD_STA_SUCCESS;
5806 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
5807 	    &add_sta_cmd, &status);
5808 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
5809 		err = EIO;
5810 
5811 	return err;
5812 }
5813 
5814 int
5815 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
5816 {
5817 	struct ieee80211com *ic = &sc->sc_ic;
5818 	struct iwx_rm_sta_cmd rm_sta_cmd;
5819 	int err;
5820 
5821 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
5822 		panic("sta already removed");
5823 
5824 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
5825 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5826 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5827 	else
5828 		rm_sta_cmd.sta_id = IWX_STATION_ID;
5829 
5830 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
5831 	    &rm_sta_cmd);
5832 
5833 	return err;
5834 }
5835 
5836 int
5837 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
5838 {
5839 	struct ieee80211com *ic = &sc->sc_ic;
5840 	struct ieee80211_node *ni = &in->in_ni;
5841 	int err, i;
5842 
5843 	err = iwx_flush_sta(sc, in);
5844 	if (err) {
5845 		printf("%s: could not flush Tx path (error %d)\n",
5846 		    DEVNAME(sc), err);
5847 		return err;
5848 	}
5849 	err = iwx_rm_sta_cmd(sc, in);
5850 	if (err) {
5851 		printf("%s: could not remove STA (error %d)\n",
5852 		    DEVNAME(sc), err);
5853 		return err;
5854 	}
5855 
5856 	in->in_flags = 0;
5857 
5858 	sc->sc_rx_ba_sessions = 0;
5859 	sc->ba_rx.start_tidmask = 0;
5860 	sc->ba_rx.stop_tidmask = 0;
5861 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
5862 	sc->ba_tx.start_tidmask = 0;
5863 	sc->ba_tx.stop_tidmask = 0;
5864 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
5865 		sc->qenablemsk &= ~(1 << i);
5866 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
5867 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
5868 		if (ba->ba_state != IEEE80211_BA_AGREED)
5869 			continue;
5870 		ieee80211_delba_request(ic, ni, 0, 1, i);
5871 	}
5872 
5873 	return 0;
5874 }
5875 
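/*
 * Fill the scan command's channel list with every channel known to
 * net80211 (the loop skips unused slot 0 of ic_channels), up to the
 * size limits of both the command and the firmware.
 * Returns the number of channels added.
 */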
5876 uint8_t
5877 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
5878     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
5879     int n_ssids, int bgscan)
5880 {
5881 	struct ieee80211com *ic = &sc->sc_ic;
5882 	struct ieee80211_channel *c;
5883 	uint8_t nchan;
5884 
5885 	for (nchan = 0, c = &ic->ic_channels[1];
5886 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5887 	    nchan < chan_nitems &&
5888 	    nchan < sc->sc_capa_n_scan_channels;
5889 	    c++) {
5890 		uint8_t channel_num;
5891 
5892 		if (c->ic_flags == 0)
5893 			continue;
5894 
5895 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5896 		if (isset(sc->sc_ucode_api,
5897 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
5898 			chan->v2.channel_num = channel_num;
5899 			if (IEEE80211_IS_CHAN_2GHZ(c))
5900 				chan->v2.band = IWX_PHY_BAND_24;
5901 			else
5902 				chan->v2.band = IWX_PHY_BAND_5;
5903 			chan->v2.iter_count = 1;
5904 			chan->v2.iter_interval = 0;
5905 		} else {
5906 			chan->v1.channel_num = channel_num;
5907 			chan->v1.iter_count = 1;
5908 			chan->v1.iter_interval = htole16(0);
5909 		}
5910 		/*
5911 		 * Firmware may become unresponsive when asked to send
5912 		 * a directed probe request on a passive channel.
5913 		 */
5914 		if (n_ssids != 0 && !bgscan &&
5915 		    (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
5916 			chan->flags = htole32(1 << 0); /* select SSID 0 */
5917 		chan++;
5918 		nchan++;
5919 	}
5920 
5921 	return nchan;
5922 }
5923 
5924 int
5925 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
5926 {
5927 	struct ieee80211com *ic = &sc->sc_ic;
5928 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5929 	struct ieee80211_rateset *rs;
5930 	size_t remain = sizeof(preq->buf);
5931 	uint8_t *frm, *pos;
5932 
5933 	memset(preq, 0, sizeof(*preq));
5934 
5935 	if (remain < sizeof(*wh) + 2)
5936 		return ENOBUFS;
5937 
5938 	/*
5939 	 * Build a probe request frame.  Most of the following code is a
5940 	 * copy & paste of what is done in net80211.
5941 	 */
5942 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5943 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5944 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5945 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5946 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5947 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5948 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5949 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5950 
5951 	frm = (uint8_t *)(wh + 1);
5952 	*frm++ = IEEE80211_ELEMID_SSID;
5953 	*frm++ = 0;
5954 	/* hardware inserts SSID */
5955 
5956 	/* Tell the firmware where the MAC header is. */
5957 	preq->mac_header.offset = 0;
5958 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5959 	remain -= frm - (uint8_t *)wh;
5960 
5961 	/* Fill in 2GHz IEs and tell firmware where they are. */
5962 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5963 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5964 		if (remain < 4 + rs->rs_nrates)
5965 			return ENOBUFS;
5966 	} else if (remain < 2 + rs->rs_nrates)
5967 		return ENOBUFS;
5968 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5969 	pos = frm;
5970 	frm = ieee80211_add_rates(frm, rs);
5971 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5972 		frm = ieee80211_add_xrates(frm, rs);
5973 	remain -= frm - pos;
5974 
5975 	if (isset(sc->sc_enabled_capa,
5976 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5977 		if (remain < 3)
5978 			return ENOBUFS;
5979 		*frm++ = IEEE80211_ELEMID_DSPARMS;
5980 		*frm++ = 1;
5981 		*frm++ = 0;
5982 		remain -= 3;
5983 	}
5984 	preq->band_data[0].len = htole16(frm - pos);
5985 
5986 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5987 		/* Fill in 5GHz IEs. */
5988 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5989 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5990 			if (remain < 4 + rs->rs_nrates)
5991 				return ENOBUFS;
5992 		} else if (remain < 2 + rs->rs_nrates)
5993 			return ENOBUFS;
5994 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5995 		pos = frm;
5996 		frm = ieee80211_add_rates(frm, rs);
5997 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5998 			frm = ieee80211_add_xrates(frm, rs);
5999 		preq->band_data[1].len = htole16(frm - pos);
6000 		remain -= frm - pos;
6001 	}
6002 
6003 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6004 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6005 	pos = frm;
6006 	if (ic->ic_flags & IEEE80211_F_HTON) {
6007 		if (remain < 28)
6008 			return ENOBUFS;
6009 		frm = ieee80211_add_htcaps(frm, ic);
6010 		/* XXX add WME info? */
6011 	}
6012 	preq->common_data.len = htole16(frm - pos);
6013 
6014 	return 0;
6015 }
6016 
6017 int
6018 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6019 {
6020 	struct iwx_scan_config scan_cfg;
6021 	struct iwx_host_cmd hcmd = {
6022 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6023 		.len[0] = sizeof(scan_cfg),
6024 		.data[0] = &scan_cfg,
6025 		.flags = 0,
6026 	};
6027 	int cmdver;
6028 
6029 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6030 		printf("%s: firmware does not support reduced scan config\n",
6031 		    DEVNAME(sc));
6032 		return ENOTSUP;
6033 	}
6034 
6035 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6036 
6037 	/*
6038 	 * SCAN_CFG version >= 5 implies that the broadcast
6039 	 * STA ID field is deprecated.
6040 	 */
6041 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6042 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6043 		scan_cfg.bcast_sta_id = 0xff;
6044 
6045 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6046 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6047 
6048 	return iwx_send_cmd(sc, &hcmd);
6049 }
6050 
6051 uint16_t
6052 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6053 {
6054 	struct ieee80211com *ic = &sc->sc_ic;
6055 	uint16_t flags = 0;
6056 
6057 	if (ic->ic_des_esslen == 0)
6058 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6059 
6060 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6061 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6062 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6063 
6064 	return flags;
6065 }
6066 
6067 #define IWX_SCAN_DWELL_ACTIVE		10
6068 #define IWX_SCAN_DWELL_PASSIVE		110
6069 
6070 /* adaptive dwell max budget time [TU] for full scan */
6071 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6072 /* adaptive dwell max budget time [TU] for directed scan */
6073 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6074 /* adaptive dwell default high band APs number */
6075 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6076 /* adaptive dwell default low band APs number */
6077 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6078 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6079 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6080 /* adaptive dwell number of APs override for p2p friendly GO channels */
6081 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6082 /* adaptive dwell number of APs override for social channels */
6083 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6084 
6085 void
6086 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6087     struct iwx_scan_general_params_v10 *general_params, int bgscan)
6088 {
6089 	uint32_t suspend_time, max_out_time;
6090 	uint8_t active_dwell, passive_dwell;
6091 
6092 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
6093 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6094 
6095 	general_params->adwell_default_social_chn =
6096 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6097 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6098 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6099 
6100 	if (bgscan)
6101 		general_params->adwell_max_budget =
6102 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6103 	else
6104 		general_params->adwell_max_budget =
6105 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6106 
6107 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6108 	if (bgscan) {
6109 		max_out_time = 120;
6110 		suspend_time = 120;
6111 	} else {
6112 		max_out_time = 0;
6113 		suspend_time = 0;
6114 	}
6115 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6116 		htole32(max_out_time);
6117 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6118 		htole32(suspend_time);
6119 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6120 		htole32(max_out_time);
6121 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6122 		htole32(suspend_time);
6123 
6124 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6125 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6126 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6127 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6128 }
6129 
6130 void
6131 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6132     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6133 {
6134 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6135 
6136 	gp->flags = htole16(gen_flags);
6137 
6138 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6139 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6140 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6141 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6142 
6143 	gp->scan_start_mac_id = 0;
6144 }
6145 
6146 void
6147 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6148     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6149     int n_ssid, int bgscan)
6150 {
6151 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6152 
6153 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6154 	    nitems(cp->channel_config), n_ssid, bgscan);
6155 
6156 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6157 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6158 }
6159 
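/*
 * Build and send a version-14 UMAC scan request. Background scans
 * are sent without sleeping, hence the M_NOWAIT allocation and the
 * IWX_CMD_ASYNC flag in that case.
 */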
6160 int
6161 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
6162 {
6163 	struct ieee80211com *ic = &sc->sc_ic;
6164 	struct iwx_host_cmd hcmd = {
6165 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
6166 		.len = { 0, },
6167 		.data = { NULL, },
6168 		.flags = 0,
6169 	};
6170 	struct iwx_scan_req_umac_v14 *cmd;
6171 	struct iwx_scan_req_params_v14 *scan_p;
6172 	int err, async = bgscan, n_ssid = 0;
6173 	uint16_t gen_flags;
6174 	uint32_t bitmap_ssid = 0;
6175 
6176 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
6177 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
6178 	if (cmd == NULL)
6179 		return ENOMEM;
6180 
6181 	scan_p = &cmd->scan_params;
6182 
6183 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6184 	cmd->uid = htole32(0);
6185 
6186 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
6187 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
6188 	    gen_flags, bgscan);
6189 
6190 	scan_p->periodic_params.schedule[0].interval = htole16(0);
6191 	scan_p->periodic_params.schedule[0].iter_count = 1;
6192 
6193 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
6194 	if (err) {
6195 		free(cmd, M_DEVBUF, sizeof(*cmd));
6196 		return err;
6197 	}
6198 
6199 	if (ic->ic_des_esslen != 0) {
6200 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
6201 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
6202 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
6203 		    ic->ic_des_essid, ic->ic_des_esslen);
6204 		bitmap_ssid |= (1 << 0);
6205 		n_ssid = 1;
6206 	}
6207 
6208 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
6209 	    n_ssid, bgscan);
6210 
6211 	hcmd.len[0] = sizeof(*cmd);
6212 	hcmd.data[0] = (void *)cmd;
6213 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
6214 
6215 	err = iwx_send_cmd(sc, &hcmd);
6216 	free(cmd, M_DEVBUF, sizeof(*cmd));
6217 	return err;
6218 }
6219 
6220 void
6221 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6222 {
6223 	struct ieee80211com *ic = &sc->sc_ic;
6224 	struct ifnet *ifp = IC2IFP(ic);
6225 	char alpha2[3];
6226 
6227 	snprintf(alpha2, sizeof(alpha2), "%c%c",
6228 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6229 
6230 	if (ifp->if_flags & IFF_DEBUG) {
6231 		printf("%s: firmware has detected regulatory domain '%s' "
6232 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6233 	}
6234 
6235 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6236 }
6237 
6238 uint8_t
6239 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6240 {
6241 	int i;
6242 	uint8_t rval;
6243 
6244 	for (i = 0; i < rs->rs_nrates; i++) {
6245 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6246 		if (rval == iwx_rates[ridx].rate)
6247 			return rs->rs_rates[i];
6248 	}
6249 
6250 	return 0;
6251 }
6252 
6253 int
6254 iwx_rval2ridx(int rval)
6255 {
6256 	int ridx;
6257 
6258 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6259 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6260 			continue;
6261 		if (rval == iwx_rates[ridx].rate)
6262 			break;
6263 	}
6264 
6265 	return ridx;
6266 }
6267 
6268 void
6269 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
6270     int *ofdm_rates)
6271 {
6272 	struct ieee80211_node *ni = &in->in_ni;
6273 	struct ieee80211_rateset *rs = &ni->ni_rates;
6274 	int lowest_present_ofdm = -1;
6275 	int lowest_present_cck = -1;
6276 	uint8_t cck = 0;
6277 	uint8_t ofdm = 0;
6278 	int i;
6279 
6280 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
6281 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
6282 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
6283 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6284 				continue;
6285 			cck |= (1 << i);
6286 			if (lowest_present_cck == -1 || lowest_present_cck > i)
6287 				lowest_present_cck = i;
6288 		}
6289 	}
6290 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
6291 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6292 			continue;
6293 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
6294 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
6295 			lowest_present_ofdm = i;
6296 	}
6297 
6298 	/*
6299 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
6300 	 * variables. This isn't sufficient though, as there might not
6301 	 * be all the right rates in the bitmap. E.g. if the only basic
6302 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
6303 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
6304 	 *
6305 	 *    [...] a STA responding to a received frame shall transmit
6306 	 *    its Control Response frame [...] at the highest rate in the
6307 	 *    BSSBasicRateSet parameter that is less than or equal to the
6308 	 *    rate of the immediately previous frame in the frame exchange
6309 	 *    sequence ([...]) and that is of the same modulation class
6310 	 *    ([...]) as the received frame. If no rate contained in the
6311 	 *    BSSBasicRateSet parameter meets these conditions, then the
6312 	 *    control frame sent in response to a received frame shall be
6313 	 *    transmitted at the highest mandatory rate of the PHY that is
6314 	 *    less than or equal to the rate of the received frame, and
6315 	 *    that is of the same modulation class as the received frame.
6316 	 *
6317 	 * As a consequence, we need to add all mandatory rates that are
6318 	 * lower than all of the basic rates to these bitmaps.
6319 	 */
6320 
6321 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
6322 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
6323 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
6324 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
6325 	/* 6M already there or needed so always add */
6326 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
6327 
6328 	/*
6329 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
6330 	 * Note, however:
6331 	 *  - if no CCK rates are basic, it must be ERP since there must
6332 	 *    be some basic rates at all, so they're OFDM => ERP PHY
6333 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
6334 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
6335 	 *  - if 5.5M is basic, 1M and 2M are mandatory
6336 	 *  - if 2M is basic, 1M is mandatory
6337 	 *  - if 1M is basic, that's the only valid ACK rate.
6338 	 * As a consequence, it's not as complicated as it sounds, just add
6339 	 * any lower rates to the ACK rate bitmap.
6340 	 */
6341 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
6342 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
6343 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
6344 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
6345 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
6346 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
6347 	/* 1M already there or needed so always add */
6348 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
6349 
6350 	*cck_rates = cck;
6351 	*ofdm_rates = ofdm;
6352 }
6353 
6354 void
6355 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6356     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6357 {
6358 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6359 	struct ieee80211com *ic = &sc->sc_ic;
6360 	struct ieee80211_node *ni = ic->ic_bss;
6361 	int cck_ack_rates, ofdm_ack_rates;
6362 	int i;
6363 
6364 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6365 	    in->in_color));
6366 	cmd->action = htole32(action);
6367 
6368 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
6369 		return;
6370 
6371 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6372 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6373 	else if (ic->ic_opmode == IEEE80211_M_STA)
6374 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6375 	else
6376 		panic("unsupported operating mode %d", ic->ic_opmode);
6377 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
6378 
6379 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
6380 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6381 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6382 		return;
6383 	}
6384 
6385 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6386 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6387 	cmd->cck_rates = htole32(cck_ack_rates);
6388 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6389 
6390 	cmd->cck_short_preamble
6391 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6392 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6393 	cmd->short_slot
6394 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6395 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
6396 
6397 	for (i = 0; i < EDCA_NUM_AC; i++) {
6398 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6399 		int txf = iwx_ac_to_tx_fifo[i];
6400 
6401 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
6402 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
6403 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6404 		cmd->ac[txf].fifos_mask = (1 << txf);
6405 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6406 	}
6407 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6408 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6409 
6410 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6411 		enum ieee80211_htprot htprot =
6412 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6413 		switch (htprot) {
6414 		case IEEE80211_HTPROT_NONE:
6415 			break;
6416 		case IEEE80211_HTPROT_NONMEMBER:
6417 		case IEEE80211_HTPROT_NONHT_MIXED:
6418 			cmd->protection_flags |=
6419 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6420 			    IWX_MAC_PROT_FLG_FAT_PROT);
6421 			break;
6422 		case IEEE80211_HTPROT_20MHZ:
6423 			if (in->in_phyctxt &&
6424 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
6425 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
6426 				cmd->protection_flags |=
6427 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6428 				    IWX_MAC_PROT_FLG_FAT_PROT);
6429 			}
6430 			break;
6431 		default:
6432 			break;
6433 		}
6434 
6435 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6436 	}
6437 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6438 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6439 
6440 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6441 #undef IWX_EXP2
6442 }
6443 
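/*
 * Fill in the station-specific part of the MAC context command,
 * predicting the time of the next DTIM beacon from the timestamp of
 * the last received beacon and the current DTIM count.
 */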
6444 void
6445 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6446     struct iwx_mac_data_sta *sta, int assoc)
6447 {
6448 	struct ieee80211_node *ni = &in->in_ni;
6449 	uint32_t dtim_off;
6450 	uint64_t tsf;
6451 
6452 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
6453 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
6454 	tsf = letoh64(tsf);
6455 
6456 	sta->is_assoc = htole32(assoc);
6457 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
6458 	sta->dtim_tsf = htole64(tsf + dtim_off);
6459 	sta->bi = htole32(ni->ni_intval);
6460 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
6461 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
6462 	sta->dtim_reciprocal = htole32(iwx_reciprocal(ni->ni_intval * ni->ni_dtimperiod));
6463 	sta->listen_interval = htole32(10);
6464 	sta->assoc_id = htole32(ni->ni_associd);
6465 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
6466 }
6467 
6468 int
6469 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6470     int assoc)
6471 {
6472 	struct ieee80211com *ic = &sc->sc_ic;
6473 	struct ieee80211_node *ni = &in->in_ni;
6474 	struct iwx_mac_ctx_cmd cmd;
6475 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6476 
6477 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
6478 		panic("MAC already added");
6479 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6480 		panic("MAC already removed");
6481 
6482 	memset(&cmd, 0, sizeof(cmd));
6483 
6484 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6485 
6486 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6487 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6488 		    sizeof(cmd), &cmd);
6489 	}
6490 
6491 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6492 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6493 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6494 		    IWX_MAC_FILTER_ACCEPT_GRP |
6495 		    IWX_MAC_FILTER_IN_BEACON |
6496 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
6497 		    IWX_MAC_FILTER_IN_CRC32);
6498 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
6499 		/*
6500 		 * Allow beacons to pass through as long as we are not
6501 		 * associated or we do not have dtim period information.
6502 		 */
6503 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6504 	else
6505 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6506 
6507 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6508 }
6509 
6510 int
6511 iwx_clear_statistics(struct iwx_softc *sc)
6512 {
6513 	struct iwx_statistics_cmd scmd = {
6514 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6515 	};
6516 	struct iwx_host_cmd cmd = {
6517 		.id = IWX_STATISTICS_CMD,
6518 		.len[0] = sizeof(scmd),
6519 		.data[0] = &scmd,
6520 		.flags = IWX_CMD_WANT_RESP,
6521 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
6522 	};
6523 	int err;
6524 
6525 	err = iwx_send_cmd(sc, &cmd);
6526 	if (err)
6527 		return err;
6528 
6529 	iwx_free_resp(sc, &cmd);
6530 	return 0;
6531 }
6532 
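/*
 * Queued tasks hold a reference on the driver so that code which
 * stops the device can wait for all pending tasks to complete before
 * tearing driver state down.
 */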
6533 void
6534 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6535 {
6536 	int s = splnet();
6537 
6538 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6539 		splx(s);
6540 		return;
6541 	}
6542 
6543 	refcnt_take(&sc->task_refs);
6544 	if (!task_add(taskq, task))
6545 		refcnt_rele_wake(&sc->task_refs);
6546 	splx(s);
6547 }
6548 
6549 void
6550 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6551 {
6552 	if (task_del(taskq, task))
6553 		refcnt_rele(&sc->task_refs);
6554 }
6555 
6556 int
6557 iwx_scan(struct iwx_softc *sc)
6558 {
6559 	struct ieee80211com *ic = &sc->sc_ic;
6560 	struct ifnet *ifp = IC2IFP(ic);
6561 	int err;
6562 
6563 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
6564 		err = iwx_scan_abort(sc);
6565 		if (err) {
6566 			printf("%s: could not abort background scan\n",
6567 			    DEVNAME(sc));
6568 			return err;
6569 		}
6570 	}
6571 
6572 	err = iwx_umac_scan_v14(sc, 0);
6573 	if (err) {
6574 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6575 		return err;
6576 	}
6577 
6578 	/*
6579 	 * The current mode might have been fixed during association.
6580 	 * Ensure all channels get scanned.
6581 	 */
6582 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
6583 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
6584 
6585 	sc->sc_flags |= IWX_FLAG_SCANNING;
6586 	if (ifp->if_flags & IFF_DEBUG)
6587 		printf("%s: %s -> %s\n", ifp->if_xname,
6588 		    ieee80211_state_name[ic->ic_state],
6589 		    ieee80211_state_name[IEEE80211_S_SCAN]);
6590 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
6591 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
6592 		ieee80211_node_cleanup(ic, ic->ic_bss);
6593 	}
6594 	ic->ic_state = IEEE80211_S_SCAN;
6595 	wakeup(&ic->ic_state); /* wake iwx_init() */
6596 
6597 	return 0;
6598 }
6599 
6600 int
6601 iwx_bgscan(struct ieee80211com *ic)
6602 {
6603 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
6604 	int err;
6605 
6606 	if (sc->sc_flags & IWX_FLAG_SCANNING)
6607 		return 0;
6608 
6609 	err = iwx_umac_scan_v14(sc, 1);
6610 	if (err) {
6611 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6612 		return err;
6613 	}
6614 
6615 	sc->sc_flags |= IWX_FLAG_BGSCAN;
6616 	return 0;
6617 }
6618 
6619 int
6620 iwx_umac_scan_abort(struct iwx_softc *sc)
6621 {
6622 	struct iwx_umac_scan_abort cmd = { 0 };
6623 
6624 	return iwx_send_cmd_pdu(sc,
6625 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
6626 	    0, sizeof(cmd), &cmd);
6627 }
6628 
6629 int
6630 iwx_scan_abort(struct iwx_softc *sc)
6631 {
6632 	int err;
6633 
6634 	err = iwx_umac_scan_abort(sc);
6635 	if (err == 0)
6636 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6637 	return err;
6638 }
6639 
6640 int
6641 iwx_enable_mgmt_queue(struct iwx_softc *sc)
6642 {
6643 	int err;
6644 
6645 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
6646 
6647 	/*
6648 	 * Non-QoS frames use the "MGMT" TID and queue.
6649 	 * Other TIDs and data queues are reserved for QoS data frames.
6650 	 */
6651 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
6652 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
6653 	if (err) {
6654 		printf("%s: could not enable Tx queue %d (error %d)\n",
6655 		    DEVNAME(sc), sc->first_data_qid, err);
6656 		return err;
6657 	}
6658 
6659 	return 0;
6660 }
6661 
6662 int
6663 iwx_rs_rval2idx(uint8_t rval)
6664 {
6665 	/* Firmware expects indices which match our 11g rate set. */
6666 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
6667 	int i;
6668 
6669 	for (i = 0; i < rs->rs_nrates; i++) {
6670 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6671 			return i;
6672 	}
6673 
6674 	return -1;
6675 }
6676 
6677 uint16_t
6678 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
6679 {
6680 	struct ieee80211com *ic = &sc->sc_ic;
6681 	const struct ieee80211_ht_rateset *rs;
6682 	uint16_t htrates = 0;
6683 	int mcs;
6684 
6685 	rs = &ieee80211_std_ratesets_11n[rsidx];
6686 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
6687 		if (!isset(ni->ni_rxmcs, mcs) ||
6688 		    !isset(ic->ic_sup_mcs, mcs))
6689 			continue;
6690 		htrates |= (1 << (mcs - rs->min_mcs));
6691 	}
6692 
6693 	return htrates;
6694 }
6695 
6696 int
6697 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
6698 {
6699 	struct ieee80211_node *ni = &in->in_ni;
6700 	struct ieee80211_rateset *rs = &ni->ni_rates;
6701 	struct iwx_tlc_config_cmd cfg_cmd;
6702 	uint32_t cmd_id;
6703 	int i;
6704 	size_t cmd_size = sizeof(cfg_cmd);
6705 
6706 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
6707 
6708 	for (i = 0; i < rs->rs_nrates; i++) {
6709 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
6710 		int idx = iwx_rs_rval2idx(rval);
6711 		if (idx == -1)
6712 			return EINVAL;
6713 		cfg_cmd.non_ht_rates |= (1 << idx);
6714 	}
6715 
6716 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6717 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
6718 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
6719 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_SISO);
6720 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
6721 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_MIMO2);
6722 	} else
6723 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
6724 
6725 	cfg_cmd.sta_id = IWX_STATION_ID;
6726 	if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
6727 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
6728 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
6729 	else
6730 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
6731 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
6732 	cfg_cmd.max_mpdu_len = 3839;
6733 	if (ieee80211_node_supports_ht_sgi20(ni))
6734 		cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_20MHZ);
6735 	if (ieee80211_node_supports_ht_sgi40(ni))
6736 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_40MHZ);
6737 
6738 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
6739 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
6740 }
6741 
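/*
 * Handle a rate selection notification from the firmware and mirror
 * the chosen Tx rate or MCS into our net80211 node.
 */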
6742 void
6743 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
6744 {
6745 	struct ieee80211com *ic = &sc->sc_ic;
6746 	struct ieee80211_node *ni = ic->ic_bss;
6747 	struct ieee80211_rateset *rs = &ni->ni_rates;
6748 	uint32_t rate_n_flags;
6749 	int i;
6750 
6751 	if (notif->sta_id != IWX_STATION_ID ||
6752 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
6753 		return;
6754 
6755 	rate_n_flags = le32toh(notif->rate);
6756 	if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
6757 		ni->ni_txmcs = (rate_n_flags &
6758 		    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
6759 		    IWX_RATE_HT_MCS_NSS_MSK));
6760 	} else {
6761 		uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
6762 		uint8_t rval = 0;
6763 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
6764 			if (iwx_rates[i].plcp == plcp) {
6765 				rval = iwx_rates[i].rate;
6766 				break;
6767 			}
6768 		}
6769 		if (rval) {
6770 			uint8_t rv;
6771 			for (i = 0; i < rs->rs_nrates; i++) {
6772 				rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
6773 				if (rv == rval) {
6774 					ni->ni_txrate = i;
6775 					break;
6776 				}
6777 			}
6778 		}
6779 	}
6780 }
6781 
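/*
 * Update a PHY context for a new channel and/or new Rx chain
 * settings. Firmware with the binding-CDB capability apparently
 * cannot move an existing PHY context to a different band with a
 * MODIFY action, so in that case the context is removed and added
 * back instead.
 */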
6782 int
6783 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
6784     struct ieee80211_channel *chan, uint8_t chains_static,
6785     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco)
6786 {
6787 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
6788 	int err;
6789 
6790 	if (isset(sc->sc_enabled_capa,
6791 	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
6792 	    (phyctxt->channel->ic_flags & band_flags) !=
6793 	    (chan->ic_flags & band_flags)) {
6794 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6795 		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco);
6796 		if (err) {
6797 			printf("%s: could not remove PHY context "
6798 			    "(error %d)\n", DEVNAME(sc), err);
6799 			return err;
6800 		}
6801 		phyctxt->channel = chan;
6802 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6803 		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco);
6804 		if (err) {
6805 			printf("%s: could not add PHY context "
6806 			    "(error %d)\n", DEVNAME(sc), err);
6807 			return err;
6808 		}
6809 	} else {
6810 		phyctxt->channel = chan;
6811 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6812 		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco);
6813 		if (err) {
6814 			printf("%s: could not update PHY context (error %d)\n",
6815 			    DEVNAME(sc), err);
6816 			return err;
6817 		}
6818 	}
6819 
6820 	phyctxt->sco = sco;
6821 	return 0;
6822 }
6823 
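/*
 * Prepare firmware state for authentication: set up a PHY context,
 * a MAC context, a binding, and a station entry, then protect the
 * session with a time event so the firmware stays on channel while
 * the association handshake runs.
 */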
6824 int
6825 iwx_auth(struct iwx_softc *sc)
6826 {
6827 	struct ieee80211com *ic = &sc->sc_ic;
6828 	struct iwx_node *in = (void *)ic->ic_bss;
6829 	uint32_t duration;
6830 	int generation = sc->sc_generation, err;
6831 
6832 	splassert(IPL_NET);
6833 
6834 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6835 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
6836 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
6837 		if (err)
6838 			return err;
6839 	} else {
6840 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
6841 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
6842 		if (err)
6843 			return err;
6844 	}
6845 	in->in_phyctxt = &sc->sc_phyctxt[0];
6846 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
6847 
6848 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
6849 	if (err) {
6850 		printf("%s: could not add MAC context (error %d)\n",
6851 		    DEVNAME(sc), err);
6852 		return err;
6853 	}
6854 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
6855 
6856 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
6857 	if (err) {
6858 		printf("%s: could not add binding (error %d)\n",
6859 		    DEVNAME(sc), err);
6860 		goto rm_mac_ctxt;
6861 	}
6862 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
6863 
6864 	err = iwx_add_sta_cmd(sc, in, 0);
6865 	if (err) {
6866 		printf("%s: could not add sta (error %d)\n",
6867 		    DEVNAME(sc), err);
6868 		goto rm_binding;
6869 	}
6870 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
6871 
6872 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6873 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
6874 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
6875 		    IWX_TX_RING_COUNT);
6876 		if (err)
6877 			goto rm_sta;
6878 		return 0;
6879 	}
6880 
6881 	err = iwx_enable_mgmt_queue(sc);
6882 	if (err)
6883 		goto rm_sta;
6884 
6885 	err = iwx_clear_statistics(sc);
6886 	if (err)
6887 		goto rm_sta;
6888 
6889 	/*
6890 	 * Prevent the FW from wandering off channel during association
6891 	 * by "protecting" the session with a time event.
6892 	 */
6893 	if (in->in_ni.ni_intval)
6894 		duration = in->in_ni.ni_intval * 2;
6895 	else
6896 		duration = IEEE80211_DUR_TU;
6897 	return iwx_schedule_session_protection(sc, in, duration);
6898 rm_sta:
6899 	if (generation == sc->sc_generation) {
6900 		iwx_rm_sta_cmd(sc, in);
6901 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
6902 	}
6903 rm_binding:
6904 	if (generation == sc->sc_generation) {
6905 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
6906 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
6907 	}
6908 rm_mac_ctxt:
6909 	if (generation == sc->sc_generation) {
6910 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
6911 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
6912 	}
6913 	return err;
6914 }
6915 
6916 int
6917 iwx_deauth(struct iwx_softc *sc)
6918 {
6919 	struct ieee80211com *ic = &sc->sc_ic;
6920 	struct iwx_node *in = (void *)ic->ic_bss;
6921 	int err;
6922 
6923 	splassert(IPL_NET);
6924 
6925 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
6926 		err = iwx_rm_sta(sc, in);
6927 		if (err)
6928 			return err;
6929 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
6930 	}
6931 
6932 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
6933 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
6934 		if (err) {
6935 			printf("%s: could not remove binding (error %d)\n",
6936 			    DEVNAME(sc), err);
6937 			return err;
6938 		}
6939 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
6940 	}
6941 
6942 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
6943 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
6944 		if (err) {
6945 			printf("%s: could not remove MAC context (error %d)\n",
6946 			    DEVNAME(sc), err);
6947 			return err;
6948 		}
6949 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
6950 	}
6951 
6952 	/* Move unused PHY context to a default channel. */
6953 	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
6954 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
6955 	if (err)
6956 		return err;
6957 
6958 	return 0;
6959 }
6960 
6961 int
6962 iwx_run(struct iwx_softc *sc)
6963 {
6964 	struct ieee80211com *ic = &sc->sc_ic;
6965 	struct iwx_node *in = (void *)ic->ic_bss;
6966 	struct ieee80211_node *ni = &in->in_ni;
6967 	int err;
6968 
6969 	splassert(IPL_NET);
6970 
6971 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6972 		/* Add a MAC context and a sniffing STA. */
6973 		err = iwx_auth(sc);
6974 		if (err)
6975 			return err;
6976 	}
6977 
6978 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
6979 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6980 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
6981 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
6982 		    in->in_phyctxt->channel, chains, chains,
6983 		    0, IEEE80211_HTOP0_SCO_SCN);
6984 		if (err) {
6985 			printf("%s: failed to update PHY\n", DEVNAME(sc));
6986 			return err;
6987 		}
6988 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
6989 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
6990 		uint8_t sco;
6991 		if (ieee80211_node_supports_ht_chan40(ni))
6992 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6993 		else
6994 			sco = IEEE80211_HTOP0_SCO_SCN;
6995 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
6996 		    in->in_phyctxt->channel, chains, chains,
6997 		    0, sco);
6998 		if (err) {
6999 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7000 			return err;
7001 		}
7002 	}
7003 
7004 	/* We have now been assigned an associd by the AP. */
7005 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
7006 	if (err) {
7007 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7008 		return err;
7009 	}
7010 
7011 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
7012 	if (err) {
7013 		printf("%s: could not set sf full on (error %d)\n",
7014 		    DEVNAME(sc), err);
7015 		return err;
7016 	}
7017 
7018 	err = iwx_allow_mcast(sc);
7019 	if (err) {
7020 		printf("%s: could not allow mcast (error %d)\n",
7021 		    DEVNAME(sc), err);
7022 		return err;
7023 	}
7024 
7025 	err = iwx_power_update_device(sc);
7026 	if (err) {
7027 		printf("%s: could not send power command (error %d)\n",
7028 		    DEVNAME(sc), err);
7029 		return err;
7030 	}
7031 #ifdef notyet
7032 	/*
7033 	 * Disabled for now. Default beacon filter settings
7034 	 * prevent net80211 from getting ERP and HT protection
7035 	 * updates from beacons.
7036 	 */
7037 	err = iwx_enable_beacon_filter(sc, in);
7038 	if (err) {
7039 		printf("%s: could not enable beacon filter\n",
7040 		    DEVNAME(sc));
7041 		return err;
7042 	}
7043 #endif
7044 	err = iwx_power_mac_update_mode(sc, in);
7045 	if (err) {
7046 		printf("%s: could not update MAC power (error %d)\n",
7047 		    DEVNAME(sc), err);
7048 		return err;
7049 	}
7050 
7051 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7052 		return 0;
7053 
7054 	/* Start at the lowest available bit-rate. Firmware will raise it. */
7055 	in->in_ni.ni_txrate = 0;
7056 	in->in_ni.ni_txmcs = 0;
7057 
7058 	err = iwx_rs_init(sc, in);
7059 	if (err) {
7060 		printf("%s: could not init rate scaling (error %d)\n",
7061 		    DEVNAME(sc), err);
7062 		return err;
7063 	}
7064 
7065 	return 0;
7066 }
7067 
7068 int
7069 iwx_run_stop(struct iwx_softc *sc)
7070 {
7071 	struct ieee80211com *ic = &sc->sc_ic;
7072 	struct iwx_node *in = (void *)ic->ic_bss;
7073 	struct ieee80211_node *ni = &in->in_ni;
7074 	int err, i;
7075 
7076 	splassert(IPL_NET);
7077 
7078 	err = iwx_flush_sta(sc, in);
7079 	if (err) {
7080 		printf("%s: could not flush Tx path (error %d)\n",
7081 		    DEVNAME(sc), err);
7082 		return err;
7083 	}
7084 
7085 	/*
7086 	 * Stop Rx BA sessions now. We cannot rely on the BA task
7087 	 * for this when moving out of RUN state since it runs in a
7088 	 * separate thread.
7089 	 * Note that in->in_ni (struct ieee80211_node) already represents
7090 	 * our new access point in case we are roaming between APs.
7091 	 * This means we cannot rely on struct ieee80211_node to tell
7092 	 * us which BA sessions exist.
7093 	 */
7094 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
7095 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
7096 		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
7097 			continue;
7098 		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
7099 	}
7100 
7101 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
7102 	if (err)
7103 		return err;
7104 
7105 	err = iwx_disable_beacon_filter(sc);
7106 	if (err) {
7107 		printf("%s: could not disable beacon filter (error %d)\n",
7108 		    DEVNAME(sc), err);
7109 		return err;
7110 	}
7111 
7112 	/* Mark station as disassociated. */
7113 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
7114 	if (err) {
7115 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7116 		return err;
7117 	}
7118 
7119 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
7120 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7121 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7122 		   in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
7123 		if (err) {
7124 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7125 			return err;
7126 		}
7127 	}
7128 
7129 	return 0;
7130 }
7131 
7132 struct ieee80211_node *
7133 iwx_node_alloc(struct ieee80211com *ic)
7134 {
7135 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
7136 }
7137 
7138 int
7139 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7140     struct ieee80211_key *k)
7141 {
7142 	struct iwx_softc *sc = ic->ic_softc;
7143 	struct iwx_node *in = (void *)ni;
7144 	struct iwx_setkey_task_arg *a;
7145 	int err;
7146 
7147 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7148 		/* Fall back to software crypto for other ciphers. */
7149 		err = ieee80211_set_key(ic, ni, k);
7150 		if (!err && (k->k_flags & IEEE80211_KEY_GROUP))
7151 			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7152 		return err;
7153 	}
7154 
7155 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
7156 		return ENOSPC;
7157 
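	/*
	 * Queue the key in a small ring buffer which the setkey task
	 * drains in order; setkey_cur is the producer index and
	 * setkey_tail the consumer index.
	 */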
7158 	a = &sc->setkey_arg[sc->setkey_cur];
7159 	a->sta_id = IWX_STATION_ID;
7160 	a->ni = ni;
7161 	a->k = k;
7162 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
7163 	sc->setkey_nkeys++;
7164 	iwx_add_task(sc, systq, &sc->setkey_task);
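	/*
	 * Return EBUSY to signal that installation is still in
	 * progress; the setkey task marks the port valid once both
	 * pairwise and group keys have been installed.
	 */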
7165 	return EBUSY;
7166 }
7167 
7168 int
7169 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
7170     struct ieee80211_key *k)
7171 {
7172 	struct ieee80211com *ic = &sc->sc_ic;
7173 	struct iwx_node *in = (void *)ni;
7174 	struct iwx_add_sta_key_cmd cmd;
7175 	uint32_t status;
7176 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
7177 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
7178 	int err;
7179 
7180 	/*
7181 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
7182 	 * Currently we only implement station mode where 'ni' is always
7183 	 * ic->ic_bss so there is no need to validate arguments beyond this:
7184 	 */
7185 	KASSERT(ni == ic->ic_bss);
7186 
7187 	memset(&cmd, 0, sizeof(cmd));
7188 
7189 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
7190 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
7191 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7192 	    IWX_STA_KEY_FLG_KEYID_MSK));
7193 	if (k->k_flags & IEEE80211_KEY_GROUP) {
7194 		cmd.common.key_offset = 1;
7195 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
7196 	} else
7197 		cmd.common.key_offset = 0;
7198 
7199 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7200 	cmd.common.sta_id = sta_id;
7201 
7202 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
7203 
7204 	status = IWX_ADD_STA_SUCCESS;
7205 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
7206 	    &status);
7207 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
7208 		return ECANCELED;
7209 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
7210 		err = EIO;
7211 	if (err) {
7212 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
7213 		    IEEE80211_REASON_AUTH_LEAVE);
7214 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
7215 		return err;
7216 	}
7217 
7218 	if (k->k_flags & IEEE80211_KEY_GROUP)
7219 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7220 	else
7221 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
7222 
7223 	if ((in->in_flags & want_keymask) == want_keymask) {
7224 		DPRINTF(("marking port %s valid\n",
7225 		    ether_sprintf(ni->ni_macaddr)));
7226 		ni->ni_port_valid = 1;
7227 		ieee80211_set_link_state(ic, LINK_STATE_UP);
7228 	}
7229 
7230 	return 0;
7231 }
7232 
7233 void
7234 iwx_setkey_task(void *arg)
7235 {
7236 	struct iwx_softc *sc = arg;
7237 	struct iwx_setkey_task_arg *a;
7238 	int err = 0, s = splnet();
7239 
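	/*
	 * Install pending keys one at a time, stopping on the first
	 * error or if the driver is being shut down.
	 */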
7240 	while (sc->setkey_nkeys > 0) {
7241 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
7242 			break;
7243 		a = &sc->setkey_arg[sc->setkey_tail];
7244 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
7245 		a->sta_id = 0;
7246 		a->ni = NULL;
7247 		a->k = NULL;
7248 		sc->setkey_tail = (sc->setkey_tail + 1) %
7249 		    nitems(sc->setkey_arg);
7250 		sc->setkey_nkeys--;
7251 	}
7252 
7253 	refcnt_rele_wake(&sc->task_refs);
7254 	splx(s);
7255 }
7256 
7257 void
7258 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7259     struct ieee80211_key *k)
7260 {
7261 	struct iwx_softc *sc = ic->ic_softc;
7262 	struct iwx_add_sta_key_cmd cmd;
7263 
7264 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7265 		/* Fall back to software crypto for other ciphers. */
7266 		ieee80211_delete_key(ic, ni, k);
7267 		return;
7268 	}
7269 
7270 	memset(&cmd, 0, sizeof(cmd));
7271 
7272 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
7273 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
7274 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7275 	    IWX_STA_KEY_FLG_KEYID_MSK));
7276 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7277 	if (k->k_flags & IEEE80211_KEY_GROUP)
7278 		cmd.common.key_offset = 1;
7279 	else
7280 		cmd.common.key_offset = 0;
7281 	cmd.common.sta_id = IWX_STATION_ID;
7282 
7283 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
7284 }
7285 
7286 int
7287 iwx_media_change(struct ifnet *ifp)
7288 {
7289 	struct iwx_softc *sc = ifp->if_softc;
7290 	struct ieee80211com *ic = &sc->sc_ic;
7291 	uint8_t rate, ridx;
7292 	int err;
7293 
7294 	err = ieee80211_media_change(ifp);
7295 	if (err != ENETRESET)
7296 		return err;
7297 
7298 	if (ic->ic_fixed_mcs != -1)
7299 		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
7300 	else if (ic->ic_fixed_rate != -1) {
7301 		rate = ic->ic_sup_rates[ic->ic_curmode].
7302 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
7303 		/* Map 802.11 rate to HW rate index. */
7304 		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
7305 			if (iwx_rates[ridx].rate == rate)
7306 				break;
7307 		sc->sc_fixed_ridx = ridx;
7308 	}
7309 
7310 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7311 	    (IFF_UP | IFF_RUNNING)) {
7312 		iwx_stop(ifp);
7313 		err = iwx_init(ifp);
7314 	}
7315 	return err;
7316 }
7317 
7318 void
7319 iwx_newstate_task(void *psc)
7320 {
7321 	struct iwx_softc *sc = (struct iwx_softc *)psc;
7322 	struct ieee80211com *ic = &sc->sc_ic;
7323 	enum ieee80211_state nstate = sc->ns_nstate;
7324 	enum ieee80211_state ostate = ic->ic_state;
7325 	int arg = sc->ns_arg;
7326 	int err = 0, s = splnet();
7327 
7328 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7329 		/* iwx_stop() is waiting for us. */
7330 		refcnt_rele_wake(&sc->task_refs);
7331 		splx(s);
7332 		return;
7333 	}
7334 
7335 	if (ostate == IEEE80211_S_SCAN) {
7336 		if (nstate == ostate) {
7337 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
7338 				refcnt_rele_wake(&sc->task_refs);
7339 				splx(s);
7340 				return;
7341 			}
7342 			/* Firmware is no longer scanning. Do another scan. */
7343 			goto next_scan;
7344 		}
7345 	}
7346 
7347 	if (nstate <= ostate) {
7348 		switch (ostate) {
7349 		case IEEE80211_S_RUN:
7350 			err = iwx_run_stop(sc);
7351 			if (err)
7352 				goto out;
7353 			/* FALLTHROUGH */
7354 		case IEEE80211_S_ASSOC:
7355 		case IEEE80211_S_AUTH:
7356 			if (nstate <= IEEE80211_S_AUTH) {
7357 				err = iwx_deauth(sc);
7358 				if (err)
7359 					goto out;
7360 			}
7361 			/* FALLTHROUGH */
7362 		case IEEE80211_S_SCAN:
7363 		case IEEE80211_S_INIT:
7364 			break;
7365 		}
7366 
7367 		/* Die now if iwx_stop() was called while we were sleeping. */
7368 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7369 			refcnt_rele_wake(&sc->task_refs);
7370 			splx(s);
7371 			return;
7372 		}
7373 	}
7374 
7375 	switch (nstate) {
7376 	case IEEE80211_S_INIT:
7377 		break;
7378 
7379 	case IEEE80211_S_SCAN:
7380 next_scan:
7381 		err = iwx_scan(sc);
7382 		if (err)
7383 			break;
7384 		refcnt_rele_wake(&sc->task_refs);
7385 		splx(s);
7386 		return;
7387 
7388 	case IEEE80211_S_AUTH:
7389 		err = iwx_auth(sc);
7390 		break;
7391 
7392 	case IEEE80211_S_ASSOC:
7393 		break;
7394 
7395 	case IEEE80211_S_RUN:
7396 		err = iwx_run(sc);
7397 		break;
7398 	}
7399 
7400 out:
7401 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
7402 		if (err)
7403 			task_add(systq, &sc->init_task);
7404 		else
7405 			sc->sc_newstate(ic, nstate, arg);
7406 	}
7407 	refcnt_rele_wake(&sc->task_refs);
7408 	splx(s);
7409 }
7410 
7411 int
7412 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
7413 {
7414 	struct ifnet *ifp = IC2IFP(ic);
7415 	struct iwx_softc *sc = ifp->if_softc;
7416 
7417 	/*
7418 	 * Prevent attempts to transition towards the same state, unless
7419 	 * we are scanning, in which case a SCAN -> SCAN transition
7420 	 * triggers another scan iteration. And AUTH -> AUTH is needed
7421 	 * to support band-steering.
7422 	 */
7423 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
7424 	    nstate != IEEE80211_S_AUTH)
7425 		return 0;
7426 
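	/*
	 * Leaving RUN state invalidates deferred work which assumes an
	 * active association, so cancel such tasks and pending keys.
	 */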
7427 	if (ic->ic_state == IEEE80211_S_RUN) {
7428 		iwx_del_task(sc, systq, &sc->ba_task);
7429 		iwx_del_task(sc, systq, &sc->setkey_task);
7430 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
7431 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
7432 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
7433 		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
7434 	}
7435 
7436 	sc->ns_nstate = nstate;
7437 	sc->ns_arg = arg;
7438 
7439 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
7440 
7441 	return 0;
7442 }
7443 
7444 void
7445 iwx_endscan(struct iwx_softc *sc)
7446 {
7447 	struct ieee80211com *ic = &sc->sc_ic;
7448 
7449 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
7450 		return;
7451 
7452 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7453 	ieee80211_end_scan(&ic->ic_if);
7454 }
7455 
7456 /*
7457  * Aging and idle timeouts for the different possible scenarios
7458  * in default configuration
7459  */
7460 static const uint32_t
7461 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7462 	{
7463 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
7464 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
7465 	},
7466 	{
7467 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
7468 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
7469 	},
7470 	{
7471 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
7472 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
7473 	},
7474 	{
7475 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
7476 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
7477 	},
7478 	{
7479 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
7480 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
7481 	},
7482 };
7483 
7484 /*
7485  * Aging and idle timeouts for the different possible scenarios
7486  * in single BSS MAC configuration.
7487  */
7488 static const uint32_t
7489 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7490 	{
7491 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
7492 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
7493 	},
7494 	{
7495 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
7496 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
7497 	},
7498 	{
7499 		htole32(IWX_SF_MCAST_AGING_TIMER),
7500 		htole32(IWX_SF_MCAST_IDLE_TIMER)
7501 	},
7502 	{
7503 		htole32(IWX_SF_BA_AGING_TIMER),
7504 		htole32(IWX_SF_BA_IDLE_TIMER)
7505 	},
7506 	{
7507 		htole32(IWX_SF_TX_RE_AGING_TIMER),
7508 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
7509 	},
7510 };
7511 
7512 void
7513 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
7514     struct ieee80211_node *ni)
7515 {
7516 	int i, j, watermark;
7517 
7518 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
7519 
7520 	/*
7521 	 * If we are in association flow - check antenna configuration
7522 	 * capabilities of the AP station, and choose the watermark accordingly.
7523 	 */
7524 	if (ni) {
7525 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7526 			if (ni->ni_rxmcs[1] != 0)
7527 				watermark = IWX_SF_W_MARK_MIMO2;
7528 			else
7529 				watermark = IWX_SF_W_MARK_SISO;
7530 		} else {
7531 			watermark = IWX_SF_W_MARK_LEGACY;
7532 		}
7533 	/* default watermark value for unassociated mode. */
7534 	} else {
7535 		watermark = IWX_SF_W_MARK_MIMO2;
7536 	}
7537 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
7538 
7539 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
7540 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
7541 			sf_cmd->long_delay_timeouts[i][j] =
7542 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
7543 		}
7544 	}
7545 
7546 	if (ni) {
7547 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
7548 		       sizeof(iwx_sf_full_timeout));
7549 	} else {
7550 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
7551 		       sizeof(iwx_sf_full_timeout_def));
7552 	}
7554 }
7555 
7556 int
7557 iwx_sf_config(struct iwx_softc *sc, int new_state)
7558 {
7559 	struct ieee80211com *ic = &sc->sc_ic;
7560 	struct iwx_sf_cfg_cmd sf_cmd = {
7561 		.state = htole32(new_state),
7562 	};
7563 	int err = 0;
7564 
7565 	switch (new_state) {
7566 	case IWX_SF_UNINIT:
7567 	case IWX_SF_INIT_OFF:
7568 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
7569 		break;
7570 	case IWX_SF_FULL_ON:
7571 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7572 		break;
7573 	default:
7574 		return EINVAL;
7575 	}
7576 
7577 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
7578 				   sizeof(sf_cmd), &sf_cmd);
7579 	return err;
7580 }
7581 
7582 int
7583 iwx_send_bt_init_conf(struct iwx_softc *sc)
7584 {
7585 	struct iwx_bt_coex_cmd bt_cmd;
7586 
7587 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
7588 	bt_cmd.enabled_modules = 0;
7589 
7590 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
7591 	    &bt_cmd);
7592 }
7593 
7594 int
7595 iwx_send_soc_conf(struct iwx_softc *sc)
7596 {
7597 	struct iwx_soc_configuration_cmd cmd;
7598 	int err;
7599 	uint32_t cmd_id, flags = 0;
7600 
7601 	memset(&cmd, 0, sizeof(cmd));
7602 
7603 	/*
7604 	 * In VER_1 of this command, the discrete value is considered
7605 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
7606 	 * values in VER_1, this is backwards-compatible with VER_2,
7607 	 * as long as we don't set any other flag bits.
7608 	 */
7609 	if (!sc->sc_integrated) { /* VER_1 */
7610 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
7611 	} else { /* VER_2 */
7612 		uint8_t scan_cmd_ver;
7613 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
7614 			flags |= (sc->sc_ltr_delay &
7615 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
7616 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
7617 		    IWX_SCAN_REQ_UMAC);
7618 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
7619 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
7620 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
7621 	}
7622 	cmd.flags = htole32(flags);
7623 
7624 	cmd.latency = htole32(sc->sc_xtal_latency);
7625 
7626 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
7627 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7628 	if (err)
7629 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
7630 	return err;
7631 }
7632 
7633 int
7634 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
7635 {
7636 	struct iwx_mcc_update_cmd mcc_cmd;
7637 	struct iwx_host_cmd hcmd = {
7638 		.id = IWX_MCC_UPDATE_CMD,
7639 		.flags = IWX_CMD_WANT_RESP,
7640 		.data = { &mcc_cmd },
7641 	};
7642 	struct iwx_rx_packet *pkt;
7643 	struct iwx_mcc_update_resp *resp;
7644 	size_t resp_len;
7645 	int err;
7646 
7647 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
7648 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
7649 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
7650 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
7651 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
7652 	else
7653 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
7654 
7655 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
7656 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
7657 
7658 	err = iwx_send_cmd(sc, &hcmd);
7659 	if (err)
7660 		return err;
7661 
7662 	pkt = hcmd.resp_pkt;
7663 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
7664 		err = EIO;
7665 		goto out;
7666 	}
7667 
7668 	resp_len = iwx_rx_packet_payload_len(pkt);
7669 	if (resp_len < sizeof(*resp)) {
7670 		err = EIO;
7671 		goto out;
7672 	}
7673 
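	/* The response ends with a variable-length channel list. */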
7674 	resp = (void *)pkt->data;
7675 	if (resp_len != sizeof(*resp) +
7676 	    resp->n_channels * sizeof(resp->channels[0])) {
7677 		err = EIO;
7678 		goto out;
7679 	}
7680 
7681 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=%d n_channels=%u\n",
7682 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
7683 
7684 	/* Update channel map for net80211 and our scan configuration. */
7685 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
7686 
7687 out:
7688 	iwx_free_resp(sc, &hcmd);
7689 
7690 	return err;
7691 }
7692 
7693 int
7694 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
7695 {
7696 	struct iwx_temp_report_ths_cmd cmd;
7697 	int err;
7698 
7699 	/*
7700 	 * In order to give responsibility for critical-temperature-kill
7701 	 * and TX backoff to FW, we need to send an empty temperature
7702 	 * reporting command at init time.
7703 	 */
7704 	memset(&cmd, 0, sizeof(cmd));
7705 
7706 	err = iwx_send_cmd_pdu(sc,
7707 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
7708 	    0, sizeof(cmd), &cmd);
7709 	if (err)
7710 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
7711 		    DEVNAME(sc), err);
7712 
7713 	return err;
7714 }
7715 
7716 int
7717 iwx_init_hw(struct iwx_softc *sc)
7718 {
7719 	struct ieee80211com *ic = &sc->sc_ic;
7720 	int err, i;
7721 
7722 	err = iwx_run_init_mvm_ucode(sc, 0);
7723 	if (err)
7724 		return err;
7725 
7726 	if (!iwx_nic_lock(sc))
7727 		return EBUSY;
7728 
7729 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
7730 	if (err) {
7731 		printf("%s: could not init tx ant config (error %d)\n",
7732 		    DEVNAME(sc), err);
7733 		goto err;
7734 	}
7735 
7736 	if (sc->sc_tx_with_siso_diversity) {
7737 		err = iwx_send_phy_cfg_cmd(sc);
7738 		if (err) {
7739 			printf("%s: could not send phy config (error %d)\n",
7740 			    DEVNAME(sc), err);
7741 			goto err;
7742 		}
7743 	}
7744 
7745 	err = iwx_send_bt_init_conf(sc);
7746 	if (err) {
7747 		printf("%s: could not init bt coex (error %d)\n",
7748 		    DEVNAME(sc), err);
7749 		return err;
7750 	}
7751 
7752 	err = iwx_send_soc_conf(sc);
7753 	if (err)
7754 		return err;
7755 
7756 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7757 		err = iwx_send_dqa_cmd(sc);
7758 		if (err)
7759 			return err;
7760 	}
7761 
7762 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
7763 		/*
7764 		 * The channel used here isn't relevant as it's
7765 		 * going to be overwritten in the other flows.
7766 		 * For now use the first channel we have.
7767 		 */
7768 		sc->sc_phyctxt[i].id = i;
7769 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
7770 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
7771 		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN);
7772 		if (err) {
7773 			printf("%s: could not add phy context %d (error %d)\n",
7774 			    DEVNAME(sc), i, err);
7775 			goto err;
7776 		}
7777 	}
7778 
7779 	err = iwx_config_ltr(sc);
7780 	if (err) {
7781 		printf("%s: PCIe LTR configuration failed (error %d)\n",
7782 		    DEVNAME(sc), err);
7783 	}
7784 
7785 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
7786 		err = iwx_send_temp_report_ths_cmd(sc);
7787 		if (err)
7788 			goto err;
7789 	}
7790 
7791 	err = iwx_power_update_device(sc);
7792 	if (err) {
7793 		printf("%s: could not send power command (error %d)\n",
7794 		    DEVNAME(sc), err);
7795 		goto err;
7796 	}
7797 
7798 	if (sc->sc_nvm.lar_enabled) {
7799 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
7800 		if (err) {
7801 			printf("%s: could not init LAR (error %d)\n",
7802 			    DEVNAME(sc), err);
7803 			goto err;
7804 		}
7805 	}
7806 
7807 	err = iwx_config_umac_scan_reduced(sc);
7808 	if (err) {
7809 		printf("%s: could not configure scan (error %d)\n",
7810 		    DEVNAME(sc), err);
7811 		goto err;
7812 	}
7813 
7814 	err = iwx_disable_beacon_filter(sc);
7815 	if (err) {
7816 		printf("%s: could not disable beacon filter (error %d)\n",
7817 		    DEVNAME(sc), err);
7818 		goto err;
7819 	}
7820 
7821 err:
7822 	iwx_nic_unlock(sc);
7823 	return err;
7824 }
7825 
7826 /* Allow multicast from our BSSID. */
7827 int
7828 iwx_allow_mcast(struct iwx_softc *sc)
7829 {
7830 	struct ieee80211com *ic = &sc->sc_ic;
7831 	struct iwx_node *in = (void *)ic->ic_bss;
7832 	struct iwx_mcast_filter_cmd *cmd;
7833 	size_t size;
7834 	int err;
7835 
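	/* Pad the command to the 4-byte boundary the firmware expects. */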
7836 	size = roundup(sizeof(*cmd), 4);
7837 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
7838 	if (cmd == NULL)
7839 		return ENOMEM;
7840 	cmd->filter_own = 1;
7841 	cmd->port_id = 0;
7842 	cmd->count = 0;
7843 	cmd->pass_all = 1;
7844 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
7845 
7846 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
7847 	    0, size, cmd);
7848 	free(cmd, M_DEVBUF, size);
7849 	return err;
7850 }
7851 
7852 int
7853 iwx_init(struct ifnet *ifp)
7854 {
7855 	struct iwx_softc *sc = ifp->if_softc;
7856 	struct ieee80211com *ic = &sc->sc_ic;
7857 	int err, generation;
7858 
7859 	rw_assert_wrlock(&sc->ioctl_rwl);
7860 
7861 	generation = ++sc->sc_generation;
7862 
7863 	err = iwx_preinit(sc);
7864 	if (err)
7865 		return err;
7866 
7867 	err = iwx_start_hw(sc);
7868 	if (err) {
7869 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7870 		return err;
7871 	}
7872 
7873 	err = iwx_init_hw(sc);
7874 	if (err) {
7875 		if (generation == sc->sc_generation)
7876 			iwx_stop_device(sc);
7877 		return err;
7878 	}
7879 
7880 	if (sc->sc_nvm.sku_cap_11n_enable)
7881 		iwx_setup_ht_rates(sc);
7882 
7883 	KASSERT(sc->task_refs.refs == 0);
7884 	refcnt_init(&sc->task_refs);
7885 	ifq_clr_oactive(&ifp->if_snd);
7886 	ifp->if_flags |= IFF_RUNNING;
7887 
7888 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7889 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
7890 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
7891 		return 0;
7892 	}
7893 
7894 	ieee80211_begin_scan(ifp);
7895 
7896 	/*
7897 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
7898 	 * Wait until the transition to SCAN state has completed.
7899 	 */
7900 	do {
7901 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
7902 		    SEC_TO_NSEC(1));
7903 		if (generation != sc->sc_generation)
7904 			return ENXIO;
7905 		if (err) {
7906 			iwx_stop(ifp);
7907 			return err;
7908 		}
7909 	} while (ic->ic_state != IEEE80211_S_SCAN);
7910 
7911 	return 0;
7912 }
7913 
7914 void
7915 iwx_start(struct ifnet *ifp)
7916 {
7917 	struct iwx_softc *sc = ifp->if_softc;
7918 	struct ieee80211com *ic = &sc->sc_ic;
7919 	struct ieee80211_node *ni;
7920 	struct ether_header *eh;
7921 	struct mbuf *m;
7922 
7923 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
7924 		return;
7925 
7926 	for (;;) {
7927 		/* why isn't this done per-queue? */
7928 		if (sc->qfullmsk != 0) {
7929 			ifq_set_oactive(&ifp->if_snd);
7930 			break;
7931 		}
7932 
7933 		/* Don't queue additional frames while flushing Tx queues. */
7934 		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
7935 			break;
7936 
7937 		/* need to send management frames even if we're not RUNning */
7938 		m = mq_dequeue(&ic->ic_mgtq);
7939 		if (m) {
7940 			ni = m->m_pkthdr.ph_cookie;
7941 			goto sendit;
7942 		}
7943 
7944 		if (ic->ic_state != IEEE80211_S_RUN ||
7945 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
7946 			break;
7947 
7948 		m = ifq_dequeue(&ifp->if_snd);
7949 		if (!m)
7950 			break;
7951 		if (m->m_len < sizeof (*eh) &&
7952 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
7953 			ifp->if_oerrors++;
7954 			continue;
7955 		}
7956 #if NBPFILTER > 0
7957 		if (ifp->if_bpf != NULL)
7958 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
7959 #endif
7960 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
7961 			ifp->if_oerrors++;
7962 			continue;
7963 		}
7964 
7965  sendit:
7966 #if NBPFILTER > 0
7967 		if (ic->ic_rawbpf != NULL)
7968 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
7969 #endif
7970 		if (iwx_tx(sc, m, ni) != 0) {
7971 			ieee80211_release_node(ic, ni);
7972 			ifp->if_oerrors++;
7973 			continue;
7974 		}
7975 
7976 		if (ifp->if_flags & IFF_UP) {
7977 			sc->sc_tx_timer = 15;
7978 			ifp->if_timer = 1;
7979 		}
7980 	}
7981 
7982 	return;
7983 }
7984 
7985 void
7986 iwx_stop(struct ifnet *ifp)
7987 {
7988 	struct iwx_softc *sc = ifp->if_softc;
7989 	struct ieee80211com *ic = &sc->sc_ic;
7990 	struct iwx_node *in = (void *)ic->ic_bss;
7991 	int i, s = splnet();
7992 
7993 	rw_assert_wrlock(&sc->ioctl_rwl);
7994 
7995 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
7996 
7997 	/* Cancel scheduled tasks and let any stale tasks finish up. */
7998 	task_del(systq, &sc->init_task);
7999 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
8000 	iwx_del_task(sc, systq, &sc->ba_task);
8001 	iwx_del_task(sc, systq, &sc->setkey_task);
8002 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8003 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8004 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8005 	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8006 	KASSERT(sc->task_refs.refs >= 1);
8007 	refcnt_finalize(&sc->task_refs, "iwxstop");
8008 
8009 	iwx_stop_device(sc);
8010 
8011 	/* Reset soft state. */
8012 
8013 	sc->sc_generation++;
8014 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
8015 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
8016 		sc->sc_cmd_resp_pkt[i] = NULL;
8017 		sc->sc_cmd_resp_len[i] = 0;
8018 	}
8019 	ifp->if_flags &= ~IFF_RUNNING;
8020 	ifq_clr_oactive(&ifp->if_snd);
8021 
8022 	in->in_phyctxt = NULL;
8023 	in->in_flags = 0;
8024 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
8025 
8026 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8027 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8028 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8029 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8030 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8031 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8032 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8033 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8034 
8035 	sc->sc_rx_ba_sessions = 0;
8036 	sc->ba_rx.start_tidmask = 0;
8037 	sc->ba_rx.stop_tidmask = 0;
8038 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
8039 	sc->ba_tx.start_tidmask = 0;
8040 	sc->ba_tx.stop_tidmask = 0;
8041 
8042 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
8043 
8044 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8045 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8046 		iwx_clear_reorder_buffer(sc, rxba);
8047 	}
8048 	ifp->if_timer = sc->sc_tx_timer = 0;
8049 
8050 	splx(s);
8051 }
8052 
8053 void
8054 iwx_watchdog(struct ifnet *ifp)
8055 {
8056 	struct iwx_softc *sc = ifp->if_softc;
8057 
8058 	ifp->if_timer = 0;
8059 	if (sc->sc_tx_timer > 0) {
8060 		if (--sc->sc_tx_timer == 0) {
8061 			printf("%s: device timeout\n", DEVNAME(sc));
8062 			if (ifp->if_flags & IFF_DEBUG) {
8063 				iwx_nic_error(sc);
8064 				iwx_dump_driver_status(sc);
8065 			}
8066 			if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8067 				task_add(systq, &sc->init_task);
8068 			ifp->if_oerrors++;
8069 			return;
8070 		}
8071 		ifp->if_timer = 1;
8072 	}
8073 
8074 	ieee80211_watchdog(ifp);
8075 }
8076 
8077 int
8078 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
8079 {
8080 	struct iwx_softc *sc = ifp->if_softc;
8081 	int s, err = 0, generation = sc->sc_generation;
8082 
8083 	/*
8084 	 * Prevent processes from entering this function while another
8085 	 * process is tsleep'ing in it.
8086 	 */
8087 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
8088 	if (err == 0 && generation != sc->sc_generation) {
8089 		rw_exit(&sc->ioctl_rwl);
8090 		return ENXIO;
8091 	}
8092 	if (err)
8093 		return err;
8094 	s = splnet();
8095 
8096 	switch (cmd) {
8097 	case SIOCSIFADDR:
8098 		ifp->if_flags |= IFF_UP;
8099 		/* FALLTHROUGH */
8100 	case SIOCSIFFLAGS:
8101 		if (ifp->if_flags & IFF_UP) {
8102 			if (!(ifp->if_flags & IFF_RUNNING)) {
8103 				/* Force reload of firmware image from disk. */
8104 				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
8105 				err = iwx_init(ifp);
8106 			}
8107 		} else {
8108 			if (ifp->if_flags & IFF_RUNNING)
8109 				iwx_stop(ifp);
8110 		}
8111 		break;
8112 
8113 	default:
8114 		err = ieee80211_ioctl(ifp, cmd, data);
8115 	}
8116 
8117 	if (err == ENETRESET) {
8118 		err = 0;
8119 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8120 		    (IFF_UP | IFF_RUNNING)) {
8121 			iwx_stop(ifp);
8122 			err = iwx_init(ifp);
8123 		}
8124 	}
8125 
8126 	splx(s);
8127 	rw_exit(&sc->ioctl_rwl);
8128 
8129 	return err;
8130 }
8131 
8132 /*
8133  * Note: This structure is read from the device with IO accesses,
8134  * and the reading already does the endian conversion. As it is
8135  * read with uint32_t-sized accesses, any members with a different size
8136  * need to be ordered correctly though!
8137  */
8138 struct iwx_error_event_table {
8139 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8140 	uint32_t error_id;		/* type of error */
8141 	uint32_t trm_hw_status0;	/* TRM HW status */
8142 	uint32_t trm_hw_status1;	/* TRM HW status */
8143 	uint32_t blink2;		/* branch link */
8144 	uint32_t ilink1;		/* interrupt link */
8145 	uint32_t ilink2;		/* interrupt link */
8146 	uint32_t data1;		/* error-specific data */
8147 	uint32_t data2;		/* error-specific data */
8148 	uint32_t data3;		/* error-specific data */
8149 	uint32_t bcon_time;		/* beacon timer */
8150 	uint32_t tsf_low;		/* network timestamp function timer */
8151 	uint32_t tsf_hi;		/* network timestamp function timer */
8152 	uint32_t gp1;		/* GP1 timer register */
8153 	uint32_t gp2;		/* GP2 timer register */
8154 	uint32_t fw_rev_type;	/* firmware revision type */
8155 	uint32_t major;		/* uCode version major */
8156 	uint32_t minor;		/* uCode version minor */
8157 	uint32_t hw_ver;		/* HW Silicon version */
8158 	uint32_t brd_ver;		/* HW board version */
8159 	uint32_t log_pc;		/* log program counter */
8160 	uint32_t frame_ptr;		/* frame pointer */
8161 	uint32_t stack_ptr;		/* stack pointer */
8162 	uint32_t hcmd;		/* last host command header */
8163 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8164 				 * rxtx_flag */
8165 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8166 				 * host_flag */
8167 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8168 				 * enc_flag */
8169 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8170 				 * time_flag */
8171 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8172 				 * wico interrupt */
8173 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8174 	uint32_t wait_event;		/* wait event() caller address */
8175 	uint32_t l2p_control;	/* L2pControlField */
8176 	uint32_t l2p_duration;	/* L2pDurationField */
8177 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8178 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8179 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8180 				 * (LMPM_PMG_SEL) */
8181 	uint32_t u_timestamp;	/* date and time of compilation */
8183 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8184 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8185 
8186 /*
8187  * UMAC error struct - relevant starting from family 8000 chip.
8188  * Note: This structure is read from the device with IO accesses,
8189  * and the reading already does the endian conversion. As it is
8190  * read with u32-sized accesses, any members with a different size
8191  * need to be ordered correctly though!
8192  */
8193 struct iwx_umac_error_event_table {
8194 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8195 	uint32_t error_id;	/* type of error */
8196 	uint32_t blink1;	/* branch link */
8197 	uint32_t blink2;	/* branch link */
8198 	uint32_t ilink1;	/* interrupt link */
8199 	uint32_t ilink2;	/* interrupt link */
8200 	uint32_t data1;		/* error-specific data */
8201 	uint32_t data2;		/* error-specific data */
8202 	uint32_t data3;		/* error-specific data */
8203 	uint32_t umac_major;
8204 	uint32_t umac_minor;
8205 	uint32_t frame_pointer;	/* core register 27*/
8206 	uint32_t stack_pointer;	/* core register 28 */
8207 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8208 	uint32_t nic_isr_pref;	/* ISR status register */
8209 } __packed;
8210 
8211 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
8212 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8213 
8214 void
8215 iwx_nic_umac_error(struct iwx_softc *sc)
8216 {
8217 	struct iwx_umac_error_event_table table;
8218 	uint32_t base;
8219 
8220 	base = sc->sc_uc.uc_umac_error_event_table;
8221 
8222 	if (base < 0x800000) {
8223 		printf("%s: Invalid error log pointer 0x%08x\n",
8224 		    DEVNAME(sc), base);
8225 		return;
8226 	}
8227 
8228 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8229 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8230 		return;
8231 	}
8232 
8233 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8234 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
8235 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8236 			sc->sc_flags, table.valid);
8237 	}
8238 
8239 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
8240 		iwx_desc_lookup(table.error_id));
8241 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
8242 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
8243 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
8244 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
8245 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
8246 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
8247 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
8248 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
8249 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
8250 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
8251 	    table.frame_pointer);
8252 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
8253 	    table.stack_pointer);
8254 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
8255 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
8256 	    table.nic_isr_pref);
8257 }
8258 
8259 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
8260 static struct {
8261 	const char *name;
8262 	uint8_t num;
8263 } advanced_lookup[] = {
8264 	{ "NMI_INTERRUPT_WDG", 0x34 },
8265 	{ "SYSASSERT", 0x35 },
8266 	{ "UCODE_VERSION_MISMATCH", 0x37 },
8267 	{ "BAD_COMMAND", 0x38 },
8268 	{ "BAD_COMMAND", 0x39 },
8269 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
8270 	{ "FATAL_ERROR", 0x3D },
8271 	{ "NMI_TRM_HW_ERR", 0x46 },
8272 	{ "NMI_INTERRUPT_TRM", 0x4C },
8273 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
8274 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
8275 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
8276 	{ "NMI_INTERRUPT_HOST", 0x66 },
8277 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
8278 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
8279 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
8280 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
8281 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
8282 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
8283 	{ "ADVANCED_SYSASSERT", 0 },
8284 };
8285 
8286 const char *
8287 iwx_desc_lookup(uint32_t num)
8288 {
8289 	int i;
8290 
8291 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8292 		if (advanced_lookup[i].num ==
8293 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8294 			return advanced_lookup[i].name;
8295 
8296 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8297 	return advanced_lookup[i].name;
8298 }
8299 
8300 /*
8301  * Support for dumping the error log seemed like a good idea ...
8302  * but it's mostly hex junk and the only sensible thing is the
8303  * hw/ucode revision (which we know anyway).  Since it's here,
8304  * I'll just leave it in, just in case e.g. the Intel guys want to
8305  * help us decipher some "ADVANCED_SYSASSERT" later.
8306  */
8307 void
8308 iwx_nic_error(struct iwx_softc *sc)
8309 {
8310 	struct iwx_error_event_table table;
8311 	uint32_t base;
8312 
8313 	printf("%s: dumping device error log\n", DEVNAME(sc));
8314 	base = sc->sc_uc.uc_lmac_error_event_table[0];
8315 	if (base < 0x800000) {
8316 		printf("%s: Invalid error log pointer 0x%08x\n",
8317 		    DEVNAME(sc), base);
8318 		return;
8319 	}
8320 
8321 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8322 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8323 		return;
8324 	}
8325 
8326 	if (!table.valid) {
8327 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8328 		return;
8329 	}
8330 
8331 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8332 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8333 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8334 		    sc->sc_flags, table.valid);
8335 	}
8336 
8337 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8338 	    iwx_desc_lookup(table.error_id));
8339 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8340 	    table.trm_hw_status0);
8341 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8342 	    table.trm_hw_status1);
8343 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8344 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8345 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8346 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8347 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8348 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8349 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8350 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8351 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8352 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8353 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8354 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8355 	    table.fw_rev_type);
8356 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8357 	    table.major);
8358 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8359 	    table.minor);
8360 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8361 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8362 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8363 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8364 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8365 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8366 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8367 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8368 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8369 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8370 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8371 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8372 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8373 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8374 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8375 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8376 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8377 
8378 	if (sc->sc_uc.uc_umac_error_event_table)
8379 		iwx_nic_umac_error(sc);
8380 }
8381 
8382 void
8383 iwx_dump_driver_status(struct iwx_softc *sc)
8384 {
8385 	int i;
8386 
8387 	printf("driver status:\n");
8388 	for (i = 0; i < nitems(sc->txq); i++) {
8389 		struct iwx_tx_ring *ring = &sc->txq[i];
8390 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
8391 		    "queued=%-3d\n",
8392 		    i, ring->qid, ring->cur, ring->queued);
8393 	}
8394 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
8395 	printf("  802.11 state %s\n",
8396 	    ieee80211_state_name[sc->sc_ic.ic_state]);
8397 }
8398 
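/*
 * Sync a command response payload from the DMA buffer and return a
 * pointer to the data which follows the packet header.
 */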
8399 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
8400 do {									\
8401 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8402 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
8403 	_var_ = (void *)((_pkt_)+1);					\
8404 } while (/*CONSTCOND*/0)
8405 
8406 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
8407 do {									\
8408 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8409 	    (_len_), BUS_DMASYNC_POSTREAD);				\
8410 	_ptr_ = (void *)((_pkt_)+1);					\
8411 } while (/*CONSTCOND*/0)
8412 
8413 int
8414 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8415 {
8416 	int qid, idx, code;
8417 
8418 	qid = pkt->hdr.qid & ~0x80;
8419 	idx = pkt->hdr.idx;
8420 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8421 
8422 	return (!(qid == 0 && idx == 0 && code == 0) &&
8423 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8424 }
8425 
8426 void
8427 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
8428 {
8429 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8430 	struct iwx_rx_packet *pkt, *nextpkt;
8431 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8432 	struct mbuf *m0, *m;
8433 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8434 	int qid, idx, code, handled = 1;
8435 
8436 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
8437 	    BUS_DMASYNC_POSTREAD);
8438 
8439 	m0 = data->m;
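	/*
	 * A single RX buffer may contain several firmware packets
	 * back-to-back. Walk the buffer and dispatch each packet until
	 * we run out of space or hit an invalid header.
	 */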
8440 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
8441 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
8442 		qid = pkt->hdr.qid;
8443 		idx = pkt->hdr.idx;
8444 
8445 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8446 
8447 		if (!iwx_rx_pkt_valid(pkt))
8448 			break;
8449 
8450 		/*
8451 		 * XXX Intel inside (tm)
8452 		 * Any commands in the LONG_GROUP could actually be in the
8453 		 * LEGACY group. Firmware API versions >= 50 reject commands
8454 		 * in group 0, forcing us to use this hack.
8455 		 */
8456 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
8457 			struct iwx_tx_ring *ring = &sc->txq[qid];
8458 			struct iwx_tx_data *txdata = &ring->data[idx];
8459 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
8460 				code = iwx_cmd_opcode(code);
8461 		}
8462 
8463 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
8464 		if (len < sizeof(pkt->hdr) ||
8465 		    len > (IWX_RBUF_SIZE - offset - minsz))
8466 			break;
8467 
8468 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
8469 			/* Take mbuf m0 off the RX ring. */
8470 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
8471 				ifp->if_ierrors++;
8472 				break;
8473 			}
8474 			KASSERT(data->m != m0);
8475 		}
8476 
8477 		switch (code) {
8478 		case IWX_REPLY_RX_PHY_CMD:
8479 			iwx_rx_rx_phy_cmd(sc, pkt, data);
8480 			break;
8481 
8482 		case IWX_REPLY_RX_MPDU_CMD: {
8483 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
8484 			nextoff = offset +
8485 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8486 			nextpkt = (struct iwx_rx_packet *)
8487 			    (m0->m_data + nextoff);
8488 			if (nextoff + minsz >= IWX_RBUF_SIZE ||
8489 			    !iwx_rx_pkt_valid(nextpkt)) {
8490 				/* No need to copy last frame in buffer. */
8491 				if (offset > 0)
8492 					m_adj(m0, offset);
8493 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
8494 				m0 = NULL; /* stack owns m0 now; abort loop */
8495 			} else {
8496 				/*
8497 				 * Create an mbuf which points to the current
8498 				 * packet. Always copy from offset zero to
8499 				 * preserve m_pkthdr.
8500 				 */
8501 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
8502 				if (m == NULL) {
8503 					ifp->if_ierrors++;
8504 					m_freem(m0);
8505 					m0 = NULL;
8506 					break;
8507 				}
8508 				m_adj(m, offset);
8509 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
8510 			}
8511 			break;
8512 		}
8513 
8514 		case IWX_BAR_FRAME_RELEASE:
8515 			iwx_rx_bar_frame_release(sc, pkt, data, ml);
8516 			break;
8517 
8518 		case IWX_TX_CMD:
8519 			iwx_rx_tx_cmd(sc, pkt, data);
8520 			break;
8521 
8522 		case IWX_BA_NOTIF:
8523 			iwx_rx_compressed_ba(sc, pkt, data);
8524 			break;
8525 
8526 		case IWX_MISSED_BEACONS_NOTIFICATION:
8527 			iwx_rx_bmiss(sc, pkt, data);
8528 			break;
8529 
8530 		case IWX_MFUART_LOAD_NOTIFICATION:
8531 			break;
8532 
8533 		case IWX_ALIVE: {
8534 			struct iwx_alive_resp_v4 *resp4;
8535 			struct iwx_alive_resp_v5 *resp5;
8536 
8537 			DPRINTF(("%s: firmware alive\n", __func__));
8538 			sc->sc_uc.uc_ok = 0;
8539 
8540 			/*
8541 			 * For v5 and above, we can check the version; for older
8542 			 * versions we need to check the size.
8543 			 */
8544 			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
8545 			    IWX_ALIVE) == 5) {
8546 				SYNC_RESP_STRUCT(resp5, pkt);
8547 				if (iwx_rx_packet_payload_len(pkt) !=
8548 				    sizeof(*resp5)) {
8549 					sc->sc_uc.uc_intr = 1;
8550 					wakeup(&sc->sc_uc);
8551 					break;
8552 				}
8553 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8554 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8555 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8556 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8557 				sc->sc_uc.uc_log_event_table = le32toh(
8558 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8559 				sc->sc_uc.uc_umac_error_event_table = le32toh(
8560 				    resp5->umac_data.dbg_ptrs.error_info_addr);
8561 				if (resp5->status == IWX_ALIVE_STATUS_OK)
8562 					sc->sc_uc.uc_ok = 1;
8563 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
8564 				SYNC_RESP_STRUCT(resp4, pkt);
8565 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8566 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8567 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8568 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8569 				sc->sc_uc.uc_log_event_table = le32toh(
8570 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8571 				sc->sc_uc.uc_umac_error_event_table = le32toh(
8572 				    resp4->umac_data.dbg_ptrs.error_info_addr);
8573 				if (resp4->status == IWX_ALIVE_STATUS_OK)
8574 					sc->sc_uc.uc_ok = 1;
8575 			}
8576 
8577 			sc->sc_uc.uc_intr = 1;
8578 			wakeup(&sc->sc_uc);
8579 			break;
8580 		}
8581 
8582 		case IWX_STATISTICS_NOTIFICATION: {
8583 			struct iwx_notif_statistics *stats;
8584 			SYNC_RESP_STRUCT(stats, pkt);
8585 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
8586 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
8587 			break;
8588 		}
8589 
8590 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
8591 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8592 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
8593 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8594 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
8595 			break;
8596 
8597 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8598 		    IWX_CT_KILL_NOTIFICATION): {
8599 			struct iwx_ct_kill_notif *notif;
8600 			SYNC_RESP_STRUCT(notif, pkt);
8601 			printf("%s: device at critical temperature (%u degC), "
8602 			    "stopping device\n",
8603 			    DEVNAME(sc), le16toh(notif->temperature));
8604 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8605 			task_add(systq, &sc->init_task);
8606 			break;
8607 		}
8608 
8609 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
8610 		    IWX_SESSION_PROTECTION_CMD):
8611 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8612 		    IWX_NVM_GET_INFO):
8613 		case IWX_ADD_STA_KEY:
8614 		case IWX_PHY_CONFIGURATION_CMD:
8615 		case IWX_TX_ANT_CONFIGURATION_CMD:
8616 		case IWX_ADD_STA:
8617 		case IWX_MAC_CONTEXT_CMD:
8618 		case IWX_REPLY_SF_CFG_CMD:
8619 		case IWX_POWER_TABLE_CMD:
8620 		case IWX_LTR_CONFIG:
8621 		case IWX_PHY_CONTEXT_CMD:
8622 		case IWX_BINDING_CONTEXT_CMD:
8623 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
8624 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
8625 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
8626 		case IWX_REPLY_BEACON_FILTERING_CMD:
8627 		case IWX_MAC_PM_POWER_TABLE:
8628 		case IWX_TIME_QUOTA_CMD:
8629 		case IWX_REMOVE_STA:
8630 		case IWX_TXPATH_FLUSH:
8631 		case IWX_BT_CONFIG:
8632 		case IWX_MCC_UPDATE_CMD:
8633 		case IWX_TIME_EVENT_CMD:
8634 		case IWX_STATISTICS_CMD:
8635 		case IWX_SCD_QUEUE_CFG: {
8636 			size_t pkt_len;
8637 
8638 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
8639 				break;
8640 
8641 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
8642 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8643 
8644 			pkt_len = sizeof(pkt->len_n_flags) +
8645 			    iwx_rx_packet_len(pkt);
8646 
8647 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
8648 			    pkt_len < sizeof(*pkt) ||
8649 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
8650 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
8651 				    sc->sc_cmd_resp_len[idx]);
8652 				sc->sc_cmd_resp_pkt[idx] = NULL;
8653 				break;
8654 			}
8655 
8656 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
8657 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8658 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
8659 			break;
8660 		}
8661 
8662 		case IWX_INIT_COMPLETE_NOTIF:
8663 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
8664 			wakeup(&sc->sc_init_complete);
8665 			break;
8666 
8667 		case IWX_SCAN_COMPLETE_UMAC: {
8668 			struct iwx_umac_scan_complete *notif;
8669 			SYNC_RESP_STRUCT(notif, pkt);
8670 			iwx_endscan(sc);
8671 			break;
8672 		}
8673 
8674 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
8675 			struct iwx_umac_scan_iter_complete_notif *notif;
8676 			SYNC_RESP_STRUCT(notif, pkt);
8677 			iwx_endscan(sc);
8678 			break;
8679 		}
8680 
8681 		case IWX_MCC_CHUB_UPDATE_CMD: {
8682 			struct iwx_mcc_chub_notif *notif;
8683 			SYNC_RESP_STRUCT(notif, pkt);
8684 			iwx_mcc_update(sc, notif);
8685 			break;
8686 		}
8687 
8688 		case IWX_REPLY_ERROR: {
8689 			struct iwx_error_resp *resp;
8690 			SYNC_RESP_STRUCT(resp, pkt);
8691 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
8692 				DEVNAME(sc), le32toh(resp->error_type),
8693 				resp->cmd_id);
8694 			break;
8695 		}
8696 
8697 		case IWX_TIME_EVENT_NOTIFICATION: {
8698 			struct iwx_time_event_notif *notif;
8699 			uint32_t action;
8700 			SYNC_RESP_STRUCT(notif, pkt);
8701 
8702 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
8703 				break;
8704 			action = le32toh(notif->action);
8705 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
8706 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8707 			break;
8708 		}
8709 
8710 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
8711 		    IWX_SESSION_PROTECTION_NOTIF):
8712 			break;
8713 
8714 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
8715 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
8716 			break;
8717 
8718 		/*
8719 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
8720 		 * messages. Just ignore them for now.
8721 		 */
8722 		case IWX_DEBUG_LOG_MSG:
8723 			break;
8724 
8725 		case IWX_MCAST_FILTER_CMD:
8726 			break;
8727 
8728 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
8729 			break;
8730 
8731 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
8732 			break;
8733 
8734 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
8735 			break;
8736 
8737 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8738 		    IWX_NVM_ACCESS_COMPLETE):
8739 			break;
8740 
8741 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
8742 			break; /* happens in monitor mode; ignore for now */
8743 
8744 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
8745 			break;
8746 
8747 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
8748 		    IWX_TLC_MNG_UPDATE_NOTIF): {
8749 			struct iwx_tlc_update_notif *notif;
8750 			SYNC_RESP_STRUCT(notif, pkt);
8751 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
8752 				iwx_rs_update(sc, notif);
8753 			break;
8754 		}
8755 
8756 		default:
8757 			handled = 0;
8758 			printf("%s: unhandled firmware response 0x%x/0x%x "
8759 			    "rx ring %d[%d]\n",
8760 			    DEVNAME(sc), code, pkt->len_n_flags,
8761 			    (qid & ~0x80), idx);
8762 			break;
8763 		}
8764 
8765 		/*
8766 		 * uCode sets bit 0x80 when it originates the notification,
8767 		 * i.e. when the notification is not a direct response to a
8768 		 * command sent by the driver.
8769 		 * For example, uCode issues IWX_REPLY_RX when it sends a
8770 		 * received frame to the driver.
8771 		 */
8772 		if (handled && !(qid & (1 << 7))) {
8773 			iwx_cmd_done(sc, qid, idx, code);
8774 		}
8775 
8776 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8777 	}
8778 
8779 	if (m0 && m0 != data->m)
8780 		m_freem(m0);
8781 }
8782 
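/*
 * Process all pending notifications: walk the RX ring from our current
 * read index up to the "closed" index the firmware reports in the RX
 * status page, then tell the firmware how far we have read.
 */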
8783 void
8784 iwx_notif_intr(struct iwx_softc *sc)
8785 {
8786 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
8787 	uint16_t hw;
8788 
8789 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
8790 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
8791 
8792 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
8793 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
8794 	while (sc->rxq.cur != hw) {
8795 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
8796 		iwx_rx_pkt(sc, data, &ml);
8797 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
8798 	}
8799 	if_input(&sc->sc_ic.ic_if, &ml);
8800 
8801 	/*
8802 	 * Tell the firmware what we have processed.
8803 	 * The hardware seems to get upset unless the write index is aligned to 8.
8804 	 */
8805 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
8806 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
8807 }
8808 
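/*
 * Interrupt handler used with legacy INTx or MSI. Interrupt causes are
 * read from the in-memory ICT table when it is enabled, and from the
 * IWX_CSR_INT register otherwise.
 */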
8809 int
8810 iwx_intr(void *arg)
8811 {
8812 	struct iwx_softc *sc = arg;
8813 	struct ieee80211com *ic = &sc->sc_ic;
8814 	struct ifnet *ifp = IC2IFP(ic);
8815 	int handled = 0;
8816 	int r1, r2, rv = 0;
8817 
8818 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
8819 
8820 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
8821 		uint32_t *ict = sc->ict_dma.vaddr;
8822 		int tmp;
8823 
8824 		tmp = htole32(ict[sc->ict_cur]);
8825 		if (!tmp)
8826 			goto out_ena;
8827 
8828 		/*
8829 		 * OK, there was something.  Keep reading entries until drained.
8830 		 */
8831 		r1 = r2 = 0;
8832 		while (tmp) {
8833 			r1 |= tmp;
8834 			ict[sc->ict_cur] = 0;
8835 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
8836 			tmp = htole32(ict[sc->ict_cur]);
8837 		}
8838 
8839 		/* An all-ones value is bogus (device gone?); discard it. */
8840 		if (r1 == 0xffffffff)
8841 			r1 = 0;
8842 
8843 		/* iwlwifi quirk: bits 18-19 imply the RX bit (bit 15). */
8844 		if (r1 & 0xc0000)
8845 			r1 |= 0x8000;
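		/*
		 * Expand the packed ICT value back to the CSR_INT bit
		 * layout: byte 0 keeps bits 0-7, byte 1 moves up to bits
		 * 24-31 (e.g. 0x8080 becomes 0x80000080).
		 */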
8846 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
8847 	} else {
8848 		r1 = IWX_READ(sc, IWX_CSR_INT);
8849 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
8850 			goto out;
8851 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
8852 	}
8853 	if (r1 == 0 && r2 == 0) {
8854 		goto out_ena;
8855 	}
8856 
8857 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
8858 
8859 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
8860 		int i;
8861 
8862 		/* Firmware has now configured the RFH. */
8863 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
8864 			iwx_update_rx_desc(sc, &sc->rxq, i);
8865 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
8866 	}
8867 
8868 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
8869 
8870 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
8871 		handled |= IWX_CSR_INT_BIT_RF_KILL;
8872 		iwx_check_rfkill(sc);
8873 		task_add(systq, &sc->init_task);
8874 		rv = 1;
8875 		goto out_ena;
8876 	}
8877 
8878 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
8879 		if (ifp->if_flags & IFF_DEBUG) {
8880 			iwx_nic_error(sc);
8881 			iwx_dump_driver_status(sc);
8882 		}
8883 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8884 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8885 			task_add(systq, &sc->init_task);
8886 		rv = 1;
8887 		goto out;
8889 	}
8890 
8891 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
8892 		handled |= IWX_CSR_INT_BIT_HW_ERR;
8893 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
8894 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8895 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8896 			task_add(systq, &sc->init_task);
8897 		}
8898 		rv = 1;
8899 		goto out;
8900 	}
8901 
8902 	/* firmware chunk loaded */
8903 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
8904 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
8905 		handled |= IWX_CSR_INT_BIT_FH_TX;
8906 
8907 		sc->sc_fw_chunk_done = 1;
8908 		wakeup(&sc->sc_fw);
8909 	}
8910 
8911 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
8912 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
8913 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
8914 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
8915 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
8916 		}
8917 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
8918 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
8919 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
8920 		}
8921 
8922 		/* Disable periodic interrupt; we use it as just a one-shot. */
8923 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
8924 
8925 		/*
8926 		 * Enable the periodic interrupt in 8 msec only if we received
8927 		 * a real RX interrupt (instead of just the periodic one), to
8928 		 * catch any dangling RX interrupt.  If it was just the periodic
8929 		 * interrupt, there was no dangling RX activity, and no need
8930 		 * to extend the periodic interrupt; one-shot is enough.
8931 		 */
8932 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
8933 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
8934 			    IWX_CSR_INT_PERIODIC_ENA);
8935 
8936 		iwx_notif_intr(sc);
8937 	}
8938 
8939 	rv = 1;
8940 
8941  out_ena:
8942 	iwx_restore_interrupts(sc);
8943  out:
8944 	return rv;
8945 }
8946 
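/*
 * Interrupt handler used with MSI-X. Causes are split across an FH (DMA)
 * and a HW cause register; both are acknowledged by writing the value
 * read back to the register.
 */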
8947 int
8948 iwx_intr_msix(void *arg)
8949 {
8950 	struct iwx_softc *sc = arg;
8951 	struct ieee80211com *ic = &sc->sc_ic;
8952 	struct ifnet *ifp = IC2IFP(ic);
8953 	uint32_t inta_fh, inta_hw;
8954 	int vector = 0;
8955 
8956 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
8957 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
8958 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
8959 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
8960 	inta_fh &= sc->sc_fh_mask;
8961 	inta_hw &= sc->sc_hw_mask;
8962 
8963 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
8964 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
8965 		iwx_notif_intr(sc);
8966 	}
8967 
8968 	/* firmware chunk loaded */
8969 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
8970 		sc->sc_fw_chunk_done = 1;
8971 		wakeup(&sc->sc_fw);
8972 	}
8973 
8974 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
8975 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
8976 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
8977 		if (ifp->if_flags & IFF_DEBUG) {
8978 			iwx_nic_error(sc);
8979 			iwx_dump_driver_status(sc);
8980 		}
8981 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8982 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8983 			task_add(systq, &sc->init_task);
8984 		return 1;
8985 	}
8986 
8987 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
8988 		iwx_check_rfkill(sc);
8989 		task_add(systq, &sc->init_task);
8990 	}
8991 
8992 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
8993 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
8994 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8995 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8996 			task_add(systq, &sc->init_task);
8997 		}
8998 		return 1;
8999 	}
9000 
9001 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
9002 		int i;
9003 
9004 		/* Firmware has now configured the RFH. */
9005 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9006 			iwx_update_rx_desc(sc, &sc->rxq, i);
9007 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9008 	}
9009 
9010 	/*
9011 	 * Before sending the interrupt, the HW disables it to prevent a
9012 	 * nested interrupt. This is done by writing 1 to the corresponding
9013 	 * bit in the mask register. After handling the interrupt, it should
9014 	 * be re-enabled by clearing this bit. The register is defined as a
9015 	 * write-1-clear (W1C) register, meaning that a bit is cleared by
9016 	 * writing 1 to it.
9017 	 */
9018 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9019 	return 1;
9020 }
9021 
9022 typedef void *iwx_match_t;
9023 
9024 static const struct pci_matchid iwx_devices[] = {
9025 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
9026 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
9027 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
9028 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4 },
9029 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5 },
9030 };
9031 
9032 static const struct pci_matchid iwx_subsystem_id_ax201[] = {
9033 	{ PCI_VENDOR_INTEL,	0x0070 },
9034 	{ PCI_VENDOR_INTEL,	0x0074 },
9035 	{ PCI_VENDOR_INTEL,	0x0078 },
9036 	{ PCI_VENDOR_INTEL,	0x007c },
9037 	{ PCI_VENDOR_INTEL,	0x0310 },
9038 	{ PCI_VENDOR_INTEL,	0x2074 },
9039 	{ PCI_VENDOR_INTEL,	0x4070 },
9040 	/* TODO: There are more ax201 devices with "main" product ID 0x06f0 */
9041 };
9042 
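/*
 * Decide whether this driver can attach to the device described by the
 * given PCI attach arguments.
 */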
9043 int
9044 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
9045 {
9046 	struct pci_attach_args *pa = aux;
9047 	pcireg_t subid;
9048 	pci_vendor_id_t svid;
9049 	pci_product_id_t spid;
9050 	int i;
9051 
9052 	if (!pci_matchbyid(pa, iwx_devices, nitems(iwx_devices)))
9053 		return 0;
9054 
9055 	/*
9056 	 * Some PCI product IDs are shared among devices which use distinct
9057 	 * chips or firmware. We need to match the subsystem ID as well to
9058 	 * ensure that we have in fact found a supported device.
9059 	 */
9060 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
9061 	svid = PCI_VENDOR(subid);
9062 	spid = PCI_PRODUCT(subid);
9063 
9064 	switch (PCI_PRODUCT(pa->pa_id)) {
9065 	case PCI_PRODUCT_INTEL_WL_22500_1: /* AX200 */
9066 		return 1; /* match any device */
9067 	case PCI_PRODUCT_INTEL_WL_22500_2: /* AX201 */
9068 	case PCI_PRODUCT_INTEL_WL_22500_3: /* AX201 */
9069 	case PCI_PRODUCT_INTEL_WL_22500_4: /* AX201 */
9070 	case PCI_PRODUCT_INTEL_WL_22500_5: /* AX201 */
9071 		for (i = 0; i < nitems(iwx_subsystem_id_ax201); i++) {
9072 			if (svid == iwx_subsystem_id_ax201[i].pm_vid &&
9073 			    spid == iwx_subsystem_id_ax201[i].pm_pid)
9074 				return 1;
9076 		}
9077 		break;
9078 	default:
9079 		break;
9080 	}
9081 
9082 	return 0;
9083 }
9084 
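/*
 * One-time setup which requires firmware to be loadable from disk: run
 * the init firmware once to read the NVM, print device details, and
 * finish net80211 setup. Later invocations only refresh the MAC address.
 */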
9085 int
9086 iwx_preinit(struct iwx_softc *sc)
9087 {
9088 	struct ieee80211com *ic = &sc->sc_ic;
9089 	struct ifnet *ifp = IC2IFP(ic);
9090 	int err;
9091 	static int attached;
9092 
9093 	err = iwx_prepare_card_hw(sc);
9094 	if (err) {
9095 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9096 		return err;
9097 	}
9098 
9099 	if (attached) {
9100 		/* Update MAC in case the upper layers changed it. */
9101 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
9102 		    ((struct arpcom *)ifp)->ac_enaddr);
9103 		return 0;
9104 	}
9105 
9106 	err = iwx_start_hw(sc);
9107 	if (err) {
9108 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9109 		return err;
9110 	}
9111 
9112 	err = iwx_run_init_mvm_ucode(sc, 1);
9113 	iwx_stop_device(sc);
9114 	if (err)
9115 		return err;
9116 
9117 	/* Print version info and MAC address on first successful fw load. */
9118 	attached = 1;
9119 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
9120 	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9121 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9122 
9123 	if (sc->sc_nvm.sku_cap_11n_enable)
9124 		iwx_setup_ht_rates(sc);
9125 
9126 	/* not all hardware can do 5GHz band */
9127 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9128 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9129 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9130 
9131 	/* Configure channel information obtained from firmware. */
9132 	ieee80211_channel_init(ifp);
9133 
9134 	/* Configure MAC address. */
9135 	err = if_setlladdr(ifp, ic->ic_myaddr);
9136 	if (err)
9137 		printf("%s: could not set MAC address (error %d)\n",
9138 		    DEVNAME(sc), err);
9139 
9140 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9141 
9142 	return 0;
9143 }
9144 
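/*
 * Deferred attach hook, called via config_mountroot(9) once the root
 * filesystem is available and firmware files can be read from disk.
 */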
9145 void
9146 iwx_attach_hook(struct device *self)
9147 {
9148 	struct iwx_softc *sc = (void *)self;
9149 
9150 	KASSERT(!cold);
9151 
9152 	iwx_preinit(sc);
9153 }
9154 
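/*
 * Main autoconf(9) attach routine: map registers, establish the
 * interrupt, allocate DMA resources, and register with net80211.
 * Firmware-dependent setup is deferred until after mountroot.
 */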
9155 void
9156 iwx_attach(struct device *parent, struct device *self, void *aux)
9157 {
9158 	struct iwx_softc *sc = (void *)self;
9159 	struct pci_attach_args *pa = aux;
9160 	pci_intr_handle_t ih;
9161 	pcireg_t reg, memtype;
9162 	struct ieee80211com *ic = &sc->sc_ic;
9163 	struct ifnet *ifp = &ic->ic_if;
9164 	const char *intrstr;
9165 	int err;
9166 	int txq_i, i, j;
9167 
9168 	sc->sc_pct = pa->pa_pc;
9169 	sc->sc_pcitag = pa->pa_tag;
9170 	sc->sc_dmat = pa->pa_dmat;
9171 
9172 	rw_init(&sc->ioctl_rwl, "iwxioctl");
9173 
9174 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
9175 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
9176 	if (err == 0) {
9177 		printf("%s: PCIe capability structure not found!\n",
9178 		    DEVNAME(sc));
9179 		return;
9180 	}
9181 
9182 	/*
9183 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
9184 	 * PCI Tx retries from interfering with C3 CPU state.
9185 	 */
9186 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9187 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9188 
9189 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
9190 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
9191 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
9192 	if (err) {
9193 		printf("%s: can't map mem space\n", DEVNAME(sc));
9194 		return;
9195 	}
9196 
9197 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
9198 		sc->sc_msix = 1;
9199 	} else if (pci_intr_map_msi(pa, &ih)) {
9200 		if (pci_intr_map(pa, &ih)) {
9201 			printf("%s: can't map interrupt\n", DEVNAME(sc));
9202 			return;
9203 		}
9204 		/* Hardware bug workaround. */
9205 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
9206 		    PCI_COMMAND_STATUS_REG);
9207 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
9208 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9209 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
9210 		    PCI_COMMAND_STATUS_REG, reg);
9211 	}
9212 
9213 	intrstr = pci_intr_string(sc->sc_pct, ih);
9214 	if (sc->sc_msix)
9215 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9216 		    iwx_intr_msix, sc, DEVNAME(sc));
9217 	else
9218 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9219 		    iwx_intr, sc, DEVNAME(sc));
9220 
9221 	if (sc->sc_ih == NULL) {
9222 		printf("\n");
9223 		printf("%s: can't establish interrupt", DEVNAME(sc));
9224 		if (intrstr != NULL)
9225 			printf(" at %s", intrstr);
9226 		printf("\n");
9227 		return;
9228 	}
9229 	printf(", %s\n", intrstr);
9230 
9231 	/* Clear pending interrupts. */
9232 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
9233 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
9234 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
9235 
9236 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
9237 
9238 	/*
9239 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
9240 	 * changed: the revision step now also includes bits 0-1 (there is no
9241 	 * more "dash" value). To keep hw_rev backwards compatible, we store
9242 	 * it in the old format.
9243 	 */
9244 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
9245 	    (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
9246 
9247 	switch (PCI_PRODUCT(pa->pa_id)) {
9248 	case PCI_PRODUCT_INTEL_WL_22500_1:
9249 		sc->sc_fwname = "iwx-cc-a0-63";
9250 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9251 		sc->sc_integrated = 0;
9252 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
9253 		sc->sc_low_latency_xtal = 0;
9254 		sc->sc_xtal_latency = 0;
9255 		sc->sc_tx_with_siso_diversity = 0;
9256 		sc->sc_uhb_supported = 0;
9257 		break;
9258 	case PCI_PRODUCT_INTEL_WL_22500_2:
9259 	case PCI_PRODUCT_INTEL_WL_22500_3:
9260 	case PCI_PRODUCT_INTEL_WL_22500_5:
9261 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
9262 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
9263 			return;
9264 		}
9265 
9266 		sc->sc_fwname = "iwx-QuZ-a0-hr-b0-63";
9267 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9268 		sc->sc_integrated = 1;
9269 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
9270 		sc->sc_low_latency_xtal = 0;
9271 		sc->sc_xtal_latency = 500;
9272 		sc->sc_tx_with_siso_diversity = 0;
9273 		sc->sc_uhb_supported = 0;
9274 		break;
9275 	case PCI_PRODUCT_INTEL_WL_22500_4:
9276 		sc->sc_fwname = "iwx-Qu-c0-hr-b0-63";
9277 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9278 		sc->sc_integrated = 1;
9279 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
9280 		sc->sc_low_latency_xtal = 0;
9281 		sc->sc_xtal_latency = 1820;
9282 		sc->sc_tx_with_siso_diversity = 0;
9283 		sc->sc_uhb_supported = 0;
9284 		break;
9285 	default:
9286 		printf("%s: unknown adapter type\n", DEVNAME(sc));
9287 		return;
9288 	}
9289 
9290 	/* Allocate DMA memory for loading firmware. */
9291 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
9292 	    sizeof(struct iwx_context_info), 0);
9293 	if (err) {
9294 		printf("%s: could not allocate memory for loading firmware\n",
9295 		    DEVNAME(sc));
9296 		return;
9297 	}
9298 
9299 	/* Allocate interrupt cause table (ICT). */
9300 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
9301 	    IWX_ICT_SIZE, 1 << IWX_ICT_PADDR_SHIFT);
9302 	if (err) {
9303 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
9304 		goto fail1;
9305 	}
9306 
9307 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
9308 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
9309 		if (err) {
9310 			printf("%s: could not allocate TX ring %d\n",
9311 			    DEVNAME(sc), txq_i);
9312 			goto fail4;
9313 		}
9314 	}
9315 
9316 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
9317 	if (err) {
9318 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
9319 		goto fail4;
9320 	}
9321 
9322 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
9323 	if (sc->sc_nswq == NULL)
9324 		goto fail4;
9325 
9326 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not the only mode, but not used */
9327 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
9328 	ic->ic_state = IEEE80211_S_INIT;
9329 
9330 	/* Set device capabilities. */
9331 	ic->ic_caps =
9332 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
9333 	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
9334 	    IEEE80211_C_WEP |		/* WEP */
9335 	    IEEE80211_C_RSN |		/* WPA/RSN */
9336 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
9337 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
9338 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
9339 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
9340 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
9341 
9342 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
9343 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
9344 	ic->ic_htcaps |=
9345 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
9346 	ic->ic_htxcaps = 0;
9347 	ic->ic_txbfcaps = 0;
9348 	ic->ic_aselcaps = 0;
9349 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
9350 
9351 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
9352 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
9353 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
9354 
9355 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
9356 		sc->sc_phyctxt[i].id = i;
9357 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
9358 	}
9359 
9360 	/* IBSS channel undefined for now. */
9361 	ic->ic_ibss_chan = &ic->ic_channels[1];
9362 
9363 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
9364 
9365 	ifp->if_softc = sc;
9366 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9367 	ifp->if_ioctl = iwx_ioctl;
9368 	ifp->if_start = iwx_start;
9369 	ifp->if_watchdog = iwx_watchdog;
9370 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
9371 
9372 	if_attach(ifp);
9373 	ieee80211_ifattach(ifp);
9374 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9375 
9376 #if NBPFILTER > 0
9377 	iwx_radiotap_attach(sc);
9378 #endif
9379 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9380 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
9381 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
9382 		rxba->sc = sc;
9383 		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
9384 		    rxba);
9385 		timeout_set(&rxba->reorder_buf.reorder_timer,
9386 		    iwx_reorder_timer_expired, &rxba->reorder_buf);
9387 		for (j = 0; j < nitems(rxba->entries); j++)
9388 			ml_init(&rxba->entries[j].frames);
9389 	}
9390 	task_set(&sc->init_task, iwx_init_task, sc);
9391 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
9392 	task_set(&sc->ba_task, iwx_ba_task, sc);
9393 	task_set(&sc->setkey_task, iwx_setkey_task, sc);
9394 	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
9395 	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
9396 
9397 	ic->ic_node_alloc = iwx_node_alloc;
9398 	ic->ic_bgscan_start = iwx_bgscan;
9399 	ic->ic_set_key = iwx_set_key;
9400 	ic->ic_delete_key = iwx_delete_key;
9401 
9402 	/* Override 802.11 state transition machine. */
9403 	sc->sc_newstate = ic->ic_newstate;
9404 	ic->ic_newstate = iwx_newstate;
9405 	ic->ic_updateprot = iwx_updateprot;
9406 	ic->ic_updateslot = iwx_updateslot;
9407 	ic->ic_updateedca = iwx_updateedca;
9408 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
9409 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
9410 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
9411 	ic->ic_ampdu_tx_stop = NULL;
9412 	/*
9413 	 * We cannot read the MAC address without loading the
9414 	 * firmware from disk. Postpone until mountroot is done.
9415 	 */
9416 	config_mountroot(self, iwx_attach_hook);
9417 
9418 	return;
9419 
9420 fail4:	while (--txq_i >= 0)
9421 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
9422 	iwx_free_rx_ring(sc, &sc->rxq);
9423 	if (sc->ict_dma.vaddr != NULL)
9424 		iwx_dma_contig_free(&sc->ict_dma);
9425 
9426 fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
9427 	return;
9428 }
9429 
9430 #if NBPFILTER > 0
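/*
 * Attach to bpf(4) for radiotap capture and initialize the static
 * parts of the RX and TX radiotap headers.
 */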
9431 void
9432 iwx_radiotap_attach(struct iwx_softc *sc)
9433 {
9434 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
9435 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
9436 
9437 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
9438 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
9439 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
9440 
9441 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
9442 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
9443 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
9444 }
9445 #endif
9446 
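/*
 * Restart task scheduled after events such as rfkill toggles and fatal
 * firmware errors. Stops the interface and brings it back up, unless a
 * fatal error is pending.
 */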
9447 void
9448 iwx_init_task(void *arg1)
9449 {
9450 	struct iwx_softc *sc = arg1;
9451 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9452 	int s = splnet();
9453 	int generation = sc->sc_generation;
9454 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
9455 
9456 	rw_enter_write(&sc->ioctl_rwl);
9457 	if (generation != sc->sc_generation) {
9458 		rw_exit(&sc->ioctl_rwl);
9459 		splx(s);
9460 		return;
9461 	}
9462 
9463 	if (ifp->if_flags & IFF_RUNNING)
9464 		iwx_stop(ifp);
9465 	else
9466 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9467 
9468 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
9469 		iwx_init(ifp);
9470 
9471 	rw_exit(&sc->ioctl_rwl);
9472 	splx(s);
9473 }
9474 
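/*
 * Re-apply PCI configuration workarounds and quiesce interrupts after
 * resume; the firmware itself is restarted later via iwx_wakeup().
 */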
9475 void
9476 iwx_resume(struct iwx_softc *sc)
9477 {
9478 	pcireg_t reg;
9479 
9480 	/*
9481 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
9482 	 * PCI Tx retries from interfering with C3 CPU state.
9483 	 */
9484 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9485 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9486 
9487 	if (!sc->sc_msix) {
9488 		/* Hardware bug workaround. */
9489 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
9490 		    PCI_COMMAND_STATUS_REG);
9491 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
9492 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9493 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
9494 		    PCI_COMMAND_STATUS_REG, reg);
9495 	}
9496 
9497 	iwx_disable_interrupts(sc);
9498 }
9499 
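/*
 * Restart hardware and firmware after resume, then begin a scan (or go
 * straight to RUN state in monitor mode).
 */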
9500 int
9501 iwx_wakeup(struct iwx_softc *sc)
9502 {
9503 	struct ieee80211com *ic = &sc->sc_ic;
9504 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9505 	int err;
9506 
9507 	err = iwx_start_hw(sc);
9508 	if (err)
9509 		return err;
9510 
9511 	err = iwx_init_hw(sc);
9512 	if (err)
9513 		return err;
9514 
9515 	refcnt_init(&sc->task_refs);
9516 	ifq_clr_oactive(&ifp->if_snd);
9517 	ifp->if_flags |= IFF_RUNNING;
9518 
9519 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
9520 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9521 	else
9522 		ieee80211_begin_scan(ifp);
9523 
9524 	return 0;
9525 }
9526 
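/*
 * autoconf(9) activation hook, handling suspend/resume power
 * transitions.
 */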
9527 int
9528 iwx_activate(struct device *self, int act)
9529 {
9530 	struct iwx_softc *sc = (struct iwx_softc *)self;
9531 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9532 	int err = 0;
9533 
9534 	switch (act) {
9535 	case DVACT_QUIESCE:
9536 		if (ifp->if_flags & IFF_RUNNING) {
9537 			rw_enter_write(&sc->ioctl_rwl);
9538 			iwx_stop(ifp);
9539 			rw_exit(&sc->ioctl_rwl);
9540 		}
9541 		break;
9542 	case DVACT_RESUME:
9543 		iwx_resume(sc);
9544 		break;
9545 	case DVACT_WAKEUP:
9546 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
9547 			err = iwx_wakeup(sc);
9548 			if (err)
9549 				printf("%s: could not initialize hardware\n",
9550 				    DEVNAME(sc));
9551 		}
9552 		break;
9553 	}
9554 
9555 	return 0;
9556 }
9557 
9558 struct cfdriver iwx_cd = {
9559 	NULL, "iwx", DV_IFNET
9560 };
9561 
9562 struct cfattach iwx_ca = {
9563 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
9564 	NULL, iwx_activate
9565 };
9566