/*	$OpenBSD: if_iwm.c,v 1.377 2021/10/12 11:20:32 landry Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_ra.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14
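/*
 * Map from the driver's rate index to PLCP values for both legacy and
 * HT (MCS) rates. The "rate" field is in units of 500 kb/s, so e.g.
 * iwm_rates[0].rate == 2 means 1 Mb/s CCK and iwm_rates[4].rate == 12
 * means 6 Mb/s OFDM. Rows with no legacy equivalent use
 * IWM_RATE_INVM_PLCP; rows with no HT equivalent use
 * IWM_RATE_HT_SISO_MCS_INV_PLCP.
 */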
188 
189 const struct iwm_rate {
190 	uint16_t rate;
191 	uint8_t plcp;
192 	uint8_t ht_plcp;
193 } iwm_rates[] = {
194 		/* Legacy */		/* HT */
195 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
196 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
198 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
199 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
200 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
201 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
202 	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
203 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
204 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
205 	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
206 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
207 	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
208 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
209 	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
210 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
211 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
212 	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
213 	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
214 	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
215 	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
216 };
217 #define IWM_RIDX_CCK	0
218 #define IWM_RIDX_OFDM	4
219 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
220 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
221 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
222 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
223 
224 /* Convert an MCS index into an iwm_rates[] index. */
225 const int iwm_mcs2ridx[] = {
226 	IWM_RATE_MCS_0_INDEX,
227 	IWM_RATE_MCS_1_INDEX,
228 	IWM_RATE_MCS_2_INDEX,
229 	IWM_RATE_MCS_3_INDEX,
230 	IWM_RATE_MCS_4_INDEX,
231 	IWM_RATE_MCS_5_INDEX,
232 	IWM_RATE_MCS_6_INDEX,
233 	IWM_RATE_MCS_7_INDEX,
234 	IWM_RATE_MCS_8_INDEX,
235 	IWM_RATE_MCS_9_INDEX,
236 	IWM_RATE_MCS_10_INDEX,
237 	IWM_RATE_MCS_11_INDEX,
238 	IWM_RATE_MCS_12_INDEX,
239 	IWM_RATE_MCS_13_INDEX,
240 	IWM_RATE_MCS_14_INDEX,
241 	IWM_RATE_MCS_15_INDEX,
242 };
243 
244 struct iwm_nvm_section {
245 	uint16_t length;
246 	uint8_t *data;
247 };
248 
249 int	iwm_is_mimo_ht_plcp(uint8_t);
250 int	iwm_is_mimo_mcs(int);
251 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
252 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
253 	    uint8_t *, size_t);
254 int	iwm_set_default_calib(struct iwm_softc *, const void *);
255 void	iwm_fw_info_free(struct iwm_fw_info *);
256 void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
257 int	iwm_read_firmware(struct iwm_softc *);
258 uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
259 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
260 void	iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
261 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
262 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
263 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
264 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
265 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
266 int	iwm_nic_lock(struct iwm_softc *);
267 void	iwm_nic_assert_locked(struct iwm_softc *);
268 void	iwm_nic_unlock(struct iwm_softc *);
269 int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
270 	    uint32_t);
271 int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
272 int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
273 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
274 	    bus_size_t);
275 void	iwm_dma_contig_free(struct iwm_dma_info *);
276 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
277 void	iwm_disable_rx_dma(struct iwm_softc *);
278 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
279 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
280 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
281 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
282 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
283 void	iwm_enable_rfkill_int(struct iwm_softc *);
284 int	iwm_check_rfkill(struct iwm_softc *);
285 void	iwm_enable_interrupts(struct iwm_softc *);
286 void	iwm_enable_fwload_interrupt(struct iwm_softc *);
287 void	iwm_restore_interrupts(struct iwm_softc *);
288 void	iwm_disable_interrupts(struct iwm_softc *);
289 void	iwm_ict_reset(struct iwm_softc *);
290 int	iwm_set_hw_ready(struct iwm_softc *);
291 int	iwm_prepare_card_hw(struct iwm_softc *);
292 void	iwm_apm_config(struct iwm_softc *);
293 int	iwm_apm_init(struct iwm_softc *);
294 void	iwm_apm_stop(struct iwm_softc *);
295 int	iwm_allow_mcast(struct iwm_softc *);
296 void	iwm_init_msix_hw(struct iwm_softc *);
297 void	iwm_conf_msix_hw(struct iwm_softc *, int);
298 int	iwm_clear_persistence_bit(struct iwm_softc *);
299 int	iwm_start_hw(struct iwm_softc *);
300 void	iwm_stop_device(struct iwm_softc *);
301 void	iwm_nic_config(struct iwm_softc *);
302 int	iwm_nic_rx_init(struct iwm_softc *);
303 int	iwm_nic_rx_legacy_init(struct iwm_softc *);
304 int	iwm_nic_rx_mq_init(struct iwm_softc *);
305 int	iwm_nic_tx_init(struct iwm_softc *);
306 int	iwm_nic_init(struct iwm_softc *);
307 int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
308 int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
309 	    uint16_t);
310 int	iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
311 int	iwm_post_alive(struct iwm_softc *);
312 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
313 	    uint16_t);
314 int	iwm_phy_db_set_section(struct iwm_softc *,
315 	    struct iwm_calib_res_notif_phy_db *);
316 int	iwm_is_valid_channel(uint16_t);
317 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
318 uint16_t iwm_channel_id_to_papd(uint16_t);
319 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
320 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
321 	    uint16_t *, uint16_t);
322 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
323 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
324 	    uint8_t);
325 int	iwm_send_phy_db_data(struct iwm_softc *);
326 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
327 	    uint32_t);
328 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
329 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
330 	    uint8_t *, uint16_t *);
331 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
332 	    uint16_t *, size_t);
333 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
334 	    const uint8_t *nvm_channels, int nchan);
335 int	iwm_mimo_enabled(struct iwm_softc *);
336 void	iwm_setup_ht_rates(struct iwm_softc *);
337 void	iwm_mac_ctxt_task(void *);
338 void	iwm_phy_ctxt_task(void *);
339 void	iwm_updateprot(struct ieee80211com *);
340 void	iwm_updateslot(struct ieee80211com *);
341 void	iwm_updateedca(struct ieee80211com *);
342 void	iwm_updatechan(struct ieee80211com *);
343 void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
344 	    uint16_t);
345 void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
346 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
347 	    uint8_t);
348 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
349 	    uint8_t);
350 void	iwm_rx_ba_session_expired(void *);
351 void	iwm_reorder_timer_expired(void *);
352 int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
353 	    uint16_t, uint16_t, int, int);
354 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
355 	    uint8_t);
356 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
357 	    uint8_t);
358 void	iwm_ba_task(void *);
359 
360 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
361 	    const uint16_t *, const uint16_t *,
362 	    const uint16_t *, const uint16_t *,
363 	    const uint16_t *, int);
364 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
365 	    const uint16_t *, const uint16_t *);
366 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
367 int	iwm_nvm_init(struct iwm_softc *);
368 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
369 	    uint32_t);
370 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
371 	    uint32_t);
372 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
373 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
374 	    int , int *);
375 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
376 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
377 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
378 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
379 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
380 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
381 int	iwm_send_dqa_cmd(struct iwm_softc *);
382 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
383 int	iwm_config_ltr(struct iwm_softc *);
384 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
385 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
386 int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
387 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
388 	    struct iwm_rx_data *);
389 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
390 int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
391 	    struct ieee80211_rxinfo *);
392 int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
393 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
394 void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
395 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
396 void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
397 	    int, uint8_t, int);
398 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
399 	    struct iwm_node *, int, int);
400 void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
401 void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
402 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
403 	    struct iwm_rx_data *);
404 void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
405 void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
406 	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
407 void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *,
408 	    struct iwm_rx_data *);
409 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
410 	    struct iwm_rx_data *);
411 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
412 int	iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
413 	    uint8_t, uint32_t, uint32_t, uint8_t);
414 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
415 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
416 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
417 	    struct ieee80211_channel *, uint8_t, uint8_t, uint8_t);
418 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
419 	    uint8_t, uint32_t, uint32_t, uint8_t);
420 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
421 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
422 	    const void *);
423 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
424 	    uint32_t *);
425 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
426 	    const void *, uint32_t *);
427 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
428 void	iwm_cmd_done(struct iwm_softc *, int, int, int);
429 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
430 void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
431 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
432 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
433 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
434 int	iwm_flush_tx_path(struct iwm_softc *, int);
435 int	iwm_wait_tx_queues_empty(struct iwm_softc *);
436 void	iwm_led_enable(struct iwm_softc *);
437 void	iwm_led_disable(struct iwm_softc *);
438 int	iwm_led_is_enabled(struct iwm_softc *);
439 void	iwm_led_blink_timeout(void *);
440 void	iwm_led_blink_start(struct iwm_softc *);
441 void	iwm_led_blink_stop(struct iwm_softc *);
442 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
443 	    struct iwm_beacon_filter_cmd *);
444 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
445 	    struct iwm_beacon_filter_cmd *);
446 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
447 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
448 	    struct iwm_mac_power_cmd *);
449 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
450 int	iwm_power_update_device(struct iwm_softc *);
451 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
452 int	iwm_disable_beacon_filter(struct iwm_softc *);
453 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
454 int	iwm_add_aux_sta(struct iwm_softc *);
455 int	iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
456 int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
457 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
458 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
459 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
460 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
461 	    struct iwm_scan_channel_cfg_lmac *, int, int);
462 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
463 int	iwm_lmac_scan(struct iwm_softc *, int);
464 int	iwm_config_umac_scan(struct iwm_softc *);
465 int	iwm_umac_scan(struct iwm_softc *, int);
466 void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
467 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
468 int	iwm_rval2ridx(int);
469 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
470 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
471 	    struct iwm_mac_ctx_cmd *, uint32_t);
472 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
473 	    struct iwm_mac_data_sta *, int);
474 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
475 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
476 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
477 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
478 int	iwm_scan(struct iwm_softc *);
479 int	iwm_bgscan(struct ieee80211com *);
480 int	iwm_umac_scan_abort(struct iwm_softc *);
481 int	iwm_lmac_scan_abort(struct iwm_softc *);
482 int	iwm_scan_abort(struct iwm_softc *);
483 int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
484 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t);
485 int	iwm_auth(struct iwm_softc *);
486 int	iwm_deauth(struct iwm_softc *);
487 int	iwm_run(struct iwm_softc *);
488 int	iwm_run_stop(struct iwm_softc *);
489 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
490 int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
491 	    struct ieee80211_key *);
492 int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
493 	    struct ieee80211_key *);
494 void	iwm_delete_key_v1(struct ieee80211com *,
495 	    struct ieee80211_node *, struct ieee80211_key *);
496 void	iwm_delete_key(struct ieee80211com *,
497 	    struct ieee80211_node *, struct ieee80211_key *);
498 void	iwm_calib_timeout(void *);
499 void	iwm_setrates(struct iwm_node *, int);
500 int	iwm_media_change(struct ifnet *);
501 void	iwm_newstate_task(void *);
502 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
503 void	iwm_endscan(struct iwm_softc *);
504 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
505 	    struct ieee80211_node *);
506 int	iwm_sf_config(struct iwm_softc *, int);
507 int	iwm_send_bt_init_conf(struct iwm_softc *);
508 int	iwm_send_soc_conf(struct iwm_softc *);
509 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
510 int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
511 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
512 void	iwm_free_fw_paging(struct iwm_softc *);
513 int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
514 int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
515 int	iwm_init_hw(struct iwm_softc *);
516 int	iwm_init(struct ifnet *);
517 void	iwm_start(struct ifnet *);
518 void	iwm_stop(struct ifnet *);
519 void	iwm_watchdog(struct ifnet *);
520 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
521 const char *iwm_desc_lookup(uint32_t);
522 void	iwm_nic_error(struct iwm_softc *);
523 void	iwm_dump_driver_status(struct iwm_softc *);
524 void	iwm_nic_umac_error(struct iwm_softc *);
525 void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
526 	    struct mbuf_list *);
527 void	iwm_flip_address(uint8_t *);
528 int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
529 	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
530 int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
531 void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
532 	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
533 	    struct mbuf_list *);
534 int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
535 	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
536 int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
537 	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
538 	    struct ieee80211_rxinfo *, struct mbuf_list *);
539 void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
540 	    struct mbuf_list *);
541 int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
542 void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
543 	    struct mbuf_list *);
544 void	iwm_notif_intr(struct iwm_softc *);
545 int	iwm_intr(void *);
546 int	iwm_intr_msix(void *);
547 int	iwm_match(struct device *, void *, void *);
548 int	iwm_preinit(struct iwm_softc *);
549 void	iwm_attach_hook(struct device *);
550 void	iwm_attach(struct device *, struct device *, void *);
551 void	iwm_init_task(void *);
552 int	iwm_activate(struct device *, int);
553 int	iwm_resume(struct iwm_softc *);
554 
555 #if NBPFILTER > 0
556 void	iwm_radiotap_attach(struct iwm_softc *);
557 #endif
558 
559 uint8_t
560 iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
561 {
562 	const struct iwm_fw_cmd_version *entry;
563 	int i;
564 
565 	for (i = 0; i < sc->n_cmd_versions; i++) {
566 		entry = &sc->cmd_versions[i];
567 		if (entry->group == grp && entry->cmd == cmd)
568 			return entry->cmd_ver;
569 	}
570 
571 	return IWM_FW_CMD_VER_UNKNOWN;
572 }
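/*
 * Usage sketch (hypothetical, not tied to any particular firmware): a
 * caller that must choose between command layouts could do
 *
 *	if (iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP, IWM_TX_CMD) ==
 *	    IWM_FW_CMD_VER_UNKNOWN)
 *		fall back to the legacy layout.
 */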

int
iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
}

int
iwm_is_mimo_mcs(int mcs)
{
	int ridx = iwm_mcs2ridx[mcs];
	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
}

int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
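/*
 * Layout of one firmware section as consumed above (sketch):
 *
 *	byte 0..3   little-endian device load offset  -> fws_devoff
 *	byte 4..n   section image data                -> fws_data, fws_len
 */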

#define IWM_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

void
iwm_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}
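/*
 * For example, major=17, minor=352738, api=1 yields "17.352738.1",
 * while major=36, minor=0x88a5f0a5, api=1 yields "36.88a5f0a5.1".
 */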

int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	uint32_t usniffer_img;
	uint32_t paging_mem_size;
	int err;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

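	/*
	 * The remainder of the image is a sequence of TLV records:
	 * a little-endian type and length followed by the payload,
	 * with each payload padded to a 4-byte boundary (sketch):
	 *
	 *	+-------------+---------------+---------------+-----+
	 *	| type (le32) | length (le32) | data ...      | pad |
	 *	+-------------+---------------+---------------+-----+
	 */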
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
				/* Ignore any partial trailing entry. */
				tlv_len /= sizeof(struct iwm_fw_cmd_version);
				tlv_len *= sizeof(struct iwm_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions =
			    tlv_len / sizeof(struct iwm_fw_cmd_version);
			break;

		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);

			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
			    DEVNAME(sc), paging_mem_size));
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				printf("%s: Driver only supports up to %u"
				    " bytes for paging image (%u requested)\n",
				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
				    paging_mem_size);
				err = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				printf("%s: Paging: image isn't a multiple of %u\n",
				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
				err = EINVAL;
				goto out;
			}

			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
			fw->fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_DBG_DEST:
		case IWM_UCODE_TLV_FW_DBG_CONF:
		case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWM_UCODE_TLV_TYPE_HCMD:
		case IWM_UCODE_TLV_TYPE_REGIONS:
		case IWM_UCODE_TLV_TYPE_TRIGGERS:
			break;

		case IWM_UCODE_TLV_HW_TYPE:
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		/* undocumented TLVs found in iwm-9000-43 image */
		case 0x1000003:
		case 0x1000004:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

uint32_t
iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	return iwm_read_prph_unlocked(sc, addr);
}

void
iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	iwm_write_prph_unlocked(sc, addr, val);
}

void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

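/*
 * Poll until the masked bits of a CSR register match the wanted value.
 * The timeout is given in microseconds and is checked in steps of 10
 * microseconds; returns 1 on success, 0 on timeout.
 */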
int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

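/*
 * Request MAC (NIC) access and wait for the clock to become ready.
 * Access is reference-counted: each successful iwm_nic_lock() must be
 * balanced by a matching iwm_nic_unlock().
 */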
int
iwm_nic_lock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}

void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}

void
iwm_nic_unlock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}

int
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

int
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

int
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

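/*
 * Allocate one physically contiguous DMA segment, map it into kernel
 * virtual memory, and load it into the DMA map:
 * bus_dmamap_create -> bus_dmamem_alloc -> bus_dmamem_map ->
 * bus_dmamap_load, with iwm_dma_contig_free() undoing all steps on
 * failure.
 */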
int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}

void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, err;

	ring->cur = 0;

	if (sc->sc_mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->sc_mqrx_supported) {
		size = count * sizeof(uint32_t);
		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (err) {
			printf("%s: could not allocate RX ring DMA memory\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		if (sc->sc_mqrx_supported) {
			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
				    IWM_RXF_DMA_IDLE)
					break;
				DELAY(10);
			}
		} else {
			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
					break;
				DELAY(10);
			}
		}
		iwm_nic_unlock(sc);
	}
}

void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
}

void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	if (sc->sc_mqrx_supported)
		count = IWM_RX_MQ_RING_COUNT;
	else
		count = IWM_RX_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * There is no need to allocate DMA buffers for unused rings.
	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
	 * The command queue is queue 0 (sc->txq[0]), and the four mgmt/data
	 * frame queues are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e.
	 * sc->txq[5:8], in order to provide one queue per EDCA category.
	 * Tx aggregation requires additional queues, one queue per TID for
	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
	 *
	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
	 * and Tx aggregation is not supported.
	 *
	 * Unfortunately, we cannot tell if DQA will be used until the
	 * firmware gets loaded later, so just allocate sufficient rings
	 * in order to satisfy both cases.
	 */
	if (qid > IWM_LAST_AGG_TX_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	sc->qenablemsk &= ~(1 << ring->qid);
	/* 7000 family NICs are locked while commands are in progress. */
	if (ring->qid == sc->cmdqid && ring->queued > 0) {
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			iwm_nic_unlock(sc);
	}
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;
}

void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}

int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its bit
		 * is unset in the mask register.
1592 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1593 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1594 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1595 		    ~sc->sc_fh_mask);
1596 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1597 		    ~sc->sc_hw_mask);
1598 	}
1599 }
1600 
1601 void
1602 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1603 {
1604 	if (!sc->sc_msix) {
1605 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1606 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1607 	} else {
1608 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1609 		    sc->sc_hw_init_mask);
1610 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1611 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1612 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1613 	}
1614 }
1615 
1616 void
1617 iwm_restore_interrupts(struct iwm_softc *sc)
1618 {
1619 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1620 }
1621 
1622 void
1623 iwm_disable_interrupts(struct iwm_softc *sc)
1624 {
1625 	int s = splnet();
1626 
1627 	if (!sc->sc_msix) {
1628 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1629 
1630 		/* acknowledge all interrupts */
1631 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1632 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1633 	} else {
1634 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1635 		    sc->sc_fh_init_mask);
1636 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1637 		    sc->sc_hw_init_mask);
1638 	}
1639 
1640 	splx(s);
1641 }
1642 
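/*
 * Reset and re-arm the interrupt cause table (ICT), a DMA region the
 * device fills with interrupt causes so that the host can consume them
 * from memory instead of reading interrupt status registers.
 */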
1643 void
1644 iwm_ict_reset(struct iwm_softc *sc)
1645 {
1646 	iwm_disable_interrupts(sc);
1647 
1648 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1649 	sc->ict_cur = 0;
1650 
1651 	/* Set physical address of ICT (4KB aligned). */
1652 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1653 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1654 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1655 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1656 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1657 
1658 	/* Switch to ICT interrupt mode in driver. */
1659 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1660 
1661 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1662 	iwm_enable_interrupts(sc);
1663 }
1664 
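/*
 * Hardware ready handshake: request NIC_READY, poll until the device
 * reflects the bit back, then raise the "OS alive" mailbox bit.
 */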
1665 #define IWM_HW_READY_TIMEOUT 50
1666 int
1667 iwm_set_hw_ready(struct iwm_softc *sc)
1668 {
1669 	int ready;
1670 
1671 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1672 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1673 
1674 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1675 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1676 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1677 	    IWM_HW_READY_TIMEOUT);
1678 	if (ready)
1679 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1680 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1681 
1682 	return ready;
1683 }
1684 #undef IWM_HW_READY_TIMEOUT
1685 
1686 int
1687 iwm_prepare_card_hw(struct iwm_softc *sc)
1688 {
1689 	int t = 0;
1690 	int ntries;
1691 
1692 	if (iwm_set_hw_ready(sc))
1693 		return 0;
1694 
1695 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1696 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1697 	DELAY(1000);
1698 
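	/*
	 * Note that 't' accumulates across attempts: the 150ms polling
	 * budget below is shared by all ten PREPARE requests, with an
	 * extra 25ms pause after each request.
	 */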
1699 	for (ntries = 0; ntries < 10; ntries++) {
1700 		/* If HW is not ready, prepare the conditions to check again */
1701 		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1702 		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1703 
1704 		do {
1705 			if (iwm_set_hw_ready(sc))
1706 				return 0;
1707 			DELAY(200);
1708 			t += 200;
1709 		} while (t < 150000);
1710 		DELAY(25000);
1711 	}
1712 
1713 	return ETIMEDOUT;
1714 }
1715 
1716 void
1717 iwm_apm_config(struct iwm_softc *sc)
1718 {
1719 	pcireg_t lctl, cap;
1720 
1721 	/*
1722 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1723 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so the device moves directly L0->L1;
	 *    this costs a negligible amount of power savings.
1726 	 * If not (unlikely), enable L0S, so there is at least some
1727 	 *    power savings, even without L1.
1728 	 */
1729 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1730 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1731 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1732 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1733 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1734 	} else {
1735 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1736 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1737 	}
1738 
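	/* Record whether the BIOS enabled PCIe Latency Tolerance Reporting. */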
1739 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1740 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1741 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1742 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1743 	    DEVNAME(sc),
1744 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1745 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1746 }
1747 
1748 /*
 * Start up the NIC's basic functionality after it has been reset,
 * e.g. after platform boot or shutdown.
 * NOTE: This does not load uCode nor start the embedded processor.
1752  */
1753 int
1754 iwm_apm_init(struct iwm_softc *sc)
1755 {
1756 	int err = 0;
1757 
1758 	/* Disable L0S exit timer (platform NMI workaround) */
1759 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1760 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1761 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1762 
1763 	/*
1764 	 * Disable L0s without affecting L1;
1765 	 *  don't wait for ICH L0s (ICH bug W/A)
1766 	 */
1767 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1768 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1769 
1770 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1771 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1772 
1773 	/*
1774 	 * Enable HAP INTA (interrupt from management bus) to
1775 	 * wake device's PCI Express link L1a -> L0s
1776 	 */
1777 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1778 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1779 
1780 	iwm_apm_config(sc);
1781 
1782 #if 0 /* not for 7k/8k */
1783 	/* Configure analog phase-lock-loop before activating to D0A */
1784 	if (trans->cfg->base_params->pll_cfg_val)
1785 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1786 		    trans->cfg->base_params->pll_cfg_val);
1787 #endif
1788 
1789 	/*
1790 	 * Set "initialization complete" bit to move adapter from
1791 	 * D0U* --> D0A* (powered-up active) state.
1792 	 */
1793 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1794 
1795 	/*
1796 	 * Wait for clock stabilization; once stabilized, access to
1797 	 * device-internal resources is supported, e.g. iwm_write_prph()
1798 	 * and accesses to uCode SRAM.
1799 	 */
1800 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1801 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1802 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1803 		printf("%s: timeout waiting for clock stabilization\n",
1804 		    DEVNAME(sc));
1805 		err = ETIMEDOUT;
1806 		goto out;
1807 	}
1808 
1809 	if (sc->host_interrupt_operation_mode) {
1810 		/*
		 * This is a bit of an abuse - this workaround is needed for
		 * 7260 / 3160 only, so we check host_interrupt_operation_mode
		 * even though the workaround itself is unrelated to it.
		 *
		 * Enable the oscillator to count wake-up time for L1 exit.
		 * This consumes slightly more power (100uA) but allows us
		 * to be sure that we wake up from L1 on time.
1818 		 *
1819 		 * This looks weird: read twice the same register, discard the
1820 		 * value, set a bit, and yet again, read that same register
1821 		 * just to discard the value. But that's the way the hardware
1822 		 * seems to like it.
1823 		 */
1824 		if (iwm_nic_lock(sc)) {
1825 			iwm_read_prph(sc, IWM_OSC_CLK);
1826 			iwm_read_prph(sc, IWM_OSC_CLK);
1827 			iwm_nic_unlock(sc);
1828 		}
1829 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1830 		    IWM_OSC_CLK_FORCE_CONTROL);
1831 		if (err)
1832 			goto out;
1833 		if (iwm_nic_lock(sc)) {
1834 			iwm_read_prph(sc, IWM_OSC_CLK);
1835 			iwm_read_prph(sc, IWM_OSC_CLK);
1836 			iwm_nic_unlock(sc);
1837 		}
1838 	}
1839 
1840 	/*
1841 	 * Enable DMA clock and wait for it to stabilize.
1842 	 *
1843 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1844 	 * do not disable clocks.  This preserves any hardware bits already
1845 	 * set by default in "CLK_CTRL_REG" after reset.
1846 	 */
1847 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1848 		if (iwm_nic_lock(sc)) {
1849 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1850 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1851 			iwm_nic_unlock(sc);
1852 		}
1853 		DELAY(20);
1854 
1855 		/* Disable L1-Active */
1856 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1857 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1858 		if (err)
1859 			goto out;
1860 
1861 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1862 		if (iwm_nic_lock(sc)) {
1863 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1864 			    IWM_APMG_RTC_INT_STT_RFKILL);
1865 			iwm_nic_unlock(sc);
1866 		}
1867 	}
1868  out:
1869 	if (err)
1870 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1871 	return err;
1872 }
1873 
1874 void
1875 iwm_apm_stop(struct iwm_softc *sc)
1876 {
1877 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1878 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1879 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1880 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1881 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1882 	DELAY(1000);
1883 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1884 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1885 	DELAY(5000);
1886 
1887 	/* stop device's busmaster DMA activity */
1888 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1889 
1890 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1891 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1892 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1893 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1894 
1895 	/*
1896 	 * Clear "initialization complete" bit to move adapter from
1897 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1898 	 */
1899 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1900 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1901 }
1902 
1903 void
1904 iwm_init_msix_hw(struct iwm_softc *sc)
1905 {
1906 	iwm_conf_msix_hw(sc, 0);
1907 
1908 	if (!sc->sc_msix)
1909 		return;
1910 
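	/* Remember which interrupt causes iwm_conf_msix_hw() left unmasked. */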
1911 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1912 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1913 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1914 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1915 }
1916 
1917 void
1918 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1919 {
1920 	int vector = 0;
1921 
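	/* The driver uses a single MSI-X vector; all causes map to it. */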
1922 	if (!sc->sc_msix) {
1923 		/* Newer chips default to MSIX. */
1924 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1925 			iwm_write_prph(sc, IWM_UREG_CHICK,
1926 			    IWM_UREG_CHICK_MSI_ENABLE);
1927 			iwm_nic_unlock(sc);
1928 		}
1929 		return;
1930 	}
1931 
1932 	if (!stopped && iwm_nic_lock(sc)) {
1933 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1934 		iwm_nic_unlock(sc);
1935 	}
1936 
1937 	/* Disable all interrupts */
1938 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1939 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1940 
1941 	/* Map fallback-queue (command/mgmt) to a single vector */
1942 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1943 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1944 	/* Map RSS queue (data) to the same vector */
1945 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1946 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1947 
	/* Enable the RX queues' interrupt causes. */
1949 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1950 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1951 
1952 	/* Map non-RX causes to the same vector */
1953 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1954 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1955 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1956 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1957 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1958 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1959 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1960 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1961 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1962 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1963 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1964 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1965 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1966 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1967 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1968 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1969 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1970 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1971 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1972 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1973 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1974 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1975 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1976 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1977 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1978 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1979 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1980 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1981 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1982 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1983 
	/* Enable the non-RX interrupt causes. */
1985 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1986 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1987 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1988 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1989 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1990 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1991 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1992 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
1993 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
1994 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
1995 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
1996 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
1997 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
1998 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
1999 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2000 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2001 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2002 }
2003 
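/*
 * 9000-family devices may have a persistence bit left set, which must
 * be cleared before the device is reset. If the WFPM write protection
 * is enabled we cannot clear the bit and must give up.
 */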
2004 int
2005 iwm_clear_persistence_bit(struct iwm_softc *sc)
2006 {
2007 	uint32_t hpm, wprot;
2008 
2009 	hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2010 	if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2011 		wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2012 		if (wprot & IWM_PREG_WFPM_ACCESS) {
2013 			printf("%s: cannot clear persistence bit\n",
2014 			    DEVNAME(sc));
2015 			return EPERM;
2016 		}
2017 		iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2018 		    hpm & ~IWM_HPM_PERSISTENCE_BIT);
2019 	}
2020 
2021 	return 0;
2022 }
2023 
2024 int
2025 iwm_start_hw(struct iwm_softc *sc)
2026 {
2027 	int err;
2028 
2029 	err = iwm_prepare_card_hw(sc);
2030 	if (err)
2031 		return err;
2032 
2033 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2034 		err = iwm_clear_persistence_bit(sc);
2035 		if (err)
2036 			return err;
2037 	}
2038 
2039 	/* Reset the entire device */
2040 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2041 	DELAY(5000);
2042 
2043 	err = iwm_apm_init(sc);
2044 	if (err)
2045 		return err;
2046 
2047 	iwm_init_msix_hw(sc);
2048 
2049 	iwm_enable_rfkill_int(sc);
2050 	iwm_check_rfkill(sc);
2051 
2052 	return 0;
2053 }
2054 
2055 
2056 void
2057 iwm_stop_device(struct iwm_softc *sc)
2058 {
2059 	int chnl, ntries;
2060 	int qid;
2061 
2062 	iwm_disable_interrupts(sc);
2063 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2064 
2065 	/* Stop all DMA channels. */
2066 	if (iwm_nic_lock(sc)) {
2067 		/* Deactivate TX scheduler. */
2068 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2069 
2070 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2071 			IWM_WRITE(sc,
2072 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
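			/* Wait up to ~4ms for the channel to go idle. */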
2073 			for (ntries = 0; ntries < 200; ntries++) {
2074 				uint32_t r;
2075 
2076 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2077 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2078 				    chnl))
2079 					break;
2080 				DELAY(20);
2081 			}
2082 		}
2083 		iwm_nic_unlock(sc);
2084 	}
2085 	iwm_disable_rx_dma(sc);
2086 
2087 	iwm_reset_rx_ring(sc, &sc->rxq);
2088 
2089 	for (qid = 0; qid < nitems(sc->txq); qid++)
2090 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
2091 
2092 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2093 		if (iwm_nic_lock(sc)) {
2094 			/* Power-down device's busmaster DMA clocks */
2095 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2096 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2097 			iwm_nic_unlock(sc);
2098 		}
2099 		DELAY(5);
2100 	}
2101 
2102 	/* Make sure (redundant) we've released our request to stay awake */
2103 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2104 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2105 	if (sc->sc_nic_locks > 0)
2106 		printf("%s: %d active NIC locks forcefully cleared\n",
2107 		    DEVNAME(sc), sc->sc_nic_locks);
2108 	sc->sc_nic_locks = 0;
2109 
2110 	/* Stop the device, and put it in low power state */
2111 	iwm_apm_stop(sc);
2112 
2113 	/* Reset the on-board processor. */
2114 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2115 	DELAY(5000);
2116 
2117 	/*
2118 	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * Upon stop, the IVAR table gets erased, so MSI-X won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that re-enables the radio won't fire on the correct irq, and the
2122 	 * Configure the IVAR table again after reset.
2123 	 */
2124 	iwm_conf_msix_hw(sc, 1);
2125 
2126 	/*
2127 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2128 	 * Clear the interrupt again.
2129 	 */
2130 	iwm_disable_interrupts(sc);
2131 
2132 	/* Even though we stop the HW we still want the RF kill interrupt. */
2133 	iwm_enable_rfkill_int(sc);
2134 	iwm_check_rfkill(sc);
2135 
2136 	iwm_prepare_card_hw(sc);
2137 }
2138 
2139 void
2140 iwm_nic_config(struct iwm_softc *sc)
2141 {
2142 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2143 	uint32_t mask, val, reg_val = 0;
2144 
2145 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2146 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2147 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2148 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2149 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2150 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2151 
2152 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2153 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2154 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2155 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2156 
2157 	/* radio configuration */
2158 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2159 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2160 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2161 
2162 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2163 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2164 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2165 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2166 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2167 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2168 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2169 
2170 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2171 	val &= ~mask;
2172 	val |= reg_val;
2173 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2174 
2175 	/*
2176 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
2177 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and be unable to obtain it back.
2179 	 */
2180 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2181 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2182 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2183 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2184 }
2185 
2186 int
2187 iwm_nic_rx_init(struct iwm_softc *sc)
2188 {
2189 	if (sc->sc_mqrx_supported)
2190 		return iwm_nic_rx_mq_init(sc);
2191 	else
2192 		return iwm_nic_rx_legacy_init(sc);
2193 }
2194 
2195 int
2196 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2197 {
2198 	int enabled;
2199 
2200 	if (!iwm_nic_lock(sc))
2201 		return EBUSY;
2202 
2203 	/* Stop RX DMA. */
2204 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2205 	/* Disable RX used and free queue operation. */
2206 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2207 
2208 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2209 	    sc->rxq.free_desc_dma.paddr);
2210 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2211 	    sc->rxq.used_desc_dma.paddr);
2212 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2213 	    sc->rxq.stat_dma.paddr);
2214 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2215 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2216 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2217 
2218 	/* We configure only queue 0 for now. */
2219 	enabled = ((1 << 0) << 16) | (1 << 0);
2220 
2221 	/* Enable RX DMA, 4KB buffer size. */
2222 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2223 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2224 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2225 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2226 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2227 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2228 
2229 	/* Enable RX DMA snooping. */
2230 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2231 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2232 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2233 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2234 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2235 
2236 	/* Enable the configured queue(s). */
2237 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2238 
2239 	iwm_nic_unlock(sc);
2240 
2241 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2242 
2243 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2244 
2245 	return 0;
2246 }
2247 
2248 int
2249 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2250 {
2251 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2252 
2253 	iwm_disable_rx_dma(sc);
2254 
2255 	if (!iwm_nic_lock(sc))
2256 		return EBUSY;
2257 
2258 	/* reset and flush pointers */
2259 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2260 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2261 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2262 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2263 
2264 	/* Set physical address of RX ring (256-byte aligned). */
2265 	IWM_WRITE(sc,
2266 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2267 
2268 	/* Set physical address of RX status (16-byte aligned). */
2269 	IWM_WRITE(sc,
2270 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2271 
2272 	/* Enable RX. */
2273 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2274 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2275 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2276 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2277 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2278 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2279 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2280 
2281 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2282 
2283 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2284 	if (sc->host_interrupt_operation_mode)
2285 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2286 
2287 	iwm_nic_unlock(sc);
2288 
2289 	/*
2290 	 * This value should initially be 0 (before preparing any RBs),
2291 	 * and should be 8 after preparing the first 8 RBs (for example).
2292 	 */
2293 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2294 
2295 	return 0;
2296 }
2297 
2298 int
2299 iwm_nic_tx_init(struct iwm_softc *sc)
2300 {
2301 	int qid, err;
2302 
2303 	if (!iwm_nic_lock(sc))
2304 		return EBUSY;
2305 
2306 	/* Deactivate TX scheduler. */
2307 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2308 
2309 	/* Set physical address of "keep warm" page (16-byte aligned). */
2310 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2311 
2312 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2313 		struct iwm_tx_ring *txq = &sc->txq[qid];
2314 
2315 		/* Set physical address of TX ring (256-byte aligned). */
2316 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2317 		    txq->desc_dma.paddr >> 8);
2318 	}
2319 
2320 	err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2321 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2322 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2323 
2324 	iwm_nic_unlock(sc);
2325 
2326 	return err;
2327 }
2328 
2329 int
2330 iwm_nic_init(struct iwm_softc *sc)
2331 {
2332 	int err;
2333 
2334 	iwm_apm_init(sc);
2335 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2336 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2337 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2338 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2339 
2340 	iwm_nic_config(sc);
2341 
2342 	err = iwm_nic_rx_init(sc);
2343 	if (err)
2344 		return err;
2345 
2346 	err = iwm_nic_tx_init(sc);
2347 	if (err)
2348 		return err;
2349 
2350 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2351 
2352 	return 0;
2353 }
2354 
2355 /* Map a TID to an ieee80211_edca_ac category. */
2356 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2357 	EDCA_AC_BE,
2358 	EDCA_AC_BK,
2359 	EDCA_AC_BK,
2360 	EDCA_AC_BE,
2361 	EDCA_AC_VI,
2362 	EDCA_AC_VI,
2363 	EDCA_AC_VO,
2364 	EDCA_AC_VO,
2365 };
2366 
2367 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2368 const uint8_t iwm_ac_to_tx_fifo[] = {
2369 	IWM_TX_FIFO_BE,
2370 	IWM_TX_FIFO_BK,
2371 	IWM_TX_FIFO_VI,
2372 	IWM_TX_FIFO_VO,
2373 };
2374 
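/*
 * Enable a non-aggregation Tx queue by programming the scheduler
 * registers directly. Aggregation queues are instead configured via
 * an IWM_SCD_QUEUE_CFG firmware command; see iwm_enable_txq() below.
 */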
2375 int
2376 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2377 {
	int err;

	iwm_nic_assert_locked(sc);
2380 
2381 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2382 
2383 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2384 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2385 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2386 
2387 	err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
	if (err)
		return err;
2391 
2392 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2393 
2394 	iwm_write_mem32(sc,
2395 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2396 
2397 	/* Set scheduler window size and frame limit. */
2398 	iwm_write_mem32(sc,
2399 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2400 	    sizeof(uint32_t),
2401 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2402 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2403 	    ((IWM_FRAME_LIMIT
2404 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2405 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2406 
2407 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2408 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2409 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2410 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2411 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2412 
2413 	if (qid == sc->cmdqid)
2414 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2415 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2416 
2417 	return 0;
2418 }
2419 
2420 int
2421 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2422     int aggregate, uint8_t tid, uint16_t ssn)
2423 {
2424 	struct iwm_tx_ring *ring = &sc->txq[qid];
2425 	struct iwm_scd_txq_cfg_cmd cmd;
2426 	int err, idx, scd_bug;
2427 
2428 	iwm_nic_assert_locked(sc);
2429 
2430 	/*
2431 	 * If we need to move the SCD write pointer by steps of
2432 	 * 0x40, 0x80 or 0xc0, it gets stuck.
2433 	 * This is really ugly, but this is the easiest way out for
2434 	 * this sad hardware issue.
2435 	 * This bug has been fixed on devices 9000 and up.
2436 	 */
2437 	scd_bug = !sc->sc_mqrx_supported &&
2438 		!((ssn - ring->cur) & 0x3f) &&
2439 		(ssn != ring->cur);
2440 	if (scd_bug)
2441 		ssn = (ssn + 1) & 0xfff;
2442 
2443 	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2444 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2445 	ring->cur = idx;
2446 	ring->tail = idx;
2447 
2448 	memset(&cmd, 0, sizeof(cmd));
2449 	cmd.tid = tid;
2450 	cmd.scd_queue = qid;
2451 	cmd.enable = 1;
2452 	cmd.sta_id = sta_id;
2453 	cmd.tx_fifo = fifo;
2454 	cmd.aggregate = aggregate;
2455 	cmd.ssn = htole16(ssn);
2456 	cmd.window = IWM_FRAME_LIMIT;
2457 
2458 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2459 	    sizeof(cmd), &cmd);
2460 	if (err)
2461 		return err;
2462 
2463 	sc->qenablemsk |= (1 << qid);
2464 	return 0;
2465 }
2466 
2467 int
2468 iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2469 {
2470 	struct iwm_scd_txq_cfg_cmd cmd;
2471 	int err;
2472 
2473 	memset(&cmd, 0, sizeof(cmd));
2474 	cmd.tid = tid;
2475 	cmd.scd_queue = qid;
2476 	cmd.enable = 0;
2477 	cmd.sta_id = sta_id;
2478 
2479 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2480 	if (err)
2481 		return err;
2482 
2483 	sc->qenablemsk &= ~(1 << qid);
2484 	return 0;
2485 }
2486 
2487 int
2488 iwm_post_alive(struct iwm_softc *sc)
2489 {
2490 	int nwords;
2491 	int err, chnl;
2492 	uint32_t base;
2493 
2494 	if (!iwm_nic_lock(sc))
2495 		return EBUSY;
2496 
2497 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2498 
2499 	iwm_ict_reset(sc);
2500 
2501 	iwm_nic_unlock(sc);
2502 
2503 	/* Clear TX scheduler state in SRAM. */
2504 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2505 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2506 	    / sizeof(uint32_t);
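	/* Passing a NULL buffer makes iwm_write_mem() write zeros. */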
2507 	err = iwm_write_mem(sc,
2508 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2509 	    NULL, nwords);
2510 	if (err)
2511 		return err;
2512 
2513 	if (!iwm_nic_lock(sc))
2514 		return EBUSY;
2515 
2516 	/* Set physical address of TX scheduler rings (1KB aligned). */
2517 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2518 
2519 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2520 
2521 	/* enable command channel */
2522 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2523 	if (err) {
2524 		iwm_nic_unlock(sc);
2525 		return err;
2526 	}
2527 
2528 	/* Activate TX scheduler. */
2529 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2530 
2531 	/* Enable DMA channels. */
2532 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2533 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2534 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2535 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2536 	}
2537 
2538 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2539 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2540 
2541 	iwm_nic_unlock(sc);
2542 
2543 	/* Enable L1-Active */
2544 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2545 		err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2546 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2547 	}
2548 
2549 	return err;
2550 }
2551 
2552 struct iwm_phy_db_entry *
2553 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2554 {
2555 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2556 
2557 	if (type >= IWM_PHY_DB_MAX)
2558 		return NULL;
2559 
2560 	switch (type) {
2561 	case IWM_PHY_DB_CFG:
2562 		return &phy_db->cfg;
2563 	case IWM_PHY_DB_CALIB_NCH:
2564 		return &phy_db->calib_nch;
2565 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2566 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2567 			return NULL;
2568 		return &phy_db->calib_ch_group_papd[chg_id];
2569 	case IWM_PHY_DB_CALIB_CHG_TXP:
2570 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2571 			return NULL;
2572 		return &phy_db->calib_ch_group_txp[chg_id];
2573 	default:
2574 		return NULL;
2575 	}
2576 	return NULL;
2577 }
2578 
2579 int
2580 iwm_phy_db_set_section(struct iwm_softc *sc,
2581     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2582 {
2583 	uint16_t type = le16toh(phy_db_notif->type);
2584 	uint16_t size  = le16toh(phy_db_notif->length);
2585 	struct iwm_phy_db_entry *entry;
2586 	uint16_t chg_id = 0;
2587 
2588 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2589 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2590 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2591 
2592 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2593 	if (!entry)
2594 		return EINVAL;
2595 
2596 	if (entry->data)
2597 		free(entry->data, M_DEVBUF, entry->size);
2598 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2599 	if (!entry->data) {
2600 		entry->size = 0;
2601 		return ENOMEM;
2602 	}
2603 	memcpy(entry->data, phy_db_notif->data, size);
2604 	entry->size = size;
2605 
2606 	return 0;
2607 }
2608 
2609 int
2610 iwm_is_valid_channel(uint16_t ch_id)
2611 {
2612 	if (ch_id <= 14 ||
2613 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2614 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2615 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2616 		return 1;
2617 	return 0;
2618 }
2619 
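/*
 * Map a channel number to a flat index: channels 1-14 map to indices
 * 0-13, 36-64 to 14-21, 100-140 to 22-32, and 145-165 to 33-38.
 * For example, channel 36 yields (36 + 20) / 4 = 14.
 */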
2620 uint8_t
2621 iwm_ch_id_to_ch_index(uint16_t ch_id)
2622 {
2623 	if (!iwm_is_valid_channel(ch_id))
2624 		return 0xff;
2625 
2626 	if (ch_id <= 14)
2627 		return ch_id - 1;
2628 	if (ch_id <= 64)
2629 		return (ch_id + 20) / 4;
2630 	if (ch_id <= 140)
2631 		return (ch_id - 12) / 4;
2632 	return (ch_id - 13) / 4;
2633 }
2634 
2635 
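/*
 * Map a channel to one of the IWM_NUM_PAPD_CH_GROUPS calibration
 * channel groups: 2GHz, low 5GHz, mid 5GHz, and high 5GHz.
 */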
2636 uint16_t
2637 iwm_channel_id_to_papd(uint16_t ch_id)
2638 {
2639 	if (!iwm_is_valid_channel(ch_id))
2640 		return 0xff;
2641 
2642 	if (1 <= ch_id && ch_id <= 14)
2643 		return 0;
2644 	if (36 <= ch_id && ch_id <= 64)
2645 		return 1;
2646 	if (100 <= ch_id && ch_id <= 140)
2647 		return 2;
2648 	return 3;
2649 }
2650 
2651 uint16_t
2652 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2653 {
2654 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2655 	struct iwm_phy_db_chg_txp *txp_chg;
2656 	int i;
2657 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2658 
2659 	if (ch_index == 0xff)
2660 		return 0xff;
2661 
2662 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2663 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2664 		if (!txp_chg)
2665 			return 0xff;
2666 		/*
		 * Look for the first channel group whose maximum channel
		 * index is at or above the requested channel.
2669 		 */
2670 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2671 			return i;
2672 	}
2673 	return 0xff;
2674 }
2675 
2676 int
2677 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2678     uint16_t *size, uint16_t ch_id)
2679 {
2680 	struct iwm_phy_db_entry *entry;
2681 	uint16_t ch_group_id = 0;
2682 
2683 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2684 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2685 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2686 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2687 
2688 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2689 	if (!entry)
2690 		return EINVAL;
2691 
2692 	*data = entry->data;
2693 	*size = entry->size;
2694 
2695 	return 0;
2696 }
2697 
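/*
 * Send one PHY DB section to the firmware as a two-fragment host
 * command: the fixed header in data[0], the section payload in data[1].
 */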
2698 int
2699 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2700     void *data)
2701 {
2702 	struct iwm_phy_db_cmd phy_db_cmd;
2703 	struct iwm_host_cmd cmd = {
2704 		.id = IWM_PHY_DB_CMD,
2705 		.flags = IWM_CMD_ASYNC,
2706 	};
2707 
2708 	phy_db_cmd.type = le16toh(type);
2709 	phy_db_cmd.length = le16toh(length);
2710 
2711 	cmd.data[0] = &phy_db_cmd;
2712 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2713 	cmd.data[1] = data;
2714 	cmd.len[1] = length;
2715 
2716 	return iwm_send_cmd(sc, &cmd);
2717 }
2718 
2719 int
2720 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2721     uint8_t max_ch_groups)
2722 {
2723 	uint16_t i;
2724 	int err;
2725 	struct iwm_phy_db_entry *entry;
2726 
2727 	for (i = 0; i < max_ch_groups; i++) {
2728 		entry = iwm_phy_db_get_section(sc, type, i);
2729 		if (!entry)
2730 			return EINVAL;
2731 
2732 		if (!entry->size)
2733 			continue;
2734 
2735 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2736 		if (err)
2737 			return err;
2738 
2739 		DELAY(1000);
2740 	}
2741 
2742 	return 0;
2743 }
2744 
2745 int
2746 iwm_send_phy_db_data(struct iwm_softc *sc)
2747 {
2748 	uint8_t *data = NULL;
2749 	uint16_t size = 0;
2750 	int err;
2751 
2752 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2753 	if (err)
2754 		return err;
2755 
2756 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2757 	if (err)
2758 		return err;
2759 
2760 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2761 	    &data, &size, 0);
2762 	if (err)
2763 		return err;
2764 
2765 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2766 	if (err)
2767 		return err;
2768 
2769 	err = iwm_phy_db_send_all_channel_groups(sc,
2770 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2771 	if (err)
2772 		return err;
2773 
2774 	err = iwm_phy_db_send_all_channel_groups(sc,
2775 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2776 	if (err)
2777 		return err;
2778 
2779 	return 0;
2780 }
2781 
2782 /*
2783  * For the high priority TE use a time event type that has similar priority to
2784  * the FW's action scan priority.
2785  */
2786 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2787 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2788 
2789 int
2790 iwm_send_time_event_cmd(struct iwm_softc *sc,
2791     const struct iwm_time_event_cmd *cmd)
2792 {
2793 	struct iwm_rx_packet *pkt;
2794 	struct iwm_time_event_resp *resp;
2795 	struct iwm_host_cmd hcmd = {
2796 		.id = IWM_TIME_EVENT_CMD,
2797 		.flags = IWM_CMD_WANT_RESP,
2798 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2799 	};
2800 	uint32_t resp_len;
2801 	int err;
2802 
2803 	hcmd.data[0] = cmd;
2804 	hcmd.len[0] = sizeof(*cmd);
2805 	err = iwm_send_cmd(sc, &hcmd);
2806 	if (err)
2807 		return err;
2808 
2809 	pkt = hcmd.resp_pkt;
2810 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2811 		err = EIO;
2812 		goto out;
2813 	}
2814 
2815 	resp_len = iwm_rx_packet_payload_len(pkt);
2816 	if (resp_len != sizeof(*resp)) {
2817 		err = EIO;
2818 		goto out;
2819 	}
2820 
2821 	resp = (void *)pkt->data;
2822 	if (le32toh(resp->status) == 0)
2823 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2824 	else
2825 		err = EIO;
2826 out:
2827 	iwm_free_resp(sc, &hcmd);
2828 	return err;
2829 }
2830 
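/*
 * Schedule a "time event" with the firmware: a window of reserved air
 * time around association so the exchange is not interrupted, using
 * the aggressive-association event type.
 */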
2831 void
2832 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2833     uint32_t duration, uint32_t max_delay)
2834 {
2835 	struct iwm_time_event_cmd time_cmd;
2836 
2837 	/* Do nothing if a time event is already scheduled. */
2838 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2839 		return;
2840 
2841 	memset(&time_cmd, 0, sizeof(time_cmd));
2842 
2843 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2844 	time_cmd.id_and_color =
2845 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2846 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2847 
2848 	time_cmd.apply_time = htole32(0);
2849 
2850 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2851 	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to set interval = bi if it is not periodic? */
2853 	time_cmd.interval = htole32(1);
2854 	time_cmd.duration = htole32(duration);
2855 	time_cmd.repeat = 1;
2856 	time_cmd.policy
2857 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2858 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2859 		IWM_T2_V2_START_IMMEDIATELY);
2860 
2861 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2862 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2863 
2864 	DELAY(100);
2865 }
2866 
2867 void
2868 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2869 {
2870 	struct iwm_time_event_cmd time_cmd;
2871 
2872 	/* Do nothing if the time event has already ended. */
2873 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2874 		return;
2875 
2876 	memset(&time_cmd, 0, sizeof(time_cmd));
2877 
2878 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2879 	time_cmd.id_and_color =
2880 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2881 	time_cmd.id = htole32(sc->sc_time_event_uid);
2882 
2883 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2884 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2885 
2886 	DELAY(100);
2887 }
2888 
2889 /*
2890  * NVM read access and content parsing.  We do not support
2891  * external NVM or writing NVM.
2892  */
2893 
2894 /* list of NVM sections we are allowed/need to read */
2895 const int iwm_nvm_to_read[] = {
2896 	IWM_NVM_SECTION_TYPE_HW,
2897 	IWM_NVM_SECTION_TYPE_SW,
2898 	IWM_NVM_SECTION_TYPE_REGULATORY,
2899 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2900 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2901 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2902 	IWM_NVM_SECTION_TYPE_HW_8000,
2903 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2904 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2905 };
2906 
2907 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2908 
2909 #define IWM_NVM_WRITE_OPCODE 1
2910 #define IWM_NVM_READ_OPCODE 0
2911 
2912 int
2913 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2914     uint16_t length, uint8_t *data, uint16_t *len)
2915 {
2917 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2918 		.offset = htole16(offset),
2919 		.length = htole16(length),
2920 		.type = htole16(section),
2921 		.op_code = IWM_NVM_READ_OPCODE,
2922 	};
2923 	struct iwm_nvm_access_resp *nvm_resp;
2924 	struct iwm_rx_packet *pkt;
2925 	struct iwm_host_cmd cmd = {
2926 		.id = IWM_NVM_ACCESS_CMD,
2927 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2928 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2929 		.data = { &nvm_access_cmd, },
2930 	};
2931 	int err, offset_read;
2932 	size_t bytes_read;
2933 	uint8_t *resp_data;
2934 
2935 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2936 
2937 	err = iwm_send_cmd(sc, &cmd);
2938 	if (err)
2939 		return err;
2940 
2941 	pkt = cmd.resp_pkt;
2942 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2943 		err = EIO;
2944 		goto exit;
2945 	}
2946 
2947 	/* Extract NVM response */
2948 	nvm_resp = (void *)pkt->data;
2949 	if (nvm_resp == NULL)
2950 		return EIO;
2951 
2952 	err = le16toh(nvm_resp->status);
2953 	bytes_read = le16toh(nvm_resp->length);
2954 	offset_read = le16toh(nvm_resp->offset);
2955 	resp_data = nvm_resp->data;
2956 	if (err) {
2957 		err = EINVAL;
2958 		goto exit;
2959 	}
2960 
2961 	if (offset_read != offset) {
2962 		err = EINVAL;
2963 		goto exit;
2964 	}
2965 
2966 	if (bytes_read > length) {
2967 		err = EINVAL;
2968 		goto exit;
2969 	}
2970 
2971 	memcpy(data + offset, resp_data, bytes_read);
2972 	*len = bytes_read;
2973 
2974  exit:
2975 	iwm_free_resp(sc, &cmd);
2976 	return err;
2977 }
2978 
2979 /*
2980  * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
 * by uCode, we need to manually check in this case that we don't
2984  * overflow and try to read more than the EEPROM size.
2985  */
2986 int
2987 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2988     uint16_t *len, size_t max_len)
2989 {
2990 	uint16_t chunklen, seglen;
2991 	int err = 0;
2992 
2993 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2994 	*len = 0;
2995 
2996 	/* Read NVM chunks until exhausted (reading less than requested) */
2997 	while (seglen == chunklen && *len < max_len) {
2998 		err = iwm_nvm_read_chunk(sc,
2999 		    section, *len, chunklen, data, &seglen);
3000 		if (err)
3001 			return err;
3002 
3003 		*len += seglen;
3004 	}
3005 
3006 	return err;
3007 }
3008 
3009 uint8_t
3010 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3011 {
3012 	uint8_t tx_ant;
3013 
3014 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3015 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3016 
3017 	if (sc->sc_nvm.valid_tx_ant)
3018 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3019 
3020 	return tx_ant;
3021 }
3022 
3023 uint8_t
3024 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3025 {
3026 	uint8_t rx_ant;
3027 
3028 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3029 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3030 
3031 	if (sc->sc_nvm.valid_rx_ant)
3032 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3033 
3034 	return rx_ant;
3035 }
3036 
3037 void
3038 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3039     const uint8_t *nvm_channels, int nchan)
3040 {
3041 	struct ieee80211com *ic = &sc->sc_ic;
3042 	struct iwm_nvm_data *data = &sc->sc_nvm;
3043 	int ch_idx;
3044 	struct ieee80211_channel *channel;
3045 	uint16_t ch_flags;
3046 	int is_5ghz;
3047 	int flags, hw_value;
3048 
3049 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3050 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3051 
3052 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3053 		    !data->sku_cap_band_52GHz_enable)
3054 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3055 
3056 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3057 			continue;
3058 
3059 		hw_value = nvm_channels[ch_idx];
3060 		channel = &ic->ic_channels[hw_value];
3061 
3062 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3063 		if (!is_5ghz) {
3064 			flags = IEEE80211_CHAN_2GHZ;
3065 			channel->ic_flags
3066 			    = IEEE80211_CHAN_CCK
3067 			    | IEEE80211_CHAN_OFDM
3068 			    | IEEE80211_CHAN_DYN
3069 			    | IEEE80211_CHAN_2GHZ;
3070 		} else {
3071 			flags = IEEE80211_CHAN_5GHZ;
3072 			channel->ic_flags =
3073 			    IEEE80211_CHAN_A;
3074 		}
3075 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3076 
3077 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3078 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3079 
3080 		if (data->sku_cap_11n_enable) {
3081 			channel->ic_flags |= IEEE80211_CHAN_HT;
3082 			if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3083 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3084 		}
3085 	}
3086 }
3087 
3088 int
3089 iwm_mimo_enabled(struct iwm_softc *sc)
3090 {
3091 	struct ieee80211com *ic = &sc->sc_ic;
3092 
3093 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3094 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3095 }
3096 
3097 void
3098 iwm_setup_ht_rates(struct iwm_softc *sc)
3099 {
3100 	struct ieee80211com *ic = &sc->sc_ic;
3101 	uint8_t rx_ant;
3102 
3103 	/* TX is supported with the same MCS as RX. */
3104 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3105 
3106 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3107 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3108 
3109 	if (!iwm_mimo_enabled(sc))
3110 		return;
3111 
3112 	rx_ant = iwm_fw_valid_rx_ant(sc);
3113 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3114 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3115 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3116 }
3117 
3118 void
3119 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3120     uint16_t ssn, uint16_t buf_size)
3121 {
3122 	reorder_buf->head_sn = ssn;
3123 	reorder_buf->num_stored = 0;
3124 	reorder_buf->buf_size = buf_size;
3125 	reorder_buf->last_amsdu = 0;
3126 	reorder_buf->last_sub_index = 0;
3127 	reorder_buf->removed = 0;
3128 	reorder_buf->valid = 0;
3129 	reorder_buf->consec_oldsn_drops = 0;
3130 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3131 	reorder_buf->consec_oldsn_prev_drop = 0;
3132 }
3133 
3134 void
3135 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3136 {
3137 	int i;
3138 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3139 	struct iwm_reorder_buf_entry *entry;
3140 
3141 	for (i = 0; i < reorder_buf->buf_size; i++) {
3142 		entry = &rxba->entries[i];
3143 		ml_purge(&entry->frames);
3144 		timerclear(&entry->reorder_time);
3145 	}
3146 
3147 	reorder_buf->removed = 1;
3148 	timeout_del(&reorder_buf->reorder_timer);
3149 	timerclear(&rxba->last_rx);
3150 	timeout_del(&rxba->session_timer);
3151 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3152 }
3153 
3154 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3155 
3156 void
3157 iwm_rx_ba_session_expired(void *arg)
3158 {
3159 	struct iwm_rxba_data *rxba = arg;
3160 	struct iwm_softc *sc = rxba->sc;
3161 	struct ieee80211com *ic = &sc->sc_ic;
3162 	struct ieee80211_node *ni = ic->ic_bss;
3163 	struct timeval now, timeout, expiry;
3164 	int s;
3165 
3166 	s = splnet();
3167 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3168 	    ic->ic_state == IEEE80211_S_RUN &&
3169 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3170 		getmicrouptime(&now);
3171 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3172 		timeradd(&rxba->last_rx, &timeout, &expiry);
3173 		if (timercmp(&now, &expiry, <)) {
3174 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3175 		} else {
3176 			ic->ic_stats.is_ht_rx_ba_timeout++;
3177 			ieee80211_delba_request(ic, ni,
3178 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3179 		}
3180 	}
3181 	splx(s);
3182 }
3183 
3184 void
3185 iwm_reorder_timer_expired(void *arg)
3186 {
3187 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3188 	struct iwm_reorder_buffer *buf = arg;
3189 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3190 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3191 	struct iwm_softc *sc = rxba->sc;
3192 	struct ieee80211com *ic = &sc->sc_ic;
3193 	struct ieee80211_node *ni = ic->ic_bss;
3194 	int i, s;
3195 	uint16_t sn = 0, index = 0;
3196 	int expired = 0;
3197 	int cont = 0;
3198 	struct timeval now, timeout, expiry;
3199 
3200 	if (!buf->num_stored || buf->removed)
3201 		return;
3202 
3203 	s = splnet();
3204 	getmicrouptime(&now);
3205 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3206 
	for (i = 0; i < buf->buf_size; i++) {
3208 		index = (buf->head_sn + i) % buf->buf_size;
3209 
3210 		if (ml_empty(&entries[index].frames)) {
3211 			/*
3212 			 * If there is a hole and the next frame didn't expire
3213 			 * we want to break and not advance SN.
3214 			 */
3215 			cont = 0;
3216 			continue;
3217 		}
3218 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3219 		if (!cont && timercmp(&now, &expiry, <))
3220 			break;
3221 
3222 		expired = 1;
3223 		/* continue until next hole after this expired frame */
3224 		cont = 1;
3225 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3226 	}
3227 
3228 	if (expired) {
3229 		/* SN is set to the last expired frame + 1 */
3230 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3231 		if_input(&sc->sc_ic.ic_if, &ml);
3232 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3233 	} else {
3234 		/*
3235 		 * If no frame expired and there are stored frames, index is now
3236 		 * pointing to the first unexpired frame - modify reorder timeout
3237 		 * accordingly.
3238 		 */
3239 		timeout_add_usec(&buf->reorder_timer,
3240 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3241 	}
3242 
3243 	splx(s);
3244 }
3245 
3246 #define IWM_MAX_RX_BA_SESSIONS 16
3247 
3248 int
3249 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3250     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3251 {
3252 	struct ieee80211com *ic = &sc->sc_ic;
3253 	struct iwm_add_sta_cmd cmd;
3254 	struct iwm_node *in = (void *)ni;
3255 	int err, s;
3256 	uint32_t status;
3257 	size_t cmdsize;
3258 	struct iwm_rxba_data *rxba = NULL;
3259 	uint8_t baid = 0;
3260 
3261 	s = splnet();
3262 
3263 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3264 		ieee80211_addba_req_refuse(ic, ni, tid);
3265 		splx(s);
3266 		return 0;
3267 	}
3268 
3269 	memset(&cmd, 0, sizeof(cmd));
3270 
3271 	cmd.sta_id = IWM_STATION_ID;
3272 	cmd.mac_id_n_color
3273 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3274 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3275 
3276 	if (start) {
3277 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3278 		cmd.add_immediate_ba_ssn = ssn;
3279 		cmd.rx_ba_window = winsize;
3280 	} else {
3281 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3282 	}
3283 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3284 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3285 
3286 	status = IWM_ADD_STA_SUCCESS;
3287 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3288 		cmdsize = sizeof(cmd);
3289 	else
3290 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3291 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3292 	    &status);
3293 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3294 		err = EIO;
3295 	if (err) {
3296 		if (start)
3297 			ieee80211_addba_req_refuse(ic, ni, tid);
3298 		splx(s);
3299 		return err;
3300 	}
3301 
3302 	if (sc->sc_mqrx_supported) {
3303 		/* Deaggregation is done in hardware. */
3304 		if (start) {
3305 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3306 				ieee80211_addba_req_refuse(ic, ni, tid);
3307 				splx(s);
3308 				return EIO;
3309 			}
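			/*
			 * The firmware assigned a block-ack ID (BAID) for
			 * this session; use it to index our reorder state.
			 */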
3310 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3311 			    IWM_ADD_STA_BAID_SHIFT;
3312 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3313 			    baid >= nitems(sc->sc_rxba_data)) {
3314 				ieee80211_addba_req_refuse(ic, ni, tid);
3315 				splx(s);
3316 				return EIO;
3317 			}
3318 			rxba = &sc->sc_rxba_data[baid];
3319 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3320 				ieee80211_addba_req_refuse(ic, ni, tid);
3321 				splx(s);
3322 				return 0;
3323 			}
3324 			rxba->sta_id = IWM_STATION_ID;
3325 			rxba->tid = tid;
3326 			rxba->baid = baid;
3327 			rxba->timeout = timeout_val;
3328 			getmicrouptime(&rxba->last_rx);
3329 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3330 			    winsize);
3331 			if (timeout_val != 0) {
3332 				struct ieee80211_rx_ba *ba;
3333 				timeout_add_usec(&rxba->session_timer,
3334 				    timeout_val);
3335 				/* XXX disable net80211's BA timeout handler */
3336 				ba = &ni->ni_rx_ba[tid];
3337 				ba->ba_timeout_val = 0;
3338 			}
3339 		} else {
3340 			int i;
3341 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3342 				rxba = &sc->sc_rxba_data[i];
3343 				if (rxba->baid ==
3344 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3345 					continue;
3346 				if (rxba->tid != tid)
3347 					continue;
3348 				iwm_clear_reorder_buffer(sc, rxba);
3349 				break;
3350 			}
3351 		}
3352 	}
3353 
3354 	if (start) {
3355 		sc->sc_rx_ba_sessions++;
3356 		ieee80211_addba_req_accept(ic, ni, tid);
3357 	} else if (sc->sc_rx_ba_sessions > 0)
3358 		sc->sc_rx_ba_sessions--;
3359 
3360 	splx(s);
3361 	return 0;
3362 }
3363 
3364 void
3365 iwm_mac_ctxt_task(void *arg)
3366 {
3367 	struct iwm_softc *sc = arg;
3368 	struct ieee80211com *ic = &sc->sc_ic;
3369 	struct iwm_node *in = (void *)ic->ic_bss;
3370 	int err, s = splnet();
3371 
3372 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3373 	    ic->ic_state != IEEE80211_S_RUN) {
3374 		refcnt_rele_wake(&sc->task_refs);
3375 		splx(s);
3376 		return;
3377 	}
3378 
3379 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3380 	if (err)
3381 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3382 
3383 	refcnt_rele_wake(&sc->task_refs);
3384 	splx(s);
3385 }
3386 
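/*
 * The net80211 callbacks below each require a MAC context update,
 * which involves sending a firmware command, so defer the work to
 * the mac_ctxt_task.
 */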
3387 void
3388 iwm_updateprot(struct ieee80211com *ic)
3389 {
3390 	struct iwm_softc *sc = ic->ic_softc;
3391 
3392 	if (ic->ic_state == IEEE80211_S_RUN &&
3393 	    !task_pending(&sc->newstate_task))
3394 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3395 }
3396 
3397 void
3398 iwm_updateslot(struct ieee80211com *ic)
3399 {
3400 	struct iwm_softc *sc = ic->ic_softc;
3401 
3402 	if (ic->ic_state == IEEE80211_S_RUN &&
3403 	    !task_pending(&sc->newstate_task))
3404 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3405 }
3406 
3407 void
3408 iwm_updateedca(struct ieee80211com *ic)
3409 {
3410 	struct iwm_softc *sc = ic->ic_softc;
3411 
3412 	if (ic->ic_state == IEEE80211_S_RUN &&
3413 	    !task_pending(&sc->newstate_task))
3414 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3415 }
3416 
3417 void
3418 iwm_phy_ctxt_task(void *arg)
3419 {
3420 	struct iwm_softc *sc = arg;
3421 	struct ieee80211com *ic = &sc->sc_ic;
3422 	struct iwm_node *in = (void *)ic->ic_bss;
3423 	struct ieee80211_node *ni = &in->in_ni;
3424 	uint8_t chains, sco;
3425 	int err, s = splnet();
3426 
3427 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3428 	    ic->ic_state != IEEE80211_S_RUN ||
3429 	    in->in_phyctxt == NULL) {
3430 		refcnt_rele_wake(&sc->task_refs);
3431 		splx(s);
3432 		return;
3433 	}
3434 
3435 	chains = iwm_mimo_enabled(sc) ? 2 : 1;
3436 	if (ieee80211_node_supports_ht_chan40(ni))
3437 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3438 	else
3439 		sco = IEEE80211_HTOP0_SCO_SCN;
3440 	if (in->in_phyctxt->sco != sco) {
3441 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3442 		    in->in_phyctxt->channel, chains, chains, 0, sco);
3443 		if (err)
3444 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3445 		iwm_setrates(in, 0);
3446 	}
3447 
3448 	refcnt_rele_wake(&sc->task_refs);
3449 	splx(s);
3450 }
3451 
3452 void
3453 iwm_updatechan(struct ieee80211com *ic)
3454 {
3455 	struct iwm_softc *sc = ic->ic_softc;
3456 
3457 	if (ic->ic_state == IEEE80211_S_RUN &&
3458 	    !task_pending(&sc->newstate_task))
3459 		iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3460 }
3461 
3462 int
3463 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3464     uint16_t ssn, uint16_t winsize, int start)
3465 {
3466 	struct iwm_add_sta_cmd cmd;
3467 	struct ieee80211com *ic = &sc->sc_ic;
3468 	struct iwm_node *in = (void *)ni;
3469 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3470 	struct iwm_tx_ring *ring;
3471 	enum ieee80211_edca_ac ac;
3472 	int fifo;
3473 	uint32_t status;
3474 	int err;
3475 	size_t cmdsize;
3476 
3477 	/* Ensure we can map this TID to an aggregation queue. */
3478 	if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3479 		return ENOSPC;
3480 
3481 	if (start) {
3482 		if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3483 			return 0;
3484 	} else {
3485 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3486 			return 0;
3487 	}
3488 
3489 	ring = &sc->txq[qid];
3490 	ac = iwm_tid_to_ac[tid];
3491 	fifo = iwm_ac_to_tx_fifo[ac];
3492 
3493 	memset(&cmd, 0, sizeof(cmd));
3494 
3495 	cmd.sta_id = IWM_STATION_ID;
3496 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3497 	    in->in_color));
3498 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3499 
3500 	if (start) {
3501 		/* Enable Tx aggregation for this queue. */
3502 		in->tid_disable_ampdu &= ~(1 << tid);
3503 		in->tfd_queue_msk |= (1 << qid);
3504 	} else {
3505 		in->tid_disable_ampdu |= (1 << tid);
3506 		/*
3507 		 * Queue remains enabled in the TFD queue mask
3508 		 * until we leave RUN state.
3509 		 */
3510 		err = iwm_flush_sta(sc, in);
3511 		if (err)
3512 			return err;
3513 	}
3514 
3515 	cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3516 	cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3517 	cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3518 	    IWM_STA_MODIFY_TID_DISABLE_TX);
3519 
3520 	if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3521 		if (!iwm_nic_lock(sc)) {
3522 			/* start is nonzero in this branch; refuse the ADDBA. */
3523 			ieee80211_addba_resp_refuse(ic, ni, tid,
3524 			    IEEE80211_STATUS_UNSPECIFIED);
3525 			return EBUSY;
3526 		}
3527 		err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3528 		    ssn);
3529 		iwm_nic_unlock(sc);
3530 		if (err) {
3531 			printf("%s: could not enable Tx queue %d (error %d)\n",
3532 			    DEVNAME(sc), qid, err);
3533 			/* start is nonzero in this branch; refuse the ADDBA. */
3534 			ieee80211_addba_resp_refuse(ic, ni, tid,
3535 			    IEEE80211_STATUS_UNSPECIFIED);
3536 			return err;
3537 		}
3538 		/*
3539 		 * If iwm_enable_txq() employed the SCD hardware bug
3540 		 * workaround we must skip the frame with seqnum SSN.
3541 		 */
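		/*
		 * Sketch of the index mapping (assuming, as a reading of the
		 * driver headers, that IWM_AGG_SSN_TO_TXQ_IDX(x) is
		 * ((x) & (IWM_TX_RING_COUNT - 1)) with a 256-entry ring):
		 * SSN 0x101 maps to ring index 0x01. If the workaround left
		 * ring->cur one slot ahead of IWM_AGG_SSN_TO_TXQ_IDX(ssn),
		 * advancing the SSN by one restores the invariant asserted
		 * below.
		 */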
3542 		if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3543 			ssn = (ssn + 1) & 0xfff;
3544 			KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3545 			ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3546 			ni->ni_qos_txseqs[tid] = ssn;
3547 		}
3548 	}
3549 
3550 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3551 		cmdsize = sizeof(cmd);
3552 	else
3553 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3554 
3555 	status = 0;
3556 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3557 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3558 		err = EIO;
3559 	if (err) {
3560 		printf("%s: could not update sta (error %d)\n",
3561 		    DEVNAME(sc), err);
3562 		if (start)
3563 			ieee80211_addba_resp_refuse(ic, ni, tid,
3564 			    IEEE80211_STATUS_UNSPECIFIED);
3565 		return err;
3566 	}
3567 
3568 	if (start) {
3569 		sc->tx_ba_queue_mask |= (1 << qid);
3570 		ieee80211_addba_resp_accept(ic, ni, tid);
3571 	} else {
3572 		sc->tx_ba_queue_mask &= ~(1 << qid);
3573 
3574 		/*
3575 		 * Clear pending frames but keep the queue enabled.
3576 		 * Firmware panics if we disable the queue here.
3577 		 */
3578 		iwm_txq_advance(sc, ring, ring->cur);
3579 		iwm_clear_oactive(sc, ring);
3580 	}
3581 
3582 	return 0;
3583 }
3584 
3585 void
3586 iwm_ba_task(void *arg)
3587 {
3588 	struct iwm_softc *sc = arg;
3589 	struct ieee80211com *ic = &sc->sc_ic;
3590 	struct ieee80211_node *ni = ic->ic_bss;
3591 	int s = splnet();
3592 	int tid, err = 0;
3593 
3594 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3595 	    ic->ic_state != IEEE80211_S_RUN) {
3596 		refcnt_rele_wake(&sc->task_refs);
3597 		splx(s);
3598 		return;
3599 	}
3600 
3601 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3602 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3603 			break;
3604 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3605 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3606 			err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3607 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3608 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3609 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3610 			err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3611 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3612 		}
3613 	}
3614 
3615 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3616 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3617 			break;
3618 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3619 			struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3620 			err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3621 			    ba->ba_winsize, 1);
3622 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3623 		} else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3624 			err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3625 			sc->ba_tx.stop_tidmask &= ~(1 << tid);
3626 		}
3627 	}
3628 
3629 	/*
3630 	 * We "recover" from failure to start or stop a BA session
3631 	 * by resetting the device.
3632 	 */
3633 	if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3634 		task_add(systq, &sc->init_task);
3635 
3636 	refcnt_rele_wake(&sc->task_refs);
3637 	splx(s);
3638 }
3639 
3640 /*
3641  * This function is called by the upper layer when an ADDBA request is
3642  * received from another STA and before the ADDBA response is sent.
3643  */
3644 int
3645 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3646     uint8_t tid)
3647 {
3648 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3649 
3650 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3651 	    tid > IWM_MAX_TID_COUNT)
3652 		return ENOSPC;
3653 
3654 	if (sc->ba_rx.start_tidmask & (1 << tid))
3655 		return EBUSY;
3656 
3657 	sc->ba_rx.start_tidmask |= (1 << tid);
3658 	iwm_add_task(sc, systq, &sc->ba_task);
3659 
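	/*
	 * Returning EBUSY defers the decision: net80211 will not send the
	 * ADDBA response until iwm_ba_task() calls
	 * ieee80211_addba_req_accept() or ieee80211_addba_req_refuse().
	 */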
3660 	return EBUSY;
3661 }
3662 
3663 /*
3664  * This function is called by the upper layer on teardown of an HT-immediate
3665  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3666  */
3667 void
3668 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3669     uint8_t tid)
3670 {
3671 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3672 
3673 	if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3674 		return;
3675 
3676 	sc->ba_rx.stop_tidmask |= (1 << tid);
3677 	iwm_add_task(sc, systq, &sc->ba_task);
3678 }
3679 
3680 int
3681 iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3682     uint8_t tid)
3683 {
3684 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3685 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3686 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3687 
3688 	/* We only implement Tx aggregation with DQA-capable firmware. */
3689 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3690 		return ENOTSUP;
3691 
3692 	/* Ensure we can map this TID to an aggregation queue. */
3693 	if (tid >= IWM_MAX_TID_COUNT)
3694 		return EINVAL;
3695 
3696 	/* We only support a fixed Tx aggregation window size, for now. */
3697 	if (ba->ba_winsize != IWM_FRAME_LIMIT)
3698 		return ENOTSUP;
3699 
3700 	/* Is firmware already using Tx aggregation on this queue? */
3701 	if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3702 		return ENOSPC;
3703 
3704 	/* Are we already processing an ADDBA request? */
3705 	if (sc->ba_tx.start_tidmask & (1 << tid))
3706 		return EBUSY;
3707 
3708 	sc->ba_tx.start_tidmask |= (1 << tid);
3709 	iwm_add_task(sc, systq, &sc->ba_task);
3710 
3711 	return EBUSY;
3712 }
3713 
3714 void
3715 iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3716     uint8_t tid)
3717 {
3718 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3719 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3720 
3721 	if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3722 		return;
3723 
3724 	/* Is firmware currently using Tx aggregation on this queue? */
3725 	if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3726 		return;
3727 
3728 	sc->ba_tx.stop_tidmask |= (1 << tid);
3729 	iwm_add_task(sc, systq, &sc->ba_task);
3730 }
3731 
3732 void
3733 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3734     const uint16_t *mac_override, const uint16_t *nvm_hw)
3735 {
3736 	const uint8_t *hw_addr;
3737 
3738 	if (mac_override) {
3739 		static const uint8_t reserved_mac[] = {
3740 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3741 		};
3742 
3743 		hw_addr = (const uint8_t *)(mac_override +
3744 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3745 
3746 		/*
3747 		 * Store the MAC address from the MAO (MAC address override)
3748 		 * section; no byte swapping is required there.
3749 		 */
3750 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3751 
3752 		/*
3753 		 * Force the use of the OTP MAC address if the NVM contains the
3754 		 * reserved MAC address, or if the given address is invalid.
3755 		 */
3756 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3757 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3758 		    sizeof(etherbroadcastaddr)) != 0) &&
3759 		    (memcmp(etheranyaddr, data->hw_addr,
3760 		    sizeof(etheranyaddr)) != 0) &&
3761 		    !ETHER_IS_MULTICAST(data->hw_addr))
3762 			return;
3763 	}
3764 
3765 	if (nvm_hw) {
3766 		/* Read the mac address from WFMP registers. */
3767 		uint32_t mac_addr0, mac_addr1;
3768 
3769 		if (!iwm_nic_lock(sc))
3770 			goto out;
3771 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3772 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3773 		iwm_nic_unlock(sc);
3774 
3775 		hw_addr = (const uint8_t *)&mac_addr0;
3776 		data->hw_addr[0] = hw_addr[3];
3777 		data->hw_addr[1] = hw_addr[2];
3778 		data->hw_addr[2] = hw_addr[1];
3779 		data->hw_addr[3] = hw_addr[0];
3780 
3781 		hw_addr = (const uint8_t *)&mac_addr1;
3782 		data->hw_addr[4] = hw_addr[1];
3783 		data->hw_addr[5] = hw_addr[0];
3784 
3785 		return;
3786 	}
3787 out:
3788 	printf("%s: mac address not found\n", DEVNAME(sc));
3789 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3790 }
3791 
3792 int
3793 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3794     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3795     const uint16_t *mac_override, const uint16_t *phy_sku,
3796     const uint16_t *regulatory, int n_regulatory)
3797 {
3798 	struct iwm_nvm_data *data = &sc->sc_nvm;
3799 	uint8_t hw_addr[ETHER_ADDR_LEN];
3800 	uint32_t sku;
3801 	uint16_t lar_config;
3802 
3803 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3804 
3805 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3806 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3807 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3808 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3809 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3810 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3811 
3812 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3813 	} else {
3814 		uint32_t radio_cfg =
3815 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3816 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3817 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3818 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3819 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3820 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3821 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3822 
3823 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3824 	}
3825 
3826 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3827 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3828 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3829 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3830 
3831 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3832 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3833 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3834 				       IWM_NVM_LAR_OFFSET_8000;
3835 
3836 		lar_config = le16_to_cpup(regulatory + lar_offset);
3837 		data->lar_enabled = !!(lar_config &
3838 				       IWM_NVM_LAR_ENABLED_8000);
3839 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3840 	} else
3841 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3842 
3843 
3844 	/* Stored as little-endian 16-bit words: bytes 123456 appear as 214365. */
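	/*
	 * Illustrative example: if the NVM holds the bytes aa bb cc dd ee ff,
	 * the swap below yields the logical address bb:aa:dd:cc:ff:ee.
	 */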
3845 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3846 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3847 		data->hw_addr[0] = hw_addr[1];
3848 		data->hw_addr[1] = hw_addr[0];
3849 		data->hw_addr[2] = hw_addr[3];
3850 		data->hw_addr[3] = hw_addr[2];
3851 		data->hw_addr[4] = hw_addr[5];
3852 		data->hw_addr[5] = hw_addr[4];
3853 	} else
3854 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3855 
3856 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3857 		if (sc->nvm_type == IWM_NVM_SDP) {
3858 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3859 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3860 		} else {
3861 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3862 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3863 		}
3864 	} else
3865 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3866 		    iwm_nvm_channels_8000,
3867 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3868 
3869 	data->calib_version = 255;   /* TODO:
3870 					this value prevents some checks from
3871 					failing; we need to check whether this
3872 					field is still needed and, if so,
3873 					where it lives in the NVM */
3874 
3875 	return 0;
3876 }
3877 
3878 int
3879 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3880 {
3881 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3882 	const uint16_t *regulatory = NULL;
3883 	int n_regulatory = 0;
3884 
3885 	/* Checking for required sections */
3886 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3887 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3888 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3889 			return ENOENT;
3890 		}
3891 
3892 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3893 
3894 		if (sc->nvm_type == IWM_NVM_SDP) {
3895 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3896 				return ENOENT;
3897 			regulatory = (const uint16_t *)
3898 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3899 			n_regulatory =
3900 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3901 		}
3902 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3903 		/* SW and REGULATORY sections are mandatory */
3904 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3905 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3906 			return ENOENT;
3907 		}
3908 		/* MAC_OVERRIDE or at least HW section must exist */
3909 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3910 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3911 			return ENOENT;
3912 		}
3913 
3914 		/* PHY_SKU section is mandatory in B0 */
3915 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3916 			return ENOENT;
3917 		}
3918 
3919 		regulatory = (const uint16_t *)
3920 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3921 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
3922 		hw = (const uint16_t *)
3923 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3924 		mac_override =
3925 			(const uint16_t *)
3926 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3927 		phy_sku = (const uint16_t *)
3928 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3929 	} else {
3930 		panic("unknown device family %d", sc->sc_device_family);
3931 	}
3932 
3933 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3934 	calib = (const uint16_t *)
3935 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3936 
3937 	/* XXX should pass in the length of every section */
3938 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3939 	    phy_sku, regulatory, n_regulatory);
3940 }
3941 
3942 int
3943 iwm_nvm_init(struct iwm_softc *sc)
3944 {
3945 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3946 	int i, section, err;
3947 	uint16_t len;
3948 	uint8_t *buf;
3949 	const size_t bufsz = sc->sc_nvm_max_section_size;
3950 
3951 	memset(nvm_sections, 0, sizeof(nvm_sections));
3952 
3953 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3954 	if (buf == NULL)
3955 		return ENOMEM;
3956 
3957 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
3958 		section = iwm_nvm_to_read[i];
3959 		KASSERT(section < nitems(nvm_sections));
3960 
3961 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3962 		if (err) {
3963 			err = 0;
3964 			continue;
3965 		}
3966 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3967 		if (nvm_sections[section].data == NULL) {
3968 			err = ENOMEM;
3969 			break;
3970 		}
3971 		memcpy(nvm_sections[section].data, buf, len);
3972 		nvm_sections[section].length = len;
3973 	}
3974 	free(buf, M_DEVBUF, bufsz);
3975 	if (err == 0)
3976 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3977 
3978 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3979 		if (nvm_sections[i].data != NULL)
3980 			free(nvm_sections[i].data, M_DEVBUF,
3981 			    nvm_sections[i].length);
3982 	}
3983 
3984 	return err;
3985 }
3986 
3987 int
3988 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3989     const uint8_t *section, uint32_t byte_cnt)
3990 {
3991 	int err = EINVAL;
3992 	uint32_t chunk_sz, offset;
3993 
3994 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3995 
3996 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3997 		uint32_t addr, len;
3998 		const uint8_t *data;
3999 
4000 		addr = dst_addr + offset;
4001 		len = MIN(chunk_sz, byte_cnt - offset);
4002 		data = section + offset;
4003 
4004 		err = iwm_firmware_load_chunk(sc, addr, data, len);
4005 		if (err)
4006 			break;
4007 	}
4008 
4009 	return err;
4010 }
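
/*
 * For example (assuming IWM_FH_MEM_TB_MAX_LENGTH is 0x20000, i.e. 128KB),
 * a 300KB section is loaded as three chunks of 128KB, 128KB and 44KB,
 * each handed to iwm_firmware_load_chunk() below.
 */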
4011 
4012 int
4013 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4014     const uint8_t *chunk, uint32_t byte_cnt)
4015 {
4016 	struct iwm_dma_info *dma = &sc->fw_dma;
4017 	int err;
4018 
4019 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
4020 	memcpy(dma->vaddr, chunk, byte_cnt);
4021 	bus_dmamap_sync(sc->sc_dmat,
4022 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4023 
4024 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4025 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4026 		err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4027 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4028 		if (err)
4029 			return err;
4030 	}
4031 
4032 	sc->sc_fw_chunk_done = 0;
4033 
4034 	if (!iwm_nic_lock(sc))
4035 		return EBUSY;
4036 
4037 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4038 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4039 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4040 	    dst_addr);
4041 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4042 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4043 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4044 	    (iwm_get_dma_hi_addr(dma->paddr)
4045 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4046 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4047 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4048 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4049 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4050 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4051 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
4052 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4053 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4054 
4055 	iwm_nic_unlock(sc);
4056 
4057 	/* Wait for this segment to load. */
4058 	err = 0;
4059 	while (!sc->sc_fw_chunk_done) {
4060 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4061 		if (err)
4062 			break;
4063 	}
4064 
4065 	if (!sc->sc_fw_chunk_done)
4066 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4067 		    DEVNAME(sc), dst_addr, byte_cnt);
4068 
4069 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4070 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4071 		int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4072 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4073 		if (!err)
4074 			err = err2;
4075 	}
4076 
4077 	return err;
4078 }
4079 
4080 int
4081 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4082 {
4083 	struct iwm_fw_sects *fws;
4084 	int err, i;
4085 	void *data;
4086 	uint32_t dlen;
4087 	uint32_t offset;
4088 
4089 	fws = &sc->sc_fw.fw_sects[ucode_type];
4090 	for (i = 0; i < fws->fw_count; i++) {
4091 		data = fws->fw_sect[i].fws_data;
4092 		dlen = fws->fw_sect[i].fws_len;
4093 		offset = fws->fw_sect[i].fws_devoff;
4094 		if (dlen > sc->sc_fwdmasegsz) {
4095 			err = EFBIG;
4096 		} else
4097 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4098 		if (err) {
4099 			printf("%s: could not load firmware chunk %u of %u\n",
4100 			    DEVNAME(sc), i, fws->fw_count);
4101 			return err;
4102 		}
4103 	}
4104 
4105 	iwm_enable_interrupts(sc);
4106 
4107 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
4108 
4109 	return 0;
4110 }
4111 
4112 int
4113 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4114     int cpu, int *first_ucode_section)
4115 {
4116 	int shift_param;
4117 	int i, err = 0, sec_num = 0x1;
4118 	uint32_t val, last_read_idx = 0;
4119 	void *data;
4120 	uint32_t dlen;
4121 	uint32_t offset;
4122 
4123 	if (cpu == 1) {
4124 		shift_param = 0;
4125 		*first_ucode_section = 0;
4126 	} else {
4127 		shift_param = 16;
4128 		(*first_ucode_section)++;
4129 	}
4130 
4131 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4132 		last_read_idx = i;
4133 		data = fws->fw_sect[i].fws_data;
4134 		dlen = fws->fw_sect[i].fws_len;
4135 		offset = fws->fw_sect[i].fws_devoff;
4136 
4137 		/*
4138 		 * A CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
4139 		 * CPU1 sections from the CPU2 sections.
4140 		 * A PAGING_SEPARATOR_SECTION delimiter separates the
4141 		 * non-paged CPU2 sections from the CPU2 paging sections.
4142 		 */
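		/*
		 * Illustrative layout (a sketch, not from a specific
		 * firmware image): a section whose fws_devoff equals a
		 * separator value acts as a delimiter, e.g.
		 *   CPU1 sections | CPU1_CPU2 separator |
		 *   CPU2 non-paged sections | PAGING separator |
		 *   CPU2 paging sections
		 */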
4143 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4144 		    offset == IWM_PAGING_SEPARATOR_SECTION)
4145 			break;
4146 
4147 		if (dlen > sc->sc_fwdmasegsz) {
4148 			err = EFBIG;
4149 		} else
4150 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4151 		if (err) {
4152 			printf("%s: could not load firmware chunk %d "
4153 			    "(error %d)\n", DEVNAME(sc), i, err);
4154 			return err;
4155 		}
4156 
4157 		/* Notify the ucode of the loaded section number and status */
4158 		if (iwm_nic_lock(sc)) {
4159 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4160 			val = val | (sec_num << shift_param);
4161 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4162 			sec_num = (sec_num << 1) | 0x1;
4163 			iwm_nic_unlock(sc);
4164 		} else {
4165 			err = EBUSY;
4166 			printf("%s: could not load firmware chunk %d "
4167 			    "(error %d)\n", DEVNAME(sc), i, err);
4168 			return err;
4169 		}
4170 	}
4171 
4172 	*first_ucode_section = last_read_idx;
4173 
4174 	if (iwm_nic_lock(sc)) {
4175 		if (cpu == 1)
4176 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4177 		else
4178 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4179 		iwm_nic_unlock(sc);
4180 	} else {
4181 		err = EBUSY;
4182 		printf("%s: could not finalize firmware loading (error %d)\n",
4183 		    DEVNAME(sc), err);
4184 		return err;
4185 	}
4186 
4187 	return 0;
4188 }
4189 
4190 int
4191 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4192 {
4193 	struct iwm_fw_sects *fws;
4194 	int err = 0;
4195 	int first_ucode_section;
4196 
4197 	fws = &sc->sc_fw.fw_sects[ucode_type];
4198 
4199 	/* configure the ucode to be ready to get the secured image */
4200 	/* release CPU reset */
4201 	if (iwm_nic_lock(sc)) {
4202 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4203 		    IWM_RELEASE_CPU_RESET_BIT);
4204 		iwm_nic_unlock(sc);
4205 	}
4206 
4207 	/* Load the secured binary sections of CPU1 into the device. */
4208 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4209 	if (err)
4210 		return err;
4211 
4212 	/* Load the binary sections of CPU2 into the device. */
4213 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4214 	if (err)
4215 		return err;
4216 
4217 	iwm_enable_interrupts(sc);
4218 	return 0;
4219 }
4220 
4221 int
4222 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4223 {
4224 	int err;
4225 
4226 	splassert(IPL_NET);
4227 
4228 	sc->sc_uc.uc_intr = 0;
4229 	sc->sc_uc.uc_ok = 0;
4230 
4231 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4232 		err = iwm_load_firmware_8000(sc, ucode_type);
4233 	else
4234 		err = iwm_load_firmware_7000(sc, ucode_type);
4235 
4236 	if (err)
4237 		return err;
4238 
4239 	/* wait for the firmware to load */
4240 	err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4241 	if (err || !sc->sc_uc.uc_ok)
4242 		printf("%s: could not load firmware\n", DEVNAME(sc));
4243 
4244 	return err;
4245 }
4246 
4247 int
4248 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4249 {
4250 	int err;
4251 
4252 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4253 
4254 	err = iwm_nic_init(sc);
4255 	if (err) {
4256 		printf("%s: unable to init nic\n", DEVNAME(sc));
4257 		return err;
4258 	}
4259 
4260 	/* make sure rfkill handshake bits are cleared */
4261 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4262 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4263 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4264 
4265 	/* clear (again), then enable firmware load interrupt */
4266 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4267 	iwm_enable_fwload_interrupt(sc);
4268 
4269 	/* really make sure rfkill handshake bits are cleared */
4270 	/* maybe we should write a few times more?  just to make sure */
4271 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4272 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4273 
4274 	return iwm_load_firmware(sc, ucode_type);
4275 }
4276 
4277 int
4278 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4279 {
4280 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4281 		.valid = htole32(valid_tx_ant),
4282 	};
4283 
4284 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4285 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4286 }
4287 
4288 int
4289 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4290 {
4291 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
4292 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4293 
4294 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4295 	phy_cfg_cmd.calib_control.event_trigger =
4296 	    sc->sc_default_calib[ucode_type].event_trigger;
4297 	phy_cfg_cmd.calib_control.flow_trigger =
4298 	    sc->sc_default_calib[ucode_type].flow_trigger;
4299 
4300 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4301 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4302 }
4303 
4304 int
4305 iwm_send_dqa_cmd(struct iwm_softc *sc)
4306 {
4307 	struct iwm_dqa_enable_cmd dqa_cmd = {
4308 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4309 	};
4310 	uint32_t cmd_id;
4311 
4312 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4313 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4314 }
4315 
4316 int
4317 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4318 	enum iwm_ucode_type ucode_type)
4319 {
4320 	enum iwm_ucode_type old_type = sc->sc_uc_current;
4321 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4322 	int err;
4323 
4324 	err = iwm_read_firmware(sc);
4325 	if (err)
4326 		return err;
4327 
4328 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4329 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
4330 	else
4331 		sc->cmdqid = IWM_CMD_QUEUE;
4332 
4333 	sc->sc_uc_current = ucode_type;
4334 	err = iwm_start_fw(sc, ucode_type);
4335 	if (err) {
4336 		sc->sc_uc_current = old_type;
4337 		return err;
4338 	}
4339 
4340 	err = iwm_post_alive(sc);
4341 	if (err)
4342 		return err;
4343 
4344 	/*
4345 	 * Configure and operate the firmware paging mechanism.
4346 	 * The driver configures the paging flow only once; the CPU2 paging
4347 	 * image is included in the IWM_UCODE_INIT image.
4348 	 */
4349 	if (fw->paging_mem_size) {
4350 		err = iwm_save_fw_paging(sc, fw);
4351 		if (err) {
4352 			printf("%s: failed to save the FW paging image\n",
4353 			    DEVNAME(sc));
4354 			return err;
4355 		}
4356 
4357 		err = iwm_send_paging_cmd(sc, fw);
4358 		if (err) {
4359 			printf("%s: failed to send the paging cmd\n",
4360 			    DEVNAME(sc));
4361 			iwm_free_fw_paging(sc);
4362 			return err;
4363 		}
4364 	}
4365 
4366 	return 0;
4367 }
4368 
4369 int
4370 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4371 {
4372 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4373 	int err, s;
4374 
4375 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4376 		printf("%s: radio is disabled by hardware switch\n",
4377 		    DEVNAME(sc));
4378 		return EPERM;
4379 	}
4380 
4381 	s = splnet();
4382 	sc->sc_init_complete = 0;
4383 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4384 	if (err) {
4385 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4386 		splx(s);
4387 		return err;
4388 	}
4389 
4390 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4391 		err = iwm_send_bt_init_conf(sc);
4392 		if (err) {
4393 			printf("%s: could not init bt coex (error %d)\n",
4394 			    DEVNAME(sc), err);
4395 			splx(s);
4396 			return err;
4397 		}
4398 	}
4399 
4400 	if (justnvm) {
4401 		err = iwm_nvm_init(sc);
4402 		if (err) {
4403 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4404 			splx(s);
4405 			return err;
4406 		}
4407 
4408 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4409 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4410 			    sc->sc_nvm.hw_addr);
4411 
4412 		splx(s);
4413 		return 0;
4414 	}
4415 
4416 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4417 	if (err) {
4418 		splx(s);
4419 		return err;
4420 	}
4421 
4422 	/* Send TX valid antennas before triggering calibrations */
4423 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4424 	if (err) {
4425 		splx(s);
4426 		return err;
4427 	}
4428 
4429 	/*
4430 	 * Send the PHY configuration command to the init uCode to start
4431 	 * the internal calibrations of the (16.0) uCode init image.
4432 	 */
4433 	err = iwm_send_phy_cfg_cmd(sc);
4434 	if (err) {
4435 		splx(s);
4436 		return err;
4437 	}
4438 
4439 	/*
4440 	 * Nothing to do but wait for the init complete and phy DB
4441 	 * notifications from the firmware.
4442 	 */
4443 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4444 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4445 		    SEC_TO_NSEC(2));
4446 		if (err)
4447 			break;
4448 	}
4449 
4450 	splx(s);
4451 	return err;
4452 }
4453 
4454 int
4455 iwm_config_ltr(struct iwm_softc *sc)
4456 {
4457 	struct iwm_ltr_config_cmd cmd = {
4458 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4459 	};
4460 
4461 	if (!sc->sc_ltr_enabled)
4462 		return 0;
4463 
4464 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4465 }
4466 
4467 int
4468 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4469 {
4470 	struct iwm_rx_ring *ring = &sc->rxq;
4471 	struct iwm_rx_data *data = &ring->data[idx];
4472 	struct mbuf *m;
4473 	int err;
4474 	int fatal = 0;
4475 
4476 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4477 	if (m == NULL)
4478 		return ENOBUFS;
4479 
4480 	if (size <= MCLBYTES) {
4481 		MCLGET(m, M_DONTWAIT);
4482 	} else {
4483 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4484 	}
4485 	if ((m->m_flags & M_EXT) == 0) {
4486 		m_freem(m);
4487 		return ENOBUFS;
4488 	}
4489 
4490 	if (data->m != NULL) {
4491 		bus_dmamap_unload(sc->sc_dmat, data->map);
4492 		fatal = 1;
4493 	}
4494 
4495 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4496 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4497 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4498 	if (err) {
4499 		/* XXX */
4500 		if (fatal)
4501 			panic("iwm: could not load RX mbuf");
4502 		m_freem(m);
4503 		return err;
4504 	}
4505 	data->m = m;
4506 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4507 
4508 	/* Update RX descriptor. */
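	/*
	 * Note the two descriptor formats: MQ-capable hardware takes the
	 * full 64-bit DMA address of the buffer, while older hardware
	 * takes the 256-byte-aligned address shifted right by 8 bits in
	 * a 32-bit word.
	 */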
4509 	if (sc->sc_mqrx_supported) {
4510 		((uint64_t *)ring->desc)[idx] =
4511 		    htole64(data->map->dm_segs[0].ds_addr);
4512 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4513 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4514 		    BUS_DMASYNC_PREWRITE);
4515 	} else {
4516 		((uint32_t *)ring->desc)[idx] =
4517 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4518 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4519 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4520 		    BUS_DMASYNC_PREWRITE);
4521 	}
4522 
4523 	return 0;
4524 }
4525 
4526 /*
4527  * RSSI values are reported by the FW as positive values - need to negate
4528  * to obtain their dBm.  Account for missing antennas by replacing 0
4529  * values with -256 dBm: practically zero power and an infeasible 8-bit value.
4530  */
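/*
 * Worked example with illustrative values: if the firmware reports
 * energy_a = 65, energy_b = 48 and a missing antenna C (0), the
 * per-antenna values become -65, -48 and -256 dBm, and the function
 * below returns the maximum, -48 dBm.
 */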
4531 int
4532 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4533 {
4534 	int energy_a, energy_b, energy_c, max_energy;
4535 	uint32_t val;
4536 
4537 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4538 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4539 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4540 	energy_a = energy_a ? -energy_a : -256;
4541 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4542 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4543 	energy_b = energy_b ? -energy_b : -256;
4544 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4545 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4546 	energy_c = energy_c ? -energy_c : -256;
4547 	max_energy = MAX(energy_a, energy_b);
4548 	max_energy = MAX(max_energy, energy_c);
4549 
4550 	return max_energy;
4551 }
4552 
4553 int
4554 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4555     struct iwm_rx_mpdu_desc *desc)
4556 {
4557 	int energy_a, energy_b;
4558 
4559 	energy_a = desc->v1.energy_a;
4560 	energy_b = desc->v1.energy_b;
4561 	energy_a = energy_a ? -energy_a : -256;
4562 	energy_b = energy_b ? -energy_b : -256;
4563 	return MAX(energy_a, energy_b);
4564 }
4565 
4566 void
4567 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4568     struct iwm_rx_data *data)
4569 {
4570 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4571 
4572 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4573 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4574 
4575 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4576 }
4577 
4578 /*
4579  * Retrieve the average noise (in dBm) among receivers.
4580  */
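/*
 * Worked example with illustrative values: beacon_silence_rssi of
 * { 34, 36, 0 } gives total = 70 across nbant = 2 antennas, so the
 * result is 70 / 2 - 107 = -72 dBm.
 */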
4581 int
4582 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4583 {
4584 	int i, total, nbant, noise;
4585 
4586 	total = nbant = noise = 0;
4587 	for (i = 0; i < 3; i++) {
4588 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4589 		if (noise) {
4590 			total += noise;
4591 			nbant++;
4592 		}
4593 	}
4594 
4595 	/* There should be at least one antenna but check anyway. */
4596 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4597 }
4598 
4599 int
4600 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4601     struct ieee80211_rxinfo *rxi)
4602 {
4603 	struct ieee80211com *ic = &sc->sc_ic;
4604 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4605 	struct ieee80211_frame *wh;
4606 	uint64_t pn, *prsc;
4607 	uint8_t *ivp;
4608 	uint8_t tid;
4609 	int hdrlen, hasqos;
4610 
4611 	wh = mtod(m, struct ieee80211_frame *);
4612 	hdrlen = ieee80211_get_hdrlen(wh);
4613 	ivp = (uint8_t *)wh + hdrlen;
4614 
4615 	/* Check that ExtIV bit is set. */
4616 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4617 		return 1;
4618 
4619 	hasqos = ieee80211_has_qos(wh);
4620 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4621 	prsc = &k->k_rsc[tid];
4622 
4623 	/* Extract the 48-bit PN from the CCMP header. */
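	/*
	 * For reference, the CCMP header is laid out as (IEEE 802.11):
	 * ivp[0] = PN0, ivp[1] = PN1, ivp[2] = reserved, ivp[3] = key
	 * ID / ExtIV, ivp[4] = PN2, ivp[5] = PN3, ivp[6] = PN4,
	 * ivp[7] = PN5.
	 */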
4624 	pn = (uint64_t)ivp[0]       |
4625 	     (uint64_t)ivp[1] <<  8 |
4626 	     (uint64_t)ivp[4] << 16 |
4627 	     (uint64_t)ivp[5] << 24 |
4628 	     (uint64_t)ivp[6] << 32 |
4629 	     (uint64_t)ivp[7] << 40;
4630 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4631 		if (pn < *prsc) {
4632 			ic->ic_stats.is_ccmp_replays++;
4633 			return 1;
4634 		}
4635 	} else if (pn <= *prsc) {
4636 		ic->ic_stats.is_ccmp_replays++;
4637 		return 1;
4638 	}
4639 	/* Last seen packet number is updated in ieee80211_inputm(). */
4640 
4641 	/*
4642 	 * Some firmware versions strip the MIC, and some don't. It is not
4643 	 * clear which of the capability flags could tell us what to expect.
4644 	 * For now, keep things simple and just leave the MIC in place if
4645 	 * it is present.
4646 	 *
4647 	 * The IV will be stripped by ieee80211_inputm().
4648 	 */
4649 	return 0;
4650 }
4651 
4652 int
4653 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4654     struct ieee80211_rxinfo *rxi)
4655 {
4656 	struct ieee80211com *ic = &sc->sc_ic;
4657 	struct ifnet *ifp = IC2IFP(ic);
4658 	struct ieee80211_frame *wh;
4659 	struct ieee80211_node *ni;
4660 	int ret = 0;
4661 	uint8_t type, subtype;
4662 
4663 	wh = mtod(m, struct ieee80211_frame *);
4664 
4665 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4666 	if (type == IEEE80211_FC0_TYPE_CTL)
4667 		return 0;
4668 
4669 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4670 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4671 		return 0;
4672 
4673 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4674 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4675 		return 0;
4676 
4677 	ni = ieee80211_find_rxnode(ic, wh);
4678 	/* Handle hardware decryption. */
4679 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4680 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4681 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4682 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4683 			ic->ic_stats.is_ccmp_dec_errs++;
4684 			ret = 1;
4685 			goto out;
4686 		}
4687 		/* Check whether decryption was successful or not. */
4688 		if ((rx_pkt_status &
4689 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4690 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4691 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4692 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4693 			ic->ic_stats.is_ccmp_dec_errs++;
4694 			ret = 1;
4695 			goto out;
4696 		}
4697 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4698 	}
4699 out:
4700 	if (ret)
4701 		ifp->if_ierrors++;
4702 	ieee80211_release_node(ic, ni);
4703 	return ret;
4704 }
4705 
4706 void
4707 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4708     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4709     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4710     struct mbuf_list *ml)
4711 {
4712 	struct ieee80211com *ic = &sc->sc_ic;
4713 	struct ifnet *ifp = IC2IFP(ic);
4714 	struct ieee80211_frame *wh;
4715 	struct ieee80211_node *ni;
4716 	struct ieee80211_channel *bss_chan;
4717 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
4718 
4719 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4720 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4721 
4722 	wh = mtod(m, struct ieee80211_frame *);
4723 	ni = ieee80211_find_rxnode(ic, wh);
4724 	if (ni == ic->ic_bss) {
4725 		/*
4726 		 * We may switch ic_bss's channel during scans.
4727 		 * Record the current channel so we can restore it later.
4728 		 */
4729 		bss_chan = ni->ni_chan;
4730 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
4731 	}
4732 	ni->ni_chan = &ic->ic_channels[chanidx];
4733 
4734 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4735 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4736 		ifp->if_ierrors++;
4737 		m_freem(m);
4738 		ieee80211_release_node(ic, ni);
4739 		return;
4740 	}
4741 
4742 #if NBPFILTER > 0
4743 	if (sc->sc_drvbpf != NULL) {
4744 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4745 		uint16_t chan_flags;
4746 
4747 		tap->wr_flags = 0;
4748 		if (is_shortpre)
4749 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4750 		tap->wr_chan_freq =
4751 		    htole16(ic->ic_channels[chanidx].ic_freq);
4752 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4753 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4754 			chan_flags &= ~IEEE80211_CHAN_HT;
4755 		tap->wr_chan_flags = htole16(chan_flags);
4756 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4757 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4758 		tap->wr_tsft = device_timestamp;
4759 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4760 			uint8_t mcs = (rate_n_flags &
4761 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4762 			    IWM_RATE_HT_MCS_NSS_MSK));
4763 			tap->wr_rate = (0x80 | mcs);
4764 		} else {
4765 			uint8_t rate = (rate_n_flags &
4766 			    IWM_RATE_LEGACY_RATE_MSK);
4767 			switch (rate) {
4768 			/* CCK rates. */
4769 			case  10: tap->wr_rate =   2; break;
4770 			case  20: tap->wr_rate =   4; break;
4771 			case  55: tap->wr_rate =  11; break;
4772 			case 110: tap->wr_rate =  22; break;
4773 			/* OFDM rates. */
4774 			case 0xd: tap->wr_rate =  12; break;
4775 			case 0xf: tap->wr_rate =  18; break;
4776 			case 0x5: tap->wr_rate =  24; break;
4777 			case 0x7: tap->wr_rate =  36; break;
4778 			case 0x9: tap->wr_rate =  48; break;
4779 			case 0xb: tap->wr_rate =  72; break;
4780 			case 0x1: tap->wr_rate =  96; break;
4781 			case 0x3: tap->wr_rate = 108; break;
4782 			/* Unknown rate: should not happen. */
4783 			default:  tap->wr_rate =   0;
4784 			}
4785 		}
4786 
4787 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4788 		    m, BPF_DIRECTION_IN);
4789 	}
4790 #endif
4791 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4792 	/*
4793 	 * ieee80211_inputm() might have changed our BSS.
4794 	 * Restore ic_bss's channel if we are still in the same BSS.
4795 	 */
4796 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4797 		ni->ni_chan = bss_chan;
4798 	ieee80211_release_node(ic, ni);
4799 }
4800 
4801 void
4802 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4803     size_t maxlen, struct mbuf_list *ml)
4804 {
4805 	struct ieee80211com *ic = &sc->sc_ic;
4806 	struct ieee80211_rxinfo rxi;
4807 	struct iwm_rx_phy_info *phy_info;
4808 	struct iwm_rx_mpdu_res_start *rx_res;
4809 	int device_timestamp;
4810 	uint16_t phy_flags;
4811 	uint32_t len;
4812 	uint32_t rx_pkt_status;
4813 	int rssi, chanidx, rate_n_flags;
4814 
4815 	memset(&rxi, 0, sizeof(rxi));
4816 
4817 	phy_info = &sc->sc_last_phy_info;
4818 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4819 	len = le16toh(rx_res->byte_count);
4820 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4821 		/* Allow control frames in monitor mode. */
4822 		if (len < sizeof(struct ieee80211_frame_cts)) {
4823 			ic->ic_stats.is_rx_tooshort++;
4824 			IC2IFP(ic)->if_ierrors++;
4825 			m_freem(m);
4826 			return;
4827 		}
4828 	} else if (len < sizeof(struct ieee80211_frame)) {
4829 		ic->ic_stats.is_rx_tooshort++;
4830 		IC2IFP(ic)->if_ierrors++;
4831 		m_freem(m);
4832 		return;
4833 	}
4834 	if (len > maxlen - sizeof(*rx_res)) {
4835 		IC2IFP(ic)->if_ierrors++;
4836 		m_freem(m);
4837 		return;
4838 	}
4839 
4840 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4841 		m_freem(m);
4842 		return;
4843 	}
4844 
4845 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4846 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4847 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4848 		m_freem(m);
4849 		return; /* drop */
4850 	}
4851 
4852 	m->m_data = pktdata + sizeof(*rx_res);
4853 	m->m_pkthdr.len = m->m_len = len;
4854 
4855 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4856 		m_freem(m);
4857 		return;
4858 	}
4859 
4860 	chanidx = letoh32(phy_info->channel);
4861 	device_timestamp = le32toh(phy_info->system_timestamp);
4862 	phy_flags = letoh16(phy_info->phy_flags);
4863 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4864 
4865 	rssi = iwm_get_signal_strength(sc, phy_info);
4866 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4867 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4868 
4869 	rxi.rxi_rssi = rssi;
4870 	rxi.rxi_tstamp = device_timestamp;
4871 
4872 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4873 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4874 	    rate_n_flags, device_timestamp, &rxi, ml);
4875 }
4876 
4877 void
4878 iwm_flip_address(uint8_t *addr)
4879 {
4880 	int i;
4881 	uint8_t mac_addr[ETHER_ADDR_LEN];
4882 
4883 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4884 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4885 	IEEE80211_ADDR_COPY(addr, mac_addr);
4886 }
4887 
4888 /*
4889  * Drop duplicate 802.11 retransmissions
4890  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4891  * and handle pseudo-duplicate frames which result from deaggregation
4892  * of A-MSDU frames in hardware.
4893  */
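/*
 * Worked example: a QoS data frame with the retry bit set, the same
 * TID and sequence number as the previous frame, and a non-advancing
 * A-MSDU subframe index is reported as a duplicate. A subframe of the
 * same A-MSDU with a higher subframe index is passed on, with
 * IEEE80211_RXI_SAME_SEQ set so that it is not discarded as a replay.
 */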
4894 int
4895 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4896     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4897 {
4898 	struct ieee80211com *ic = &sc->sc_ic;
4899 	struct iwm_node *in = (void *)ic->ic_bss;
4900 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4901 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4902 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4903 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4904 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4905 	int hasqos = ieee80211_has_qos(wh);
4906 	uint16_t seq;
4907 
4908 	if (type == IEEE80211_FC0_TYPE_CTL ||
4909 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4910 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4911 		return 0;
4912 
4913 	if (hasqos) {
4914 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4915 		if (tid > IWM_MAX_TID_COUNT)
4916 			tid = IWM_MAX_TID_COUNT;
4917 	}
4918 
4919 	/* If this wasn't part of an A-MSDU, the sub-frame index will be 0. */
4920 	subframe_idx = desc->amsdu_info &
4921 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4922 
4923 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4924 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4925 	    dup_data->last_seq[tid] == seq &&
4926 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4927 		return 1;
4928 
4929 	/*
4930 	 * Allow the same frame sequence number for all A-MSDU subframes
4931 	 * following the first subframe.
4932 	 * Otherwise these subframes would be discarded as replays.
4933 	 */
4934 	if (dup_data->last_seq[tid] == seq &&
4935 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4936 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
4937 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4938 	}
4939 
4940 	dup_data->last_seq[tid] = seq;
4941 	dup_data->last_sub_frame[tid] = subframe_idx;
4942 
4943 	return 0;
4944 }
4945 
4946 /*
4947  * Returns true if sn2 - buffer_size < sn1 < sn2.
4948  * To be used only in order to compare reorder buffer head with NSSN.
4949  * We fully trust NSSN unless it is behind us due to reorder timeout.
4950  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4951  */
4952 int
4953 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4954 {
4955 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4956 }
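
/*
 * Worked example with illustrative values: for buffer_size = 64,
 * iwm_is_sn_less(80, 100, 64) is true, since 80 lies between
 * 100 - 64 = 36 and 100; iwm_is_sn_less(100, 80, 64) is false,
 * so an NSSN behind the head is ignored by iwm_release_frames().
 */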
4957 
4958 void
4959 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
4960     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
4961     uint16_t nssn, struct mbuf_list *ml)
4962 {
4963 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
4964 	uint16_t ssn = reorder_buf->head_sn;
4965 
4966 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4967 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4968 		goto set_timer;
4969 
4970 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4971 		int index = ssn % reorder_buf->buf_size;
4972 		struct mbuf *m;
4973 		int chanidx, is_shortpre;
4974 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4975 		struct ieee80211_rxinfo *rxi;
4976 
4977 		/* This data is the same for all A-MSDU subframes. */
4978 		chanidx = entries[index].chanidx;
4979 		rx_pkt_status = entries[index].rx_pkt_status;
4980 		is_shortpre = entries[index].is_shortpre;
4981 		rate_n_flags = entries[index].rate_n_flags;
4982 		device_timestamp = entries[index].device_timestamp;
4983 		rxi = &entries[index].rxi;
4984 
4985 		/*
4986 		 * Empty the list. An A-MSDU will have more than one frame.
4987 		 * An empty list is valid as well, since the NSSN indicates
4988 		 * that frames were received.
4989 		 */
4990 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4991 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4992 			    rate_n_flags, device_timestamp, rxi, ml);
4993 			reorder_buf->num_stored--;
4994 
4995 			/*
4996 			 * Allow the same frame sequence number and CCMP PN for
4997 			 * all A-MSDU subframes following the first subframe.
4998 			 * Otherwise they would be discarded as replays.
4999 			 */
5000 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5001 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5002 		}
5003 
5004 		ssn = (ssn + 1) & 0xfff;
5005 	}
5006 	reorder_buf->head_sn = nssn;
5007 
5008 set_timer:
5009 	if (reorder_buf->num_stored && !reorder_buf->removed) {
5010 		timeout_add_usec(&reorder_buf->reorder_timer,
5011 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5012 	} else
5013 		timeout_del(&reorder_buf->reorder_timer);
5014 }
5015 
5016 int
5017 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5018     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5019 {
5020 	struct ieee80211com *ic = &sc->sc_ic;
5021 
5022 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5023 		/* we have a new (A-)MPDU ... */
5024 
5025 		/*
5026 		 * reset counter to 0 if we didn't have any oldsn in
5027 		 * the last A-MPDU (as detected by GP2 being identical)
5028 		 */
5029 		if (!buffer->consec_oldsn_prev_drop)
5030 			buffer->consec_oldsn_drops = 0;
5031 
5032 		/* either way, update our tracking state */
5033 		buffer->consec_oldsn_ampdu_gp2 = gp2;
5034 	} else if (buffer->consec_oldsn_prev_drop) {
5035 		/*
5036 		 * tracking state didn't change, and we had an old SN
5037 		 * indication before - do nothing in this case, we
5038 		 * already noted this one down and are waiting for the
5039 		 * next A-MPDU (by GP2)
5040 		 */
5041 		return 0;
5042 	}
5043 
5044 	/* return unless this MPDU has old SN */
5045 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5046 		return 0;
5047 
5048 	/* update state */
5049 	buffer->consec_oldsn_prev_drop = 1;
5050 	buffer->consec_oldsn_drops++;
5051 
5052 	/* if limit is reached, send del BA and reset state */
5053 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5054 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5055 		    0, tid);
5056 		buffer->consec_oldsn_prev_drop = 0;
5057 		buffer->consec_oldsn_drops = 0;
5058 		return 1;
5059 	}
5060 
5061 	return 0;
5062 }
5063 
5064 /*
5065  * Handle re-ordering of frames which were de-aggregated in hardware.
5066  * Returns 1 if the MPDU was consumed (buffered or dropped).
5067  * Returns 0 if the MPDU should be passed to upper layer.
5068  */
5069 int
5070 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5071     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5072     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5073     struct mbuf_list *ml)
5074 {
5075 	struct ieee80211com *ic = &sc->sc_ic;
5076 	struct ieee80211_frame *wh;
5077 	struct ieee80211_node *ni;
5078 	struct iwm_rxba_data *rxba;
5079 	struct iwm_reorder_buffer *buffer;
5080 	uint32_t reorder_data = le32toh(desc->reorder_data);
5081 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5082 	int last_subframe =
5083 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5084 	uint8_t tid;
5085 	uint8_t subframe_idx = (desc->amsdu_info &
5086 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5087 	struct iwm_reorder_buf_entry *entries;
5088 	int index;
5089 	uint16_t nssn, sn;
5090 	uint8_t baid, type, subtype;
5091 	int hasqos;
5092 
5093 	wh = mtod(m, struct ieee80211_frame *);
5094 	hasqos = ieee80211_has_qos(wh);
5095 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5096 
5097 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5098 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5099 
5100 	/*
5101 	 * We are only interested in Block Ack requests and unicast QoS data.
5102 	 */
5103 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5104 		return 0;
5105 	if (hasqos) {
5106 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5107 			return 0;
5108 	} else {
5109 		if (type != IEEE80211_FC0_TYPE_CTL ||
5110 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
5111 			return 0;
5112 	}
5113 
5114 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5115 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
5116 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5117 	    baid >= nitems(sc->sc_rxba_data))
5118 		return 0;
5119 
5120 	rxba = &sc->sc_rxba_data[baid];
5121 	if (tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5122 		return 0;
5123 
5124 	/* Bypass A-MPDU re-ordering in net80211. */
5125 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5126 
5127 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5128 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5129 		IWM_RX_MPDU_REORDER_SN_SHIFT;
5130 
5131 	buffer = &rxba->reorder_buf;
5132 	entries = &rxba->entries[0];
5133 
5134 	if (!buffer->valid) {
5135 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5136 			return 0;
5137 		buffer->valid = 1;
5138 	}
5139 
5140 	ni = ieee80211_find_rxnode(ic, wh);
5141 	if (type == IEEE80211_FC0_TYPE_CTL &&
5142 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5143 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5144 		goto drop;
5145 	}
5146 
5147 	/*
5148 	 * If there was a significant jump in the NSSN, adjust.
5149 	 * If the SN is smaller than the NSSN it may first need to go into
5150 	 * the reorder buffer, in which case we just release up to it and
5151 	 * the rest of the function will take care of storing it and
5152 	 * releasing frames up to the NSSN.
5153 	 */
5154 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5155 	    buffer->buf_size) ||
5156 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5157 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5158 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5159 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5160 	}
5161 
5162 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5163 	    device_timestamp)) {
5164 		 /* BA session will be torn down. */
5165 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5166 		goto drop;
5167 
5168 	}
5169 
5170 	/* drop any outdated packets */
5171 	if (SEQ_LT(sn, buffer->head_sn)) {
5172 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5173 		goto drop;
5174 	}
5175 
5176 	/* release immediately if allowed by nssn and no stored frames */
5177 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5178 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5179 		   (!is_amsdu || last_subframe))
5180 			buffer->head_sn = nssn;
5181 		ieee80211_release_node(ic, ni);
5182 		return 0;
5183 	}
5184 
5185 	/*
5186 	 * Release immediately if there are no stored frames and the SN
5187 	 * equals the head. This can happen due to the reorder timer, when
5188 	 * the NSSN falls behind head_sn: after we have released everything
5189 	 * and the next frame in the sequence arrives, the NSSN says we
5190 	 * cannot release it immediately, even though there is no hole and
5191 	 * we can move forward.
5192 	 */
5193 	if (!buffer->num_stored && sn == buffer->head_sn) {
5194 		if (!is_amsdu || last_subframe)
5195 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5196 		ieee80211_release_node(ic, ni);
5197 		return 0;
5198 	}
5199 
5200 	index = sn % buffer->buf_size;
5201 
5202 	/*
5203 	 * Check whether we already stored this frame.
5204 	 * An A-MSDU is either received in whole or not at all, so the logic
5205 	 * is simple: if frames already occupy this position in the buffer and
5206 	 * the last A-MSDU frame had a different SN, it is a retransmission.
5207 	 * If the SN is the same, it is the same A-MSDU if the subframe
5208 	 * index is incrementing; otherwise it is a retransmission.
5209 	 */
5210 	if (!ml_empty(&entries[index].frames)) {
5211 		if (!is_amsdu) {
5212 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5213 			goto drop;
5214 		} else if (sn != buffer->last_amsdu ||
5215 		    buffer->last_sub_index >= subframe_idx) {
5216 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5217 			goto drop;
5218 		}
5219 	} else {
5220 		/* This data is the same for all A-MSDU subframes. */
5221 		entries[index].chanidx = chanidx;
5222 		entries[index].is_shortpre = is_shortpre;
5223 		entries[index].rate_n_flags = rate_n_flags;
5224 		entries[index].device_timestamp = device_timestamp;
5225 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5226 	}
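	/*
	 * Example: if this slot holds subframes of the A-MSDU with SN 5 and
	 * last_sub_index 2, a new subframe with SN 5 and subframe index 3
	 * is accepted as part of the same A-MSDU, while SN 5 with index 2
	 * or lower is dropped as a retransmission.
	 */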
5227 
5228 	/* put in reorder buffer */
5229 	ml_enqueue(&entries[index].frames, m);
5230 	buffer->num_stored++;
5231 	getmicrouptime(&entries[index].reorder_time);
5232 
5233 	if (is_amsdu) {
5234 		buffer->last_amsdu = sn;
5235 		buffer->last_sub_index = subframe_idx;
5236 	}
5237 
5238 	/*
5239 	 * We cannot trust the NSSN of A-MSDU sub-frames that are not the last.
5240 	 * The reason is that the NSSN advances on the first sub-frame, and may
5241 	 * cause the reorder buffer to advance before all sub-frames arrive.
5242 	 * Example: the reorder buffer contains SN 0 and 2, and we receive an
5243 	 * A-MSDU with SN 1. The NSSN for the first sub-frame will be 3, so the
5244 	 * driver releases SN 0, 1, 2. When the next sub-frame of SN 1 arrives,
5245 	 * the reorder buffer is already ahead of it and it will be dropped.
5246 	 * If the last sub-frame is not on this queue we will get a frame
5247 	 * release notification with an up-to-date NSSN.
5248 	 */
5249 	if (!is_amsdu || last_subframe)
5250 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5251 
5252 	ieee80211_release_node(ic, ni);
5253 	return 1;
5254 
5255 drop:
5256 	m_freem(m);
5257 	ieee80211_release_node(ic, ni);
5258 	return 1;
5259 }
5260 
5261 void
5262 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5263     size_t maxlen, struct mbuf_list *ml)
5264 {
5265 	struct ieee80211com *ic = &sc->sc_ic;
5266 	struct ieee80211_rxinfo rxi;
5267 	struct iwm_rx_mpdu_desc *desc;
5268 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5269 	int rssi;
5270 	uint8_t chanidx;
5271 	uint16_t phy_info;
5272 
5273 	memset(&rxi, 0, sizeof(rxi));
5274 
5275 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
5276 
5277 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5278 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5279 		m_freem(m);
5280 		return; /* drop */
5281 	}
5282 
5283 	len = le16toh(desc->mpdu_len);
5284 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5285 		/* Allow control frames in monitor mode. */
5286 		if (len < sizeof(struct ieee80211_frame_cts)) {
5287 			ic->ic_stats.is_rx_tooshort++;
5288 			IC2IFP(ic)->if_ierrors++;
5289 			m_freem(m);
5290 			return;
5291 		}
5292 	} else if (len < sizeof(struct ieee80211_frame)) {
5293 		ic->ic_stats.is_rx_tooshort++;
5294 		IC2IFP(ic)->if_ierrors++;
5295 		m_freem(m);
5296 		return;
5297 	}
5298 	if (len > maxlen - sizeof(*desc)) {
5299 		IC2IFP(ic)->if_ierrors++;
5300 		m_freem(m);
5301 		return;
5302 	}
5303 
5304 	m->m_data = pktdata + sizeof(*desc);
5305 	m->m_pkthdr.len = m->m_len = len;
5306 
5307 	/* Account for padding following the frame header. */
5308 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5309 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5310 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5311 		if (type == IEEE80211_FC0_TYPE_CTL) {
5312 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5313 			case IEEE80211_FC0_SUBTYPE_CTS:
5314 				hdrlen = sizeof(struct ieee80211_frame_cts);
5315 				break;
5316 			case IEEE80211_FC0_SUBTYPE_ACK:
5317 				hdrlen = sizeof(struct ieee80211_frame_ack);
5318 				break;
5319 			default:
5320 				hdrlen = sizeof(struct ieee80211_frame_min);
5321 				break;
5322 			}
5323 		} else
5324 			hdrlen = ieee80211_get_hdrlen(wh);
5325 
5326 		if ((le16toh(desc->status) &
5327 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5328 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5329 			/* Padding is inserted after the IV. */
5330 			hdrlen += IEEE80211_CCMP_HDRLEN;
5331 		}
5332 
5333 		memmove(m->m_data + 2, m->m_data, hdrlen);
5334 		m_adj(m, 2);
5335 	}
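	/*
	 * The two pad bytes sit between the 802.11 header and the frame
	 * body. Moving the header forward by two bytes overwrites the pad,
	 * and trimming two bytes off the front leaves header and body
	 * contiguous: [hdr][pad][body] becomes [hdr][body].
	 */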
5336 
5337 	/*
5338 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5339 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5340 	 * bit set in the frame header. We need to clear this bit ourselves.
5341 	 *
5342 	 * And we must allow the same CCMP PN for subframes following the
5343 	 * first subframe. Otherwise they would be discarded as replays.
5344 	 */
5345 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5346 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5347 		uint8_t subframe_idx = (desc->amsdu_info &
5348 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5349 		if (subframe_idx > 0)
5350 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5351 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5352 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5353 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5354 			    struct ieee80211_qosframe_addr4 *);
5355 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5356 
5357 			/* HW reverses addr3 and addr4. */
5358 			iwm_flip_address(qwh4->i_addr3);
5359 			iwm_flip_address(qwh4->i_addr4);
5360 		} else if (ieee80211_has_qos(wh) &&
5361 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5362 			struct ieee80211_qosframe *qwh = mtod(m,
5363 			    struct ieee80211_qosframe *);
5364 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5365 
5366 			/* HW reverses addr3. */
5367 			iwm_flip_address(qwh->i_addr3);
5368 		}
5369 	}
5370 
5371 	/*
5372 	 * Verify decryption before duplicate detection. The latter uses
5373 	 * the TID supplied in QoS frame headers and this TID is implicitly
5374 	 * verified as part of the CCMP nonce.
5375 	 */
5376 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5377 		m_freem(m);
5378 		return;
5379 	}
5380 
5381 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5382 		m_freem(m);
5383 		return;
5384 	}
5385 
5386 	phy_info = le16toh(desc->phy_info);
5387 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
5388 	chanidx = desc->v1.channel;
5389 	device_timestamp = desc->v1.gp2_on_air_rise;
5390 
5391 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
5392 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
5393 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
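	/*
	 * For instance, if IWM_MIN_DBM were -100, an input of -60 dBm
	 * would normalize to 40 here, with ic_max_rssi as the upper bound.
	 */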
5394 
5395 	rxi.rxi_rssi = rssi;
5396 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5397 
5398 	if (iwm_rx_reorder(sc, m, chanidx, desc,
5399 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5400 	    rate_n_flags, device_timestamp, &rxi, ml))
5401 		return;
5402 
5403 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5404 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5405 	    rate_n_flags, device_timestamp, &rxi, ml);
5406 }
5407 
5408 void
5409 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5410 {
5411 	struct ieee80211com *ic = &sc->sc_ic;
5412 	struct iwm_node *in = (void *)ni;
5413 	int old_txmcs = ni->ni_txmcs;
5414 
5415 	ieee80211_ra_choose(&in->in_rn, ic, ni);
5416 
5417 	/*
5418 	 * If RA has chosen a new TX rate we must update
5419 	 * the firmware's LQ rate table.
5420 	 */
5421 	if (ni->ni_txmcs != old_txmcs)
5422 		iwm_setrates(in, 1);
5423 }
5424 
5425 void
5426 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5427     int txmcs, uint8_t failure_frame, int txfail)
5428 {
5429 	struct ieee80211com *ic = &sc->sc_ic;
5430 	struct iwm_node *in = (void *)ni;
5431 
5432 	/* Ignore Tx reports which don't match our last LQ command. */
5433 	if (txmcs != ni->ni_txmcs) {
5434 		if (++in->lq_rate_mismatch > 15) {
5435 			/* Try to sync firmware with the driver... */
5436 			iwm_setrates(in, 1);
5437 			in->lq_rate_mismatch = 0;
5438 		}
5439 	} else {
5440 		int mcs = txmcs;
5441 		const struct ieee80211_ht_rateset *rs =
5442 		    ieee80211_ra_get_ht_rateset(txmcs,
5443 		        ieee80211_node_supports_ht_chan40(ni),
5444 			ieee80211_ra_use_ht_sgi(ni));
5445 		unsigned int retries = 0, i;
5446 
5447 		in->lq_rate_mismatch = 0;
5448 
5449 		for (i = 0; i < failure_frame; i++) {
5450 			if (mcs > rs->min_mcs) {
5451 				ieee80211_ra_add_stats_ht(&in->in_rn,
5452 				    ic, ni, mcs, 1, 1);
5453 				mcs--;
5454 			} else
5455 				retries++;
5456 		}
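		/*
		 * Example with illustrative numbers: failure_frame == 3,
		 * txmcs == 7 and rs->min_mcs == 4 record one failed
		 * attempt each at MCS 7, 6 and 5, leaving mcs == 4 and
		 * retries == 0 for the final report below.
		 */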
5457 
5458 		if (txfail && failure_frame == 0) {
5459 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5460 			    txmcs, 1, 1);
5461 		} else {
5462 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5463 			    mcs, retries + 1, retries);
5464 		}
5465 
5466 		iwm_ra_choose(sc, ni);
5467 	}
5468 }
5469 
5470 void
5471 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5472     struct iwm_node *in, int txmcs, int txrate)
5473 {
5474 	struct ieee80211com *ic = &sc->sc_ic;
5475 	struct ieee80211_node *ni = &in->in_ni;
5476 	struct ifnet *ifp = IC2IFP(ic);
5477 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5478 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5479 	int txfail;
5480 
5481 	KASSERT(tx_resp->frame_count == 1);
5482 
5483 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
5484 	    status != IWM_TX_STATUS_DIRECT_DONE);
5485 
5486 	/*
5487 	 * Update rate control statistics.
5488 	 * Only report frames which were actually queued with the currently
5489 	 * selected Tx rate. Because Tx queues are relatively long we may
5490 	 * encounter previously selected rates here during Tx bursts.
5491 	 * Providing feedback based on such frames can lead to suboptimal
5492 	 * Tx rate control decisions.
5493 	 */
5494 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5495 		if (txrate != ni->ni_txrate) {
5496 			if (++in->lq_rate_mismatch > 15) {
5497 				/* Try to sync firmware with the driver... */
5498 				iwm_setrates(in, 1);
5499 				in->lq_rate_mismatch = 0;
5500 			}
5501 		} else {
5502 			in->lq_rate_mismatch = 0;
5503 
5504 			in->in_amn.amn_txcnt++;
5505 			if (txfail)
5506 				in->in_amn.amn_retrycnt++;
5507 			if (tx_resp->failure_frame > 0)
5508 				in->in_amn.amn_retrycnt++;
5509 		}
5510 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5511 	    (le32toh(tx_resp->initial_rate) & IWM_RATE_MCS_HT_MSK)) {
5512 		int txmcs = le32toh(tx_resp->initial_rate) &
5513 		    (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5514 		iwm_ht_single_rate_control(sc, ni, txmcs,
5515 		    tx_resp->failure_frame, txfail);
5516 	}
5517 
5518 	if (txfail)
5519 		ifp->if_oerrors++;
5520 }
5521 
5522 void
5523 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5524 {
5525 	struct ieee80211com *ic = &sc->sc_ic;
5526 
5527 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5528 	    BUS_DMASYNC_POSTWRITE);
5529 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5530 	m_freem(txd->m);
5531 	txd->m = NULL;
5532 
5533 	KASSERT(txd->in);
5534 	ieee80211_release_node(ic, &txd->in->in_ni);
5535 	txd->in = NULL;
5536 	txd->ampdu_nframes = 0;
5537 	txd->ampdu_txmcs = 0;
5538 }
5539 
5540 void
5541 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5542 {
5543 	struct iwm_tx_data *txd;
5544 
5545 	while (ring->tail != idx) {
5546 		txd = &ring->data[ring->tail];
5547 		if (txd->m != NULL) {
5548 			if (ring->qid < IWM_FIRST_AGG_TX_QUEUE)
5549 				DPRINTF(("%s: missed Tx completion: tail=%d "
5550 				    "idx=%d\n", __func__, ring->tail, idx));
5551 			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5552 			iwm_txd_done(sc, txd);
5553 			ring->queued--;
5554 		}
5555 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5556 	}
5557 
5558 	wakeup(ring);
5559 }
5560 
5561 void
5562 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5563     struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5564     uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5565     struct iwm_agg_tx_status *agg_status)
5566 {
5567 	struct ieee80211com *ic = &sc->sc_ic;
5568 	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5569 	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5570 	struct ieee80211_node *ni = &in->in_ni;
5571 	struct ieee80211_tx_ba *ba;
5572 	int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5573 	    status != IWM_TX_STATUS_DIRECT_DONE);
5574 	uint16_t seq;
5575 
5576 	sc->sc_tx_timer = 0;
5577 
5578 	if (ic->ic_state != IEEE80211_S_RUN)
5579 		return;
5580 
5581 	if (nframes > 1) {
5582 		int i;
5583 		/*
5584 		 * Collect information about this A-MPDU.
5585 		 */
5586 
5587 		for (i = 0; i < nframes; i++) {
5588 			uint8_t qid = agg_status[i].qid;
5589 			uint8_t idx = agg_status[i].idx;
5590 			uint16_t txstatus = (le16toh(agg_status[i].status) &
5591 			    IWM_AGG_TX_STATE_STATUS_MSK);
5592 
5593 			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5594 				continue;
5595 
5596 			if (qid != cmd_hdr->qid)
5597 				continue;
5598 
5599 			txdata = &txq->data[idx];
5600 			if (txdata->m == NULL)
5601 				continue;
5602 
5603 			/* The Tx rate was the same for all subframes. */
5604 			txdata->ampdu_txmcs = initial_rate &
5605 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5606 			   IWM_RATE_HT_MCS_NSS_MSK);
5607 			txdata->ampdu_nframes = nframes;
5608 		}
5609 		return;
5610 	}
5611 
5612 	ba = &ni->ni_tx_ba[tid];
5613 	if (ba->ba_state != IEEE80211_BA_AGREED)
5614 		return;
5615 	if (SEQ_LT(ssn, ba->ba_winstart))
5616 		return;
5617 
5618 	/* This was a final single-frame Tx attempt for frame SSN-1. */
5619 	seq = (ssn - 1) & 0xfff;
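	/* With 12-bit sequence numbers, ssn == 0 yields seq == 0xfff. */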
5620 
5621 	/*
5622 	 * Skip rate control if our Tx rate is fixed.
5623 	 * Don't report frames to MiRA which were sent at a different
5624 	 * Tx rate than ni->ni_txmcs.
5625 	 */
5626 	if (ic->ic_fixed_mcs == -1) {
5627 		if (txdata->ampdu_nframes > 1) {
5628 			/*
5629 			 * This frame was once part of an A-MPDU.
5630 			 * Report one failed A-MPDU Tx attempt.
5631 			 * The firmware might have made several such
5632 			 * attempts but we don't keep track of this.
5633 			 */
5634 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5635 			    txdata->ampdu_txmcs, 1, 1);
5636 		}
5637 
5638 		/* Report the final single-frame Tx attempt. */
5639 		if (initial_rate & IWM_RATE_HT_MCS_RATE_CODE_MSK) {
5640 			int txmcs = initial_rate &
5641 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5642 			   IWM_RATE_HT_MCS_NSS_MSK);
5643 			iwm_ht_single_rate_control(sc, ni, txmcs,
5644 			    failure_frame, txfail);
5645 		}
5646 	}
5647 
5648 	if (txfail)
5649 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5650 
5651 	/*
5652 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5653 	 * in firmware's BA window. Firmware is not going to retransmit any
5654 	 * frames before its BA window so mark them all as done.
5655 	 */
5656 	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5657 	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5658 	iwm_clear_oactive(sc, txq);
5659 }
5660 
5661 void
5662 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5663     struct iwm_rx_data *data)
5664 {
5665 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5666 	int idx = cmd_hdr->idx;
5667 	int qid = cmd_hdr->qid;
5668 	struct iwm_tx_ring *ring = &sc->txq[qid];
5669 	struct iwm_tx_data *txd;
5670 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5671 	uint32_t ssn;
5672 	uint32_t len = iwm_rx_packet_len(pkt);
5673 
5674 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5675 	    BUS_DMASYNC_POSTREAD);
5676 
5677 	sc->sc_tx_timer = 0;
5678 
5679 	/* Sanity checks. */
5680 	if (sizeof(*tx_resp) > len)
5681 		return;
5682 	if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5683 		return;
5684 	if (sizeof(*tx_resp) + sizeof(ssn) +
5685 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5686 		return;
5687 
5688 	txd = &ring->data[idx];
5689 	if (txd->m == NULL)
5690 		return;
5691 
5692 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5693 	ssn = le32toh(ssn) & 0xfff;
5694 	if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5695 		int status;
5696 		status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5697 		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5698 		    le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5699 		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
5700 	} else {
5701 		/*
5702 		 * Even though this is not an agg queue, we must only free
5703 		 * frames before the firmware's starting sequence number.
5704 		 */
5705 		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5706 		iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5707 		iwm_clear_oactive(sc, ring);
5708 	}
5709 }
5710 
5711 void
5712 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5713 {
5714 	struct ieee80211com *ic = &sc->sc_ic;
5715 	struct ifnet *ifp = IC2IFP(ic);
5716 
5717 	if (ring->queued < IWM_TX_RING_LOMARK) {
5718 		sc->qfullmsk &= ~(1 << ring->qid);
5719 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5720 			ifq_clr_oactive(&ifp->if_snd);
5721 			/*
5722 			 * Well, we're in interrupt context, but then again
5723 			 * I guess net80211 does all sorts of stunts in
5724 			 * interrupt context, so maybe this is no biggie.
5725 			 */
5726 			(*ifp->if_start)(ifp);
5727 		}
5728 	}
5729 }
5730 
5731 void
5732 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5733     struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5734 {
5735 	struct ieee80211com *ic = &sc->sc_ic;
5736 	struct iwm_node *in = (void *)ni;
5737 	int idx, end_idx;
5738 
5739 	/*
5740 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5741 	 */
5742 	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5743 	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5744 	while (idx != end_idx) {
5745 		struct iwm_tx_data *txdata = &txq->data[idx];
5746 		if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5747 			/*
5748 			 * We can assume that this subframe has been ACKed
5749 			 * because ACK failures come as single frames and
5750 			 * before failing an A-MPDU subframe the firmware
5751 			 * sends it as a single frame at least once.
5752 			 */
5753 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5754 			    txdata->ampdu_txmcs, 1, 0);
5755 
5756 			/* Report this frame only once. */
5757 			txdata->ampdu_nframes = 0;
5758 		}
5759 
5760 		idx = (idx + 1) % IWM_TX_RING_COUNT;
5761 	}
5762 
5763 	iwm_ra_choose(sc, ni);
5764 }
5765 
5766 void
5767 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5768     struct iwm_rx_data *data)
5769 {
5770 	struct iwm_ba_notif *ban = (void *)pkt->data;
5771 	struct ieee80211com *ic = &sc->sc_ic;
5772 	struct ieee80211_node *ni = ic->ic_bss;
5773 	struct iwm_node *in = (void *)ni;
5774 	struct ieee80211_tx_ba *ba;
5775 	struct iwm_tx_ring *ring;
5776 	uint16_t seq, ssn;
5777 	int qid;
5778 
5779 	if (ic->ic_state != IEEE80211_S_RUN)
5780 		return;
5781 
5782 	if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5783 		return;
5784 
5785 	if (ban->sta_id != IWM_STATION_ID ||
5786 	    !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5787 		return;
5788 
5789 	qid = le16toh(ban->scd_flow);
5790 	if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5791 		return;
5792 
5793 	/* Protect against a firmware bug where the queue/TID are off. */
5794 	if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5795 		return;
5796 
5797 	sc->sc_tx_timer = 0;
5798 
5799 	ba = &ni->ni_tx_ba[ban->tid];
5800 	if (ba->ba_state != IEEE80211_BA_AGREED)
5801 		return;
5802 
5803 	ring = &sc->txq[qid];
5804 
5805 	/*
5806 	 * The first bit in ban->bitmap corresponds to the sequence number
5807 	 * stored in the sequence control field ban->seq_ctl.
5808 	 * Multiple BA notifications in a row may be using this number, with
5809 	 * additional bits being set in ban->bitmap. It is unclear how the
5810 	 * firmware decides to shift this window forward.
5811 	 * We rely on ba->ba_winstart instead.
5812 	 */
5813 	seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
5814 
5815 	/*
5816 	 * The firmware's new BA window starting sequence number, stored
5817 	 * in ban->scd_ssn, corresponds to the first hole in the block ack
5818 	 * bitmap, implying that all frames between 'seq' and 'ssn'
5819 	 * (non-inclusive) have been acked.
5820 	 */
5821 	ssn = le16toh(ban->scd_ssn);
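	/*
	 * For example, seq == 100 and ssn == 103 mean that frames
	 * 100, 101 and 102 have been acknowledged.
	 */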
5822 
5823 	if (SEQ_LT(ssn, ba->ba_winstart))
5824 		return;
5825 
5826 	/* Skip rate control if our Tx rate is fixed. */
5827 	if (ic->ic_fixed_mcs == -1)
5828 		iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
5829 		    ba->ba_winstart, ssn);
5830 
5831 	/*
5832 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5833 	 * in firmware's BA window. Firmware is not going to retransmit any
5834 	 * frames before its BA window so mark them all as done.
5835 	 */
5836 	ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
5837 	iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5838 	iwm_clear_oactive(sc, ring);
5839 }
5840 
5841 void
5842 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5843     struct iwm_rx_data *data)
5844 {
5845 	struct ieee80211com *ic = &sc->sc_ic;
5846 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
5847 	uint32_t missed;
5848 
5849 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5850 	    (ic->ic_state != IEEE80211_S_RUN))
5851 		return;
5852 
5853 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5854 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5855 
5856 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5857 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5858 		if (ic->ic_if.if_flags & IFF_DEBUG)
5859 			printf("%s: receiving no beacons from %s; checking if "
5860 			    "this AP is still responding to probe requests\n",
5861 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5862 		/*
5863 		 * Rather than go directly to scan state, try to send a
5864 		 * directed probe request first. If that fails then the
5865 		 * state machine will drop us into scanning after timing
5866 		 * out waiting for a probe response.
5867 		 */
5868 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5869 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5870 	}
5871 
5872 }
5873 
5874 int
5875 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
5876 {
5877 	struct iwm_binding_cmd cmd;
5878 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
5879 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5880 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
5881 	uint32_t status;
5882 	size_t len;
5883 
5884 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
5885 		panic("binding already added");
5886 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
5887 		panic("binding already removed");
5888 
5889 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
5890 		return EINVAL;
5891 
5892 	memset(&cmd, 0, sizeof(cmd));
5893 
5894 	cmd.id_and_color
5895 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5896 	cmd.action = htole32(action);
5897 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5898 
5899 	cmd.macs[0] = htole32(mac_id);
5900 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
5901 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
5902 
5903 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5904 	    !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
5905 		cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
5906 	else
5907 		cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
5908 
5909 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
5910 		len = sizeof(cmd);
5911 	else
5912 		len = sizeof(struct iwm_binding_cmd_v1);
5913 	status = 0;
5914 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
5915 	    &status);
5916 	if (err == 0 && status != 0)
5917 		err = EIO;
5918 
5919 	return err;
5920 }
5921 
5922 void
5923 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5924     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
5925 {
5926 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
5927 
5928 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
5929 	    ctxt->color));
5930 	cmd->action = htole32(action);
5931 	cmd->apply_time = htole32(apply_time);
5932 }
5933 
5934 void
5935 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
5936     struct ieee80211_channel *chan, uint8_t chains_static,
5937     uint8_t chains_dynamic, uint8_t sco)
5938 {
5939 	struct ieee80211com *ic = &sc->sc_ic;
5940 	uint8_t active_cnt, idle_cnt;
5941 
5942 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5943 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
5944 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
5945 	if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5946 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5947 			/* secondary chan above -> control chan below */
5948 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5949 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
5950 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5951 			/* secondary chan below -> control chan above */
5952 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
5953 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
5954 		} else {
5955 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
5956 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5957 		}
5958 	} else {
5959 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
5960 		cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5961 	}
5962 
5963 	/* Set the Rx chains. */
5964 	idle_cnt = chains_static;
5965 	active_cnt = chains_dynamic;
5966 
5967 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
5968 					IWM_PHY_RX_CHAIN_VALID_POS);
5969 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
5970 	cmd->rxchain_info |= htole32(active_cnt <<
5971 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
5972 
5973 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
5974 }
5975 
5976 int
5977 iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5978     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5979     uint32_t apply_time, uint8_t sco)
5980 {
5981 	struct ieee80211com *ic = &sc->sc_ic;
5982 	struct iwm_phy_context_cmd_uhb cmd;
5983 	uint8_t active_cnt, idle_cnt;
5984 	struct ieee80211_channel *chan = ctxt->channel;
5985 
5986 	memset(&cmd, 0, sizeof(cmd));
5987 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
5988 	    ctxt->color));
5989 	cmd.action = htole32(action);
5990 	cmd.apply_time = htole32(apply_time);
5991 
5992 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5993 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
5994 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5995 	if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5996 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5997 			/* secondary chan above -> control chan below */
5998 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5999 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6000 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6001 			/* secondary chan below -> control chan above */
6002 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6003 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6004 		} else {
6005 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6006 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6007 		}
6008 	} else {
6009 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6010 		cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6011 	}
6012 
6013 	idle_cnt = chains_static;
6014 	active_cnt = chains_dynamic;
6015 	cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6016 					IWM_PHY_RX_CHAIN_VALID_POS);
6017 	cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6018 	cmd.rxchain_info |= htole32(active_cnt <<
6019 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6020 	cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6021 
6022 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6023 }
6024 
6025 int
6026 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6027     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6028     uint32_t apply_time, uint8_t sco)
6029 {
6030 	struct iwm_phy_context_cmd cmd;
6031 
6032 	/*
6033 	 * Intel increased the size of the fw_channel_info struct and neglected
6034 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6035 	 * member in the middle.
6036 	 * To keep things simple we use a separate function to handle the larger
6037 	 * variant of the phy context command.
6038 	 */
6039 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6040 		return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6041 		    chains_dynamic, action, apply_time, sco);
6042 
6043 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6044 
6045 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6046 	    chains_static, chains_dynamic, sco);
6047 
6048 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6049 	    sizeof(struct iwm_phy_context_cmd), &cmd);
6050 }
6051 
6052 int
6053 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6054 {
6055 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6056 	struct iwm_tfd *desc;
6057 	struct iwm_tx_data *txdata;
6058 	struct iwm_device_cmd *cmd;
6059 	struct mbuf *m;
6060 	bus_addr_t paddr;
6061 	uint32_t addr_lo;
6062 	int err = 0, i, paylen, off, s;
6063 	int idx, code, async, group_id;
6064 	size_t hdrlen, datasz;
6065 	uint8_t *data;
6066 	int generation = sc->sc_generation;
6067 
6068 	code = hcmd->id;
6069 	async = hcmd->flags & IWM_CMD_ASYNC;
6070 	idx = ring->cur;
6071 
6072 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6073 		paylen += hcmd->len[i];
6074 	}
6075 
6076 	/* If this command waits for a response, allocate response buffer. */
6077 	hcmd->resp_pkt = NULL;
6078 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
6079 		uint8_t *resp_buf;
6080 		KASSERT(!async);
6081 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6082 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6083 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
6084 			return ENOSPC;
6085 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6086 		    M_NOWAIT | M_ZERO);
6087 		if (resp_buf == NULL)
6088 			return ENOMEM;
6089 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
6090 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6091 	} else {
6092 		sc->sc_cmd_resp_pkt[idx] = NULL;
6093 	}
6094 
6095 	s = splnet();
6096 
6097 	desc = &ring->desc[idx];
6098 	txdata = &ring->data[idx];
6099 
6100 	group_id = iwm_cmd_groupid(code);
6101 	if (group_id != 0) {
6102 		hdrlen = sizeof(cmd->hdr_wide);
6103 		datasz = sizeof(cmd->data_wide);
6104 	} else {
6105 		hdrlen = sizeof(cmd->hdr);
6106 		datasz = sizeof(cmd->data);
6107 	}
6108 
6109 	if (paylen > datasz) {
6110 		/* Command is too large to fit in pre-allocated space. */
6111 		size_t totlen = hdrlen + paylen;
6112 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6113 			printf("%s: firmware command too long (%zd bytes)\n",
6114 			    DEVNAME(sc), totlen);
6115 			err = EINVAL;
6116 			goto out;
6117 		}
6118 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
6119 		if (m == NULL) {
6120 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6121 			    DEVNAME(sc), totlen);
6122 			err = ENOMEM;
6123 			goto out;
6124 		}
6125 		cmd = mtod(m, struct iwm_device_cmd *);
6126 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6127 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6128 		if (err) {
6129 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6130 			    DEVNAME(sc), totlen);
6131 			m_freem(m);
6132 			goto out;
6133 		}
6134 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6135 		paddr = txdata->map->dm_segs[0].ds_addr;
6136 	} else {
6137 		cmd = &ring->cmd[idx];
6138 		paddr = txdata->cmd_paddr;
6139 	}
6140 
6141 	if (group_id != 0) {
6142 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6143 		cmd->hdr_wide.group_id = group_id;
6144 		cmd->hdr_wide.qid = ring->qid;
6145 		cmd->hdr_wide.idx = idx;
6146 		cmd->hdr_wide.length = htole16(paylen);
6147 		cmd->hdr_wide.version = iwm_cmd_version(code);
6148 		data = cmd->data_wide;
6149 	} else {
6150 		cmd->hdr.code = code;
6151 		cmd->hdr.flags = 0;
6152 		cmd->hdr.qid = ring->qid;
6153 		cmd->hdr.idx = idx;
6154 		data = cmd->data;
6155 	}
6156 
6157 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6158 		if (hcmd->len[i] == 0)
6159 			continue;
6160 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6161 		off += hcmd->len[i];
6162 	}
6163 	KASSERT(off == paylen);
6164 
6165 	/* lo field is not aligned */
6166 	addr_lo = htole32((uint32_t)paddr);
6167 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6168 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
6169 	    | ((hdrlen + paylen) << 4));
6170 	desc->num_tbs = 1;
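	/*
	 * hi_n_len packs the upper bits of the DMA address into its low
	 * four bits and the buffer length into bits 4-15; e.g. a 48-byte
	 * buffer at a 32-bit address yields hi_n_len == htole16(0x300).
	 */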
6171 
6172 	if (paylen > datasz) {
6173 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6174 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6175 	} else {
6176 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6177 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6178 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6179 	}
6180 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6181 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6182 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6183 
6184 	/*
6185 	 * Wake up the NIC to make sure that the firmware will see the host
6186 	 * command - we will let the NIC sleep once all the host commands
6187 	 * have returned. This needs to be done only on 7000 family NICs.
6188 	 */
6189 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6190 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6191 			err = EBUSY;
6192 			goto out;
6193 		}
6194 	}
6195 
6196 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6197 
6198 	/* Kick command ring. */
6199 	ring->queued++;
6200 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6201 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6202 
6203 	if (!async) {
6204 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6205 		if (err == 0) {
6206 			/* if hardware is no longer up, return error */
6207 			if (generation != sc->sc_generation) {
6208 				err = ENXIO;
6209 				goto out;
6210 			}
6211 
6212 			/* Response buffer will be freed in iwm_free_resp(). */
6213 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6214 			sc->sc_cmd_resp_pkt[idx] = NULL;
6215 		} else if (generation == sc->sc_generation) {
6216 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6217 			    sc->sc_cmd_resp_len[idx]);
6218 			sc->sc_cmd_resp_pkt[idx] = NULL;
6219 		}
6220 	}
6221  out:
6222 	splx(s);
6223 
6224 	return err;
6225 }
6226 
6227 int
6228 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6229     uint16_t len, const void *data)
6230 {
6231 	struct iwm_host_cmd cmd = {
6232 		.id = id,
6233 		.len = { len, },
6234 		.data = { data, },
6235 		.flags = flags,
6236 	};
6237 
6238 	return iwm_send_cmd(sc, &cmd);
6239 }
6240 
6241 int
6242 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6243     uint32_t *status)
6244 {
6245 	struct iwm_rx_packet *pkt;
6246 	struct iwm_cmd_response *resp;
6247 	int err, resp_len;
6248 
6249 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6250 	cmd->flags |= IWM_CMD_WANT_RESP;
6251 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6252 
6253 	err = iwm_send_cmd(sc, cmd);
6254 	if (err)
6255 		return err;
6256 
6257 	pkt = cmd->resp_pkt;
6258 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6259 		return EIO;
6260 
6261 	resp_len = iwm_rx_packet_payload_len(pkt);
6262 	if (resp_len != sizeof(*resp)) {
6263 		iwm_free_resp(sc, cmd);
6264 		return EIO;
6265 	}
6266 
6267 	resp = (void *)pkt->data;
6268 	*status = le32toh(resp->status);
6269 	iwm_free_resp(sc, cmd);
6270 	return err;
6271 }
6272 
6273 int
6274 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6275     const void *data, uint32_t *status)
6276 {
6277 	struct iwm_host_cmd cmd = {
6278 		.id = id,
6279 		.len = { len, },
6280 		.data = { data, },
6281 	};
6282 
6283 	return iwm_send_cmd_status(sc, &cmd, status);
6284 }
6285 
6286 void
6287 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6288 {
6289 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6290 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6291 	hcmd->resp_pkt = NULL;
6292 }
6293 
6294 void
6295 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6296 {
6297 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6298 	struct iwm_tx_data *data;
6299 
6300 	if (qid != sc->cmdqid) {
6301 		return;	/* Not a command ack. */
6302 	}
6303 
6304 	data = &ring->data[idx];
6305 
6306 	if (data->m != NULL) {
6307 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6308 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6309 		bus_dmamap_unload(sc->sc_dmat, data->map);
6310 		m_freem(data->m);
6311 		data->m = NULL;
6312 	}
6313 	wakeup(&ring->desc[idx]);
6314 
6315 	if (ring->queued == 0) {
6316 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6317 		    DEVNAME(sc), code));
6318 	} else if (--ring->queued == 0) {
6319 		/*
6320 		 * 7000 family NICs are locked while commands are in progress.
6321 		 * All commands are now done so we may unlock the NIC again.
6322 		 */
6323 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6324 			iwm_nic_unlock(sc);
6325 	}
6326 }
6327 
6328 void
6329 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6330     uint16_t len)
6331 {
6332 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6333 	uint16_t val;
6334 
6335 	scd_bc_tbl = sc->sched_dma.vaddr;
6336 
6337 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6338 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6339 		len = roundup(len, 4) / 4;
6340 
6341 	val = htole16(sta_id << 12 | len);
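	/*
	 * The byte count table entry packs the station ID into bits 12-15
	 * and the adjusted length into bits 0-11; e.g. sta_id 1 and
	 * len 100 yield htole16(0x1064).
	 */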
6342 
6343 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6344 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6345 
6346 	/* Update TX scheduler. */
6347 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6348 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6349 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6350 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6351 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6352 }
6353 
6354 void
6355 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6356 {
6357 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6358 	uint16_t val;
6359 
6360 	scd_bc_tbl = sc->sched_dma.vaddr;
6361 
6362 	val = htole16(1 | (sta_id << 12));
6363 
6364 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6365 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6366 
6367 	/* Update TX scheduler. */
6368 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6369 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6370 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6371 
6372 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6373 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6374 }
6375 
6376 /*
6377  * Fill in various bits for management frames, and leave them
6378  * unfilled for data frames (firmware takes care of that).
6379  * Return the selected TX rate.
6380  */
6381 const struct iwm_rate *
6382 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6383     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6384 {
6385 	struct ieee80211com *ic = &sc->sc_ic;
6386 	struct ieee80211_node *ni = &in->in_ni;
6387 	const struct iwm_rate *rinfo;
6388 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6389 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6390 	int ridx, rate_flags;
6391 
6392 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6393 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6394 
6395 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6396 	    type != IEEE80211_FC0_TYPE_DATA) {
6397 		/* for non-data, use the lowest supported rate */
6398 		ridx = min_ridx;
6399 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6400 	} else if (ic->ic_fixed_mcs != -1) {
6401 		ridx = sc->sc_fixed_ridx;
6402 	} else if (ic->ic_fixed_rate != -1) {
6403 		ridx = sc->sc_fixed_ridx;
6404 	} else {
6405 		int i;
6406 		/* Use firmware rateset retry table. */
6407 		tx->initial_rate_index = 0;
6408 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6409 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6410 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
6411 			return &iwm_rates[ridx];
6412 		}
6413 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6414 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6415 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6416 			if (iwm_rates[i].rate == (ni->ni_txrate &
6417 			    IEEE80211_RATE_VAL)) {
6418 				ridx = i;
6419 				break;
6420 			}
6421 		}
6422 		return &iwm_rates[ridx];
6423 	}
6424 
6425 	rinfo = &iwm_rates[ridx];
6426 	if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6427 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6428 	else if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
6429 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
6430 	else
6431 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
6432 	if (IWM_RIDX_IS_CCK(ridx))
6433 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
6434 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6435 	    type == IEEE80211_FC0_TYPE_DATA &&
6436 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6437 		uint8_t sco;
6438 		if (ieee80211_node_supports_ht_chan40(ni))
6439 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6440 		else
6441 			sco = IEEE80211_HTOP0_SCO_SCN;
6442 		rate_flags |= IWM_RATE_MCS_HT_MSK;
6443 		if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6444 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
6445 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6446 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6447 			if (ieee80211_node_supports_ht_sgi40(ni))
6448 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6449 		} else if (ieee80211_node_supports_ht_sgi20(ni))
6450 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
6451 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6452 	} else
6453 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6454 
6455 	return rinfo;
6456 }
6457 
6458 #define TB0_SIZE 16
6459 int
6460 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6461 {
6462 	struct ieee80211com *ic = &sc->sc_ic;
6463 	struct iwm_node *in = (void *)ni;
6464 	struct iwm_tx_ring *ring;
6465 	struct iwm_tx_data *data;
6466 	struct iwm_tfd *desc;
6467 	struct iwm_device_cmd *cmd;
6468 	struct iwm_tx_cmd *tx;
6469 	struct ieee80211_frame *wh;
6470 	struct ieee80211_key *k = NULL;
6471 	const struct iwm_rate *rinfo;
6472 	uint8_t *ivp;
6473 	uint32_t flags;
6474 	u_int hdrlen;
6475 	bus_dma_segment_t *seg;
6476 	uint8_t tid, type, subtype;
6477 	int i, totlen, err, pad;
6478 	int qid, hasqos;
6479 
6480 	wh = mtod(m, struct ieee80211_frame *);
6481 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6482 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6483 	if (type == IEEE80211_FC0_TYPE_CTL)
6484 		hdrlen = sizeof(struct ieee80211_frame_min);
6485 	else
6486 		hdrlen = ieee80211_get_hdrlen(wh);
6487 
6488 	hasqos = ieee80211_has_qos(wh);
6489 	if (type == IEEE80211_FC0_TYPE_DATA)
6490 		tid = IWM_TID_NON_QOS;
6491 	else
6492 		tid = IWM_MAX_TID_COUNT;
6493 
6494 	/*
6495 	 * Map EDCA categories to Tx data queues.
6496 	 *
6497 	 * We use static data queue assignments even in DQA mode. We do not
6498 	 * need to share Tx queues between stations because we only implement
6499 	 * client mode; the firmware's station table contains only one entry
6500 	 * which represents our access point.
6501 	 */
6502 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6503 		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6504 	else
6505 		qid = ac;
6506 
6507 	/* If possible, put this frame on an aggregation queue. */
6508 	if (hasqos) {
6509 		struct ieee80211_tx_ba *ba;
6510 		uint16_t qos = ieee80211_get_qos(wh);
6511 		int qostid = qos & IEEE80211_QOS_TID;
6512 		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6513 
6514 		ba = &ni->ni_tx_ba[qostid];
6515 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6516 		    type == IEEE80211_FC0_TYPE_DATA &&
6517 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6518 		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6519 		    ba->ba_state == IEEE80211_BA_AGREED) {
6520 			qid = agg_qid;
6521 			tid = qostid;
6522 			ac = ieee80211_up_to_ac(ic, qostid);
6523 		}
6524 	}
6525 
6526 	ring = &sc->txq[qid];
6527 	desc = &ring->desc[ring->cur];
6528 	memset(desc, 0, sizeof(*desc));
6529 	data = &ring->data[ring->cur];
6530 
6531 	cmd = &ring->cmd[ring->cur];
6532 	cmd->hdr.code = IWM_TX_CMD;
6533 	cmd->hdr.flags = 0;
6534 	cmd->hdr.qid = ring->qid;
6535 	cmd->hdr.idx = ring->cur;
6536 
6537 	tx = (void *)cmd->data;
6538 	memset(tx, 0, sizeof(*tx));
6539 
6540 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
6541 
6542 #if NBPFILTER > 0
6543 	if (sc->sc_drvbpf != NULL) {
6544 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6545 		uint16_t chan_flags;
6546 
6547 		tap->wt_flags = 0;
6548 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6549 		chan_flags = ni->ni_chan->ic_flags;
6550 		if (ic->ic_curmode != IEEE80211_MODE_11N)
6551 			chan_flags &= ~IEEE80211_CHAN_HT;
6552 		tap->wt_chan_flags = htole16(chan_flags);
6553 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6554 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6555 		    type == IEEE80211_FC0_TYPE_DATA &&
6556 		    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6557 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
6558 		} else
6559 			tap->wt_rate = rinfo->rate;
6560 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6561 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6562 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6563 
6564 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6565 		    m, BPF_DIRECTION_OUT);
6566 	}
6567 #endif
6568 	totlen = m->m_pkthdr.len;
6569 
6570 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6571 		k = ieee80211_get_txkey(ic, wh, ni);
6572 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6573 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6574 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6575 				return ENOBUFS;
6576 			/* 802.11 header may have moved. */
6577 			wh = mtod(m, struct ieee80211_frame *);
6578 			totlen = m->m_pkthdr.len;
6579 			k = NULL; /* skip hardware crypto below */
6580 		} else {
6581 			/* HW appends CCMP MIC */
6582 			totlen += IEEE80211_CCMP_HDRLEN;
6583 		}
6584 	}
6585 
6586 	flags = 0;
6587 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6588 		flags |= IWM_TX_CMD_FLG_ACK;
6589 	}
6590 
6591 	if (type == IEEE80211_FC0_TYPE_DATA &&
6592 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6593 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6594 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
6595 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6596 
6597 	tx->sta_id = IWM_STATION_ID;
6598 
6599 	if (type == IEEE80211_FC0_TYPE_MGT) {
6600 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6601 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6602 			tx->pm_frame_timeout = htole16(3);
6603 		else
6604 			tx->pm_frame_timeout = htole16(2);
6605 	} else {
6606 		if (type == IEEE80211_FC0_TYPE_CTL &&
6607 		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6608 			struct ieee80211_frame_min *mwh;
6609 			uint8_t *barfrm;
6610 			uint16_t ctl;
6611 			mwh = mtod(m, struct ieee80211_frame_min *);
6612 			barfrm = (uint8_t *)&mwh[1];
6613 			ctl = LE_READ_2(barfrm);
6614 			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6615 			    IEEE80211_BA_TID_INFO_SHIFT;
6616 			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6617 			tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6618 		}
6619 
6620 		tx->pm_frame_timeout = htole16(0);
6621 	}
6622 
6623 	if (hdrlen & 3) {
6624 		/* First segment length must be a multiple of 4. */
6625 		flags |= IWM_TX_CMD_FLG_MH_PAD;
6626 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6627 		pad = 4 - (hdrlen & 3);
6628 	} else
6629 		pad = 0;
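	/*
	 * For example, a QoS data header is 26 bytes long, so pad becomes
	 * 2; the MH_PAD flag set above tells the firmware that these pad
	 * bytes are not part of the frame.
	 */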
6630 
6631 	tx->len = htole16(totlen);
6632 	tx->tid_tspec = tid;
6633 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6634 
6635 	/* Set physical address of "scratch area". */
6636 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6637 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6638 
6639 	/* Copy 802.11 header in TX command. */
6640 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6641 
6642 	if  (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6643 		/* Trim 802.11 header and prepend CCMP IV. */
6644 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6645 		ivp = mtod(m, u_int8_t *);
6646 		k->k_tsc++;	/* increment the 48-bit PN */
6647 		ivp[0] = k->k_tsc; /* PN0 */
6648 		ivp[1] = k->k_tsc >> 8; /* PN1 */
6649 		ivp[2] = 0;        /* Rsvd */
6650 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6651 		ivp[4] = k->k_tsc >> 16; /* PN2 */
6652 		ivp[5] = k->k_tsc >> 24; /* PN3 */
6653 		ivp[6] = k->k_tsc >> 32; /* PN4 */
6654 		ivp[7] = k->k_tsc >> 40; /* PN5 */
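		/*
		 * This matches the CCMP header layout: the 48-bit PN is
		 * split around the key ID octet, with PN0/PN1 in the first
		 * two bytes and PN2-PN5 in the last four.
		 */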
6655 
6656 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6657 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6658 		/* TX scheduler includes CCMP MIC length. */
6659 		totlen += IEEE80211_CCMP_MICLEN;
6660 	} else {
6661 		/* Trim 802.11 header. */
6662 		m_adj(m, hdrlen);
6663 		tx->sec_ctl = 0;
6664 	}
6665 
6666 	flags |= IWM_TX_CMD_FLG_BT_DIS;
6667 	if (!hasqos)
6668 		flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6669 
6670 	tx->tx_flags |= htole32(flags);
6671 
6672 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6673 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6674 	if (err && err != EFBIG) {
6675 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6676 		m_freem(m);
6677 		return err;
6678 	}
6679 	if (err) {
6680 		/* Too many DMA segments, linearize mbuf. */
6681 		if (m_defrag(m, M_DONTWAIT)) {
6682 			m_freem(m);
6683 			return ENOBUFS;
6684 		}
6685 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6686 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6687 		if (err) {
6688 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6689 			    err);
6690 			m_freem(m);
6691 			return err;
6692 		}
6693 	}
6694 	data->m = m;
6695 	data->in = in;
6696 	data->txmcs = ni->ni_txmcs;
6697 	data->txrate = ni->ni_txrate;
6698 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6699 
6700 	/* Fill TX descriptor. */
6701 	desc->num_tbs = 2 + data->map->dm_nsegs;
6702 
6703 	desc->tbs[0].lo = htole32(data->cmd_paddr);
6704 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6705 	    (TB0_SIZE << 4));
6706 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6707 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6708 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6709 	      + hdrlen + pad - TB0_SIZE) << 4));
6710 
6711 	/* Other DMA segments are for data payload. */
6712 	seg = data->map->dm_segs;
6713 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6714 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
6715 		desc->tbs[i+2].hi_n_len =
6716 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
6717 		    | ((seg->ds_len) << 4));
6718 	}
6719 
6720 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6721 	    BUS_DMASYNC_PREWRITE);
6722 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6723 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6724 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6725 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6726 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6727 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6728 
6729 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6730 
6731 	/* Kick TX ring. */
6732 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6733 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6734 
6735 	/* Mark TX ring as full if we reach a certain threshold. */
6736 	if (++ring->queued > IWM_TX_RING_HIMARK) {
6737 		sc->qfullmsk |= 1 << ring->qid;
6738 	}
6739 
6740 	return 0;
6741 }
6742 
6743 int
6744 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
6745 {
6746 	struct iwm_tx_path_flush_cmd flush_cmd = {
6747 		.sta_id = htole32(IWM_STATION_ID),
6748 		.tid_mask = htole16(0xffff),
6749 	};
6750 	int err;
6751 
6752 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
6753 	    sizeof(flush_cmd), &flush_cmd);
6754 	if (err)
6755 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
6756 	return err;
6757 }
6758 
6759 #define IWM_FLUSH_WAIT_MS	2000
6760 
6761 int
6762 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
6763 {
6764 	int i, err;
6765 
6766 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
6767 		struct iwm_tx_ring *ring = &sc->txq[i];
6768 
6769 		if (i == sc->cmdqid)
6770 			continue;
6771 
6772 		while (ring->queued > 0) {
6773 			err = tsleep_nsec(ring, 0, "iwmflush",
6774 			    MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
6775 			if (err)
6776 				return err;
6777 		}
6778 	}
6779 
6780 	return 0;
6781 }
6782 
6783 void
6784 iwm_led_enable(struct iwm_softc *sc)
6785 {
6786 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
6787 }
6788 
6789 void
6790 iwm_led_disable(struct iwm_softc *sc)
6791 {
6792 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
6793 }
6794 
6795 int
6796 iwm_led_is_enabled(struct iwm_softc *sc)
6797 {
6798 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
6799 }
6800 
6801 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
6802 
6803 void
6804 iwm_led_blink_timeout(void *arg)
6805 {
6806 	struct iwm_softc *sc = arg;
6807 
6808 	if (iwm_led_is_enabled(sc))
6809 		iwm_led_disable(sc);
6810 	else
6811 		iwm_led_enable(sc);
6812 
6813 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
6814 }
6815 
6816 void
6817 iwm_led_blink_start(struct iwm_softc *sc)
6818 {
6819 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
6820 	iwm_led_enable(sc);
6821 }
6822 
6823 void
6824 iwm_led_blink_stop(struct iwm_softc *sc)
6825 {
6826 	timeout_del(&sc->sc_led_blink_to);
6827 	iwm_led_disable(sc);
6828 }
6829 
6830 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
6831 
6832 int
6833 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
6834     struct iwm_beacon_filter_cmd *cmd)
6835 {
6836 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
6837 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
6838 }
6839 
6840 void
6841 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
6842     struct iwm_beacon_filter_cmd *cmd)
6843 {
6844 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
6845 }
6846 
6847 int
6848 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
6849 {
6850 	struct iwm_beacon_filter_cmd cmd = {
6851 		IWM_BF_CMD_CONFIG_DEFAULTS,
6852 		.bf_enable_beacon_filter = htole32(1),
6853 		.ba_enable_beacon_abort = htole32(enable),
6854 	};
6855 
6856 	if (!sc->sc_bf.bf_enabled)
6857 		return 0;
6858 
6859 	sc->sc_bf.ba_enabled = enable;
6860 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
6861 	return iwm_beacon_filter_send_cmd(sc, &cmd);
6862 }
6863 
6864 void
6865 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
6866     struct iwm_mac_power_cmd *cmd)
6867 {
6868 	struct ieee80211com *ic = &sc->sc_ic;
6869 	struct ieee80211_node *ni = &in->in_ni;
6870 	int dtim_period, dtim_msec, keep_alive;
6871 
6872 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
6873 	    in->in_color));
6874 	if (ni->ni_dtimperiod)
6875 		dtim_period = ni->ni_dtimperiod;
6876 	else
6877 		dtim_period = 1;
6878 
6879 	/*
6880 	 * Regardless of power management state the driver must set the
6881 	 * keep-alive period. The firmware will use it for sending keep-alive
6882 	 * NDPs immediately after association. Check that the keep-alive
6883 	 * period is at least 3 * DTIM.
6884 	 */
6885 	dtim_msec = dtim_period * ni->ni_intval;
6886 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
6887 	keep_alive = roundup(keep_alive, 1000) / 1000;
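	/*
	 * Example with illustrative numbers: a DTIM period of 1 and a
	 * beacon interval of 100 (treated as milliseconds here) give
	 * dtim_msec == 100, so keep_alive becomes MAX(300, 25000) and
	 * rounds to 25 seconds.
	 */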
6888 	cmd->keep_alive_seconds = htole16(keep_alive);
6889 
6890 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6891 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6892 }
6893 
6894 int
6895 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
6896 {
6897 	int err;
6898 	int ba_enable;
6899 	struct iwm_mac_power_cmd cmd;
6900 
6901 	memset(&cmd, 0, sizeof(cmd));
6902 
6903 	iwm_power_build_cmd(sc, in, &cmd);
6904 
6905 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
6906 	    sizeof(cmd), &cmd);
6907 	if (err != 0)
6908 		return err;
6909 
6910 	ba_enable = !!(cmd.flags &
6911 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6912 	return iwm_update_beacon_abort(sc, in, ba_enable);
6913 }
6914 
6915 int
6916 iwm_power_update_device(struct iwm_softc *sc)
6917 {
6918 	struct iwm_device_power_cmd cmd = { };
6919 	struct ieee80211com *ic = &sc->sc_ic;
6920 
6921 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6922 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6923 
6924 	return iwm_send_cmd_pdu(sc,
6925 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6926 }
6927 
6928 int
6929 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
6930 {
6931 	struct iwm_beacon_filter_cmd cmd = {
6932 		IWM_BF_CMD_CONFIG_DEFAULTS,
6933 		.bf_enable_beacon_filter = htole32(1),
6934 	};
6935 	int err;
6936 
6937 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
6938 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
6939 
6940 	if (err == 0)
6941 		sc->sc_bf.bf_enabled = 1;
6942 
6943 	return err;
6944 }
6945 
6946 int
6947 iwm_disable_beacon_filter(struct iwm_softc *sc)
6948 {
6949 	struct iwm_beacon_filter_cmd cmd;
6950 	int err;
6951 
6952 	memset(&cmd, 0, sizeof(cmd));
6953 
6954 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
6955 	if (err == 0)
6956 		sc->sc_bf.bf_enabled = 0;
6957 
6958 	return err;
6959 }
6960 
6961 int
6962 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
6963 {
6964 	struct iwm_add_sta_cmd add_sta_cmd;
6965 	int err;
6966 	uint32_t status;
6967 	size_t cmdsize;
6968 	struct ieee80211com *ic = &sc->sc_ic;
6969 
6970 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
6971 		panic("STA already added");
6972 
6973 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6974 
6975 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6976 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
6977 	else
6978 		add_sta_cmd.sta_id = IWM_STATION_ID;
6979 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
6980 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6981 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
6982 		else
6983 			add_sta_cmd.station_type = IWM_STA_LINK;
6984 	}
6985 	add_sta_cmd.mac_id_n_color
6986 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6987 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6988 		int qid;
6989 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
6990 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6991 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
6992 		else
6993 			qid = IWM_AUX_QUEUE;
6994 		in->tfd_queue_msk |= (1 << qid);
6995 	} else {
6996 		int ac;
6997 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6998 			int qid = ac;
6999 			if (isset(sc->sc_enabled_capa,
7000 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7001 				qid += IWM_DQA_MIN_MGMT_QUEUE;
7002 			in->tfd_queue_msk |= (1 << qid);
7003 		}
7004 	}
7005 	if (!update) {
7006 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7007 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7008 			    etherbroadcastaddr);
7009 		else
7010 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7011 			    in->in_macaddr);
7012 	}
7013 	add_sta_cmd.add_modify = update ? 1 : 0;
7014 	add_sta_cmd.station_flags_msk
7015 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
7016 	if (update) {
7017 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
7018 		    IWM_STA_MODIFY_TID_DISABLE_TX);
7019 	}
7020 	add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
7021 	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);
7022 
7023 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7024 		add_sta_cmd.station_flags_msk
7025 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
7026 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
7027 
7028 		if (iwm_mimo_enabled(sc)) {
7029 			if (in->in_ni.ni_rxmcs[1] != 0) {
7030 				add_sta_cmd.station_flags |=
7031 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7032 			}
7033 			if (in->in_ni.ni_rxmcs[2] != 0) {
7034 				add_sta_cmd.station_flags |=
7035 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
7036 			}
7037 		}
7038 
7039 		if (ieee80211_node_supports_ht_chan40(&in->in_ni)) {
7040 			add_sta_cmd.station_flags |= htole32(
7041 			    IWM_STA_FLG_FAT_EN_40MHZ);
7042 		}
7043 
7044 		add_sta_cmd.station_flags
7045 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
7046 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
7047 		case IEEE80211_AMPDU_PARAM_SS_2:
7048 			add_sta_cmd.station_flags
7049 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
7050 			break;
7051 		case IEEE80211_AMPDU_PARAM_SS_4:
7052 			add_sta_cmd.station_flags
7053 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
7054 			break;
7055 		case IEEE80211_AMPDU_PARAM_SS_8:
7056 			add_sta_cmd.station_flags
7057 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
7058 			break;
7059 		case IEEE80211_AMPDU_PARAM_SS_16:
7060 			add_sta_cmd.station_flags
7061 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
7062 			break;
7063 		default:
7064 			break;
7065 		}
7066 	}
7067 
7068 	status = IWM_ADD_STA_SUCCESS;
7069 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7070 		cmdsize = sizeof(add_sta_cmd);
7071 	else
7072 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7073 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
7074 	    &add_sta_cmd, &status);
7075 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7076 		err = EIO;
7077 
7078 	return err;
7079 }
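/*
 * A note on the two command sizes used above: firmware which does not
 * advertise IWM_UCODE_TLV_API_STA_TYPE predates the station_type field,
 * and struct iwm_add_sta_cmd_v7 is laid out as a prefix of struct
 * iwm_add_sta_cmd, so truncating the payload to the v7 size yields a
 * valid v7 command.
 */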
7080 
7081 int
7082 iwm_add_aux_sta(struct iwm_softc *sc)
7083 {
7084 	struct iwm_add_sta_cmd cmd;
7085 	int err, qid;
7086 	uint32_t status;
7087 	size_t cmdsize;
7088 
7089 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7090 		qid = IWM_DQA_AUX_QUEUE;
7091 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
7092 		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
7093 	} else {
7094 		qid = IWM_AUX_QUEUE;
7095 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
7096 	}
7097 	if (err)
7098 		return err;
7099 
7100 	memset(&cmd, 0, sizeof(cmd));
7101 	cmd.sta_id = IWM_AUX_STA_ID;
7102 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7103 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
7104 	cmd.mac_id_n_color =
7105 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
7106 	cmd.tfd_queue_msk = htole32(1 << qid);
7107 	cmd.tid_disable_tx = htole16(0xffff);
7108 
7109 	status = IWM_ADD_STA_SUCCESS;
7110 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7111 		cmdsize = sizeof(cmd);
7112 	else
7113 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7114 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
7115 	    &status);
7116 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7117 		err = EIO;
7118 
7119 	return err;
7120 }
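/*
 * The auxiliary station is not associated with any BSS; the scan code
 * uses IWM_AUX_STA_ID as the transmitter for probe requests.  Setting
 * tid_disable_tx to 0xffff disables all TIDs for this station, which,
 * per the driver's use of tid_disable_ampdu in iwm_add_sta_cmd() above,
 * amounts to never aggregating its frames.
 */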
7121 
7122 int
7123 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
7124 {
7125 	struct iwm_add_sta_cmd cmd;
7126 	int err;
7127 	uint32_t status;
7128 	size_t cmdsize;
7129 
7130 	memset(&cmd, 0, sizeof(cmd));
7131 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7132 	    in->in_color));
7133 	cmd.sta_id = IWM_STATION_ID;
7134 	cmd.add_modify = IWM_STA_MODE_MODIFY;
7135 	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
7136 	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);
7137 
7138 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7139 		cmdsize = sizeof(cmd);
7140 	else
7141 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7142 
7143 	status = IWM_ADD_STA_SUCCESS;
7144 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
7145 	    cmdsize, &cmd, &status);
7146 	if (err) {
7147 		printf("%s: could not update sta (error %d)\n",
7148 		    DEVNAME(sc), err);
7149 		return err;
7150 	}
7151 
7152 	switch (status & IWM_ADD_STA_STATUS_MASK) {
7153 	case IWM_ADD_STA_SUCCESS:
7154 		break;
7155 	default:
7156 		err = EIO;
7157 		printf("%s: could not %s draining for station\n",
7158 		    DEVNAME(sc), drain ? "enable" : "disable");
7159 		break;
7160 	}
7161 
7162 	return err;
7163 }
7164 
7165 int
7166 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
7167 {
7168 	int err;
7169 
7170 	sc->sc_flags |= IWM_FLAG_TXFLUSH;
7171 
7172 	err = iwm_drain_sta(sc, in, 1);
7173 	if (err)
7174 		goto done;
7175 
7176 	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
7177 	if (err) {
7178 		printf("%s: could not flush Tx path (error %d)\n",
7179 		    DEVNAME(sc), err);
7180 		goto done;
7181 	}
7182 
7183 	/*
7184 	 * Flushing Tx rings may fail if the AP has disappeared.
7185 	 * We can rely on iwm_newstate_task() to reset everything and begin
7186 	 * scanning again if we are left with outstanding frames on queues.
7187 	 */
7188 	err = iwm_wait_tx_queues_empty(sc);
7189 	if (err)
7190 		goto done;
7191 
7192 	err = iwm_drain_sta(sc, in, 0);
7193 done:
7194 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
7195 	return err;
7196 }
7197 
7198 int
7199 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
7200 {
7201 	struct ieee80211com *ic = &sc->sc_ic;
7202 	struct iwm_rm_sta_cmd rm_sta_cmd;
7203 	int err;
7204 
7205 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
7206 		panic("sta already removed");
7207 
7208 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
7209 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7210 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7211 	else
7212 		rm_sta_cmd.sta_id = IWM_STATION_ID;
7213 
7214 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7215 	    &rm_sta_cmd);
7216 
7217 	return err;
7218 }
7219 
7220 uint16_t
7221 iwm_scan_rx_chain(struct iwm_softc *sc)
7222 {
7223 	uint16_t rx_chain;
7224 	uint8_t rx_ant;
7225 
7226 	rx_ant = iwm_fw_valid_rx_ant(sc);
7227 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
7228 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
7229 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
7230 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
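	/*
	 * Sketch of the resulting bitfield: with both antennas valid
	 * (rx_ant = 0x3) the valid, force, and force-MIMO fields all
	 * select both chains, and the driver-force bit tells the
	 * firmware to honor this selection rather than picking Rx
	 * chains itself.
	 */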
7231 	return htole16(rx_chain);
7232 }
7233 
7234 uint32_t
7235 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
7236 {
7237 	uint32_t tx_ant;
7238 	int i, ind;
7239 
7240 	for (i = 0, ind = sc->sc_scan_last_antenna;
7241 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
7242 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
7243 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
7244 			sc->sc_scan_last_antenna = ind;
7245 			break;
7246 		}
7247 	}
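	/*
	 * The loop above rotates sc_scan_last_antenna through the valid
	 * TX antennas; e.g. with a valid mask of 0x3, successive scan
	 * requests alternate between antenna A and antenna B.
	 */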
7248 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
7249 
7250 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
7251 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
7252 				   tx_ant);
7253 	else
7254 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
7255 }
7256 
7257 uint8_t
7258 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
7259     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
7260 {
7261 	struct ieee80211com *ic = &sc->sc_ic;
7262 	struct ieee80211_channel *c;
7263 	uint8_t nchan;
7264 
7265 	for (nchan = 0, c = &ic->ic_channels[1];
7266 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7267 	    nchan < sc->sc_capa_n_scan_channels;
7268 	    c++) {
7269 		if (c->ic_flags == 0)
7270 			continue;
7271 
7272 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
7273 		chan->iter_count = htole16(1);
7274 		chan->iter_interval = 0;
7275 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
7276 		/*
7277 		 * Firmware may become unresponsive when asked to send
7278 		 * a directed probe request on a passive channel.
7279 		 */
7280 		if (n_ssids != 0 && !bgscan &&
7281 		    (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
7282 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
7283 		chan++;
7284 		nchan++;
7285 	}
7286 
7287 	return nchan;
7288 }
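/*
 * In both channel fill routines (this one and the UMAC variant below),
 * channels with ic_flags == 0 are unavailable under the current
 * regulatory settings and are skipped, and directed probing of SSID 0
 * is only enabled on active channels because, as noted above, firmware
 * may hang when asked to probe on a passive channel.
 */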
7289 
7290 uint8_t
7291 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
7292     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
7293 {
7294 	struct ieee80211com *ic = &sc->sc_ic;
7295 	struct ieee80211_channel *c;
7296 	uint8_t nchan;
7297 
7298 	for (nchan = 0, c = &ic->ic_channels[1];
7299 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7300 	    nchan < sc->sc_capa_n_scan_channels;
7301 	    c++) {
7302 		if (c->ic_flags == 0)
7303 			continue;
7304 
7305 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
7306 		chan->iter_count = 1;
7307 		chan->iter_interval = htole16(0);
7308 		/*
7309 		 * Firmware may become unresponsive when asked to send
7310 		 * a directed probe request on a passive channel.
7311 		 */
7312 		if (n_ssids != 0 && !bgscan &&
7313 		    (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
7314 			chan->flags = htole32(1 << 0); /* select SSID 0 */
7315 		chan++;
7316 		nchan++;
7317 	}
7318 
7319 	return nchan;
7320 }
7321 
7322 int
7323 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
7324 {
7325 	struct iwm_scan_probe_req preq2;
7326 	int err, i;
7327 
7328 	err = iwm_fill_probe_req(sc, &preq2);
7329 	if (err)
7330 		return err;
7331 
7332 	preq1->mac_header = preq2.mac_header;
7333 	for (i = 0; i < nitems(preq1->band_data); i++)
7334 		preq1->band_data[i] = preq2.band_data[i];
7335 	preq1->common_data = preq2.common_data;
7336 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
7337 	return 0;
7338 }
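/*
 * The v1 and current probe request structures differ in layout (their
 * band_data arrays are not the same length), so the conversion above
 * copies member by member, bounded by the v1 array sizes, rather than
 * using a single memcpy.
 */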
7339 
7340 int
7341 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7342 {
7343 	struct ieee80211com *ic = &sc->sc_ic;
7344 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7345 	struct ieee80211_rateset *rs;
7346 	size_t remain = sizeof(preq->buf);
7347 	uint8_t *frm, *pos;
7348 
7349 	memset(preq, 0, sizeof(*preq));
7350 
7351 	if (remain < sizeof(*wh) + 2)
7352 		return ENOBUFS;
7353 
7354 	/*
7355 	 * Build a probe request frame.  Most of the following code is a
7356 	 * copy & paste of what is done in net80211.
7357 	 */
7358 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7359 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7360 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7361 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7362 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7363 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7364 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7365 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7366 
7367 	frm = (uint8_t *)(wh + 1);
7368 
7369 	*frm++ = IEEE80211_ELEMID_SSID;
7370 	*frm++ = 0;
7371 	/* hardware inserts SSID */
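	/*
	 * Rough layout of preq->buf once filled in (the offset of each
	 * region is reported to firmware via the fields set below):
	 *
	 *   [ 802.11 header | empty SSID IE | 2GHz IEs | 5GHz IEs | HT IEs ]
	 */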
7372 
7373 	/* Tell firmware where the MAC header and SSID IE are. */
7374 	preq->mac_header.offset = 0;
7375 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7376 	remain -= frm - (uint8_t *)wh;
7377 
7378 	/* Fill in 2GHz IEs and tell firmware where they are. */
7379 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7380 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7381 		if (remain < 4 + rs->rs_nrates)
7382 			return ENOBUFS;
7383 	} else if (remain < 2 + rs->rs_nrates)
7384 		return ENOBUFS;
7385 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7386 	pos = frm;
7387 	frm = ieee80211_add_rates(frm, rs);
7388 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7389 		frm = ieee80211_add_xrates(frm, rs);
7390 	remain -= frm - pos;
7391 
7392 	if (isset(sc->sc_enabled_capa,
7393 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7394 		if (remain < 3)
7395 			return ENOBUFS;
7396 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7397 		*frm++ = 1;
7398 		*frm++ = 0;
7399 		remain -= 3;
7400 	}
7401 	preq->band_data[0].len = htole16(frm - pos);
7402 
7403 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7404 		/* Fill in 5GHz IEs. */
7405 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7406 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7407 			if (remain < 4 + rs->rs_nrates)
7408 				return ENOBUFS;
7409 		} else if (remain < 2 + rs->rs_nrates)
7410 			return ENOBUFS;
7411 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7412 		pos = frm;
7413 		frm = ieee80211_add_rates(frm, rs);
7414 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7415 			frm = ieee80211_add_xrates(frm, rs);
7416 		preq->band_data[1].len = htole16(frm - pos);
7417 		remain -= frm - pos;
7418 	}
7419 
7420 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7421 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7422 	pos = frm;
7423 	if (ic->ic_flags & IEEE80211_F_HTON) {
7424 		if (remain < 28)
7425 			return ENOBUFS;
7426 		frm = ieee80211_add_htcaps(frm, ic);
7427 		/* XXX add WME info? */
7428 	}
7429 	preq->common_data.len = htole16(frm - pos);
7430 
7431 	return 0;
7432 }
7433 
7434 int
7435 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7436 {
7437 	struct ieee80211com *ic = &sc->sc_ic;
7438 	struct iwm_host_cmd hcmd = {
7439 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
7440 		.len = { 0, },
7441 		.data = { NULL, },
7442 		.flags = 0,
7443 	};
7444 	struct iwm_scan_req_lmac *req;
7445 	struct iwm_scan_probe_req_v1 *preq;
7446 	size_t req_len;
7447 	int err, async = bgscan;
7448 
7449 	req_len = sizeof(struct iwm_scan_req_lmac) +
7450 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7451 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7452 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7453 		return ENOMEM;
7454 	req = malloc(req_len, M_DEVBUF,
7455 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7456 	if (req == NULL)
7457 		return ENOMEM;
7458 
7459 	hcmd.len[0] = (uint16_t)req_len;
7460 	hcmd.data[0] = (void *)req;
7461 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7462 
7463 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7464 	req->active_dwell = 10;
7465 	req->passive_dwell = 110;
7466 	req->fragmented_dwell = 44;
7467 	req->extended_dwell = 90;
7468 	if (bgscan) {
7469 		req->max_out_time = htole32(120);
7470 		req->suspend_time = htole32(120);
7471 	} else {
7472 		req->max_out_time = htole32(0);
7473 		req->suspend_time = htole32(0);
7474 	}
7475 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
7476 	req->rx_chain_select = iwm_scan_rx_chain(sc);
7477 	req->iter_num = htole32(1);
7478 	req->delay = 0;
7479 
7480 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
7481 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
7482 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
7483 	if (ic->ic_des_esslen == 0)
7484 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
7485 	else
7486 		req->scan_flags |=
7487 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
7488 	if (isset(sc->sc_enabled_capa,
7489 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
7490 	    isset(sc->sc_enabled_capa,
7491 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
7492 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
7493 
7494 	req->flags = htole32(IWM_PHY_BAND_24);
7495 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7496 		req->flags |= htole32(IWM_PHY_BAND_5);
7497 	req->filter_flags =
7498 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
7499 
7500 	/* Tx flags 2 GHz. */
7501 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7502 	    IWM_TX_CMD_FLG_BT_DIS);
7503 	req->tx_cmd[0].rate_n_flags =
7504 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7505 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
7506 
7507 	/* Tx flags 5 GHz. */
7508 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7509 	    IWM_TX_CMD_FLG_BT_DIS);
7510 	req->tx_cmd[1].rate_n_flags =
7511 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7512 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
7513 
7514 	/* Check if we're doing an active directed scan. */
7515 	if (ic->ic_des_esslen != 0) {
7516 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7517 		req->direct_scan[0].len = ic->ic_des_esslen;
7518 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
7519 		    ic->ic_des_esslen);
7520 	}
7521 
7522 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
7523 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
7524 	    ic->ic_des_esslen != 0, bgscan);
7525 
7526 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
7527 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7528 	    sc->sc_capa_n_scan_channels));
7529 	err = iwm_fill_probe_req_v1(sc, preq);
7530 	if (err) {
7531 		free(req, M_DEVBUF, req_len);
7532 		return err;
7533 	}
7534 
7535 	/* Specify the scan plan: We'll do one iteration. */
7536 	req->schedule[0].iterations = 1;
7537 	req->schedule[0].full_scan_mul = 1;
7538 
7539 	/* Disable EBS. */
7540 	req->channel_opt[0].non_ebs_ratio = 1;
7541 	req->channel_opt[1].non_ebs_ratio = 1;
7542 
7543 	err = iwm_send_cmd(sc, &hcmd);
7544 	free(req, M_DEVBUF, req_len);
7545 	return err;
7546 }
7547 
7548 int
7549 iwm_config_umac_scan(struct iwm_softc *sc)
7550 {
7551 	struct ieee80211com *ic = &sc->sc_ic;
7552 	struct iwm_scan_config *scan_config;
7553 	int err, nchan;
7554 	size_t cmd_size;
7555 	struct ieee80211_channel *c;
7556 	struct iwm_host_cmd hcmd = {
7557 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
7558 		.flags = 0,
7559 	};
7560 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
7561 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
7562 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
7563 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
7564 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
7565 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
7566 	    IWM_SCAN_CONFIG_RATE_54M);
7567 
7568 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7569 
7570 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
7571 	if (scan_config == NULL)
7572 		return ENOMEM;
7573 
7574 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7575 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7576 	scan_config->legacy_rates = htole32(rates |
7577 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
7578 
7579 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7580 	scan_config->dwell_active = 10;
7581 	scan_config->dwell_passive = 110;
7582 	scan_config->dwell_fragmented = 44;
7583 	scan_config->dwell_extended = 90;
7584 	scan_config->out_of_channel_time = htole32(0);
7585 	scan_config->suspend_time = htole32(0);
7586 
7587 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7588 
7589 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
7590 	scan_config->channel_flags = 0;
7591 
7592 	for (c = &ic->ic_channels[1], nchan = 0;
7593 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7594 	    nchan < sc->sc_capa_n_scan_channels; c++) {
7595 		if (c->ic_flags == 0)
7596 			continue;
7597 		scan_config->channel_array[nchan++] =
7598 		    ieee80211_mhz2ieee(c->ic_freq, 0);
7599 	}
7600 
7601 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
7602 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
7603 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
7604 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
7605 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
7606 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
7607 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
7608 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
7609 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
7610 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
7611 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
7612 
7613 	hcmd.data[0] = scan_config;
7614 	hcmd.len[0] = cmd_size;
7615 
7616 	err = iwm_send_cmd(sc, &hcmd);
7617 	free(scan_config, M_DEVBUF, cmd_size);
7618 	return err;
7619 }
7620 
7621 int
7622 iwm_umac_scan_size(struct iwm_softc *sc)
7623 {
7624 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
7625 	int tail_size;
7626 
7627 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7628 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
7629 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7630 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
7631 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7632 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
7633 	else
7634 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
7635 
7636 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
7637 	    sc->sc_capa_n_scan_channels + tail_size;
7638 }
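/*
 * For example, firmware advertising both ADAPTIVE_DWELL_V2 and
 * SCAN_EXT_CHAN_VER gets the v8 header plus the v2 tail; firmware with
 * neither ADAPTIVE_DWELL flag nor EXT_CHAN_VER falls back to the v1
 * header and v1 tail.
 */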
7639 
7640 struct iwm_scan_umac_chan_param *
7641 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7642     struct iwm_scan_req_umac *req)
7643 {
7644 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7645 		return &req->v8.channel;
7646 
7647 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7648 		return &req->v7.channel;
7649 
7650 	return &req->v1.channel;
7651 }
7652 
7653 void *
7654 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7655 {
7656 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7657 		return (void *)&req->v8.data;
7658 
7659 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7660 		return (void *)&req->v7.data;
7661 
7662 	return (void *)&req->v1.data;
7664 }
7665 
7666 /* adaptive dwell max budget time [TU] for full scan */
7667 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7668 /* adaptive dwell max budget time [TU] for directed scan */
7669 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7670 /* adaptive dwell default high band APs number */
7671 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7672 /* adaptive dwell default low band APs number */
7673 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7674 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7675 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7676 
7677 int
7678 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7679 {
7680 	struct ieee80211com *ic = &sc->sc_ic;
7681 	struct iwm_host_cmd hcmd = {
7682 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
7683 		.len = { 0, },
7684 		.data = { NULL, },
7685 		.flags = 0,
7686 	};
7687 	struct iwm_scan_req_umac *req;
7688 	void *cmd_data, *tail_data;
7689 	struct iwm_scan_req_umac_tail_v2 *tail;
7690 	struct iwm_scan_req_umac_tail_v1 *tailv1;
7691 	struct iwm_scan_umac_chan_param *chanparam;
7692 	size_t req_len;
7693 	int err, async = bgscan;
7694 
7695 	req_len = iwm_umac_scan_size(sc);
7696 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
7697 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
7698 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7699 		return ERANGE;
7700 	req = malloc(req_len, M_DEVBUF,
7701 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7702 	if (req == NULL)
7703 		return ENOMEM;
7704 
7705 	hcmd.len[0] = (uint16_t)req_len;
7706 	hcmd.data[0] = (void *)req;
7707 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7708 
7709 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7710 		req->v7.adwell_default_n_aps_social =
7711 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7712 		req->v7.adwell_default_n_aps =
7713 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
7714 
7715 		if (ic->ic_des_esslen != 0)
7716 			req->v7.adwell_max_budget =
7717 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7718 		else
7719 			req->v7.adwell_max_budget =
7720 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7721 
7722 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7723 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
7724 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
7725 
7726 		if (isset(sc->sc_ucode_api,
7727 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
7728 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
7729 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
7730 		} else {
7731 			req->v7.active_dwell = 10;
7732 			req->v7.passive_dwell = 110;
7733 			req->v7.fragmented_dwell = 44;
7734 		}
7735 	} else {
7736 		/* These timings correspond to iwlwifi's UNASSOC scan. */
7737 		req->v1.active_dwell = 10;
7738 		req->v1.passive_dwell = 110;
7739 		req->v1.fragmented_dwell = 44;
7740 		req->v1.extended_dwell = 90;
7741 
7742 		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7743 	}
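	/*
	 * With adaptive dwell, firmware scales per-channel dwell time by
	 * the number of APs it expects to find there; the defaults set
	 * above (e.g. 10 expected APs on the social channels 1, 6 and
	 * 11) bias dwell time toward busy 2 GHz channels.
	 */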
7744 
7745 	if (bgscan) {
7746 		const uint32_t timeout = htole32(120);
7747 		if (isset(sc->sc_ucode_api,
7748 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
7749 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7750 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7751 		} else if (isset(sc->sc_ucode_api,
7752 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7753 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7754 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
7755 		} else {
7756 			req->v1.max_out_time = timeout;
7757 			req->v1.suspend_time = timeout;
7758 		}
7759 	}
7760 
7761 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7762 
7763 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
7764 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
7765 	chanparam->count = iwm_umac_scan_fill_channels(sc,
7766 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
7767 	    ic->ic_des_esslen != 0, bgscan);
7768 	chanparam->flags = 0;
7769 
7770 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
7771 	    sc->sc_capa_n_scan_channels;
7772 	tail = tail_data;
7773 	/* tail v1 layout differs in preq and direct_scan member fields. */
7774 	tailv1 = tail_data;
7775 
7776 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
7777 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
7778 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
7779 		req->v8.general_flags2 =
7780 			IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
7781 	}
7782 
7783 	/* Check if we're doing an active directed scan. */
7784 	if (ic->ic_des_esslen != 0) {
7785 		if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
7786 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7787 			tail->direct_scan[0].len = ic->ic_des_esslen;
7788 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
7789 			    ic->ic_des_esslen);
7790 		} else {
7791 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7792 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
7793 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
7794 			    ic->ic_des_esslen);
7795 		}
7796 		req->general_flags |=
7797 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
7798 	} else
7799 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
7800 
7801 	if (isset(sc->sc_enabled_capa,
7802 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
7803 	    isset(sc->sc_enabled_capa,
7804 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
7805 		req->general_flags |=
7806 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
7807 
7808 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7809 		req->general_flags |=
7810 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
7811 	} else {
7812 		req->general_flags |=
7813 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
7814 	}
7815 
7816 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7817 		err = iwm_fill_probe_req(sc, &tail->preq);
7818 	else
7819 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
7820 	if (err) {
7821 		free(req, M_DEVBUF, req_len);
7822 		return err;
7823 	}
7824 
7825 	/* Specify the scan plan: We'll do one iteration. */
7826 	tail->schedule[0].interval = 0;
7827 	tail->schedule[0].iter_count = 1;
7828 
7829 	err = iwm_send_cmd(sc, &hcmd);
7830 	free(req, M_DEVBUF, req_len);
7831 	return err;
7832 }
7833 
7834 void
7835 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
7836 {
7837 	struct ieee80211com *ic = &sc->sc_ic;
7838 	struct ifnet *ifp = IC2IFP(ic);
7839 	char alpha2[3];
7840 
7841 	snprintf(alpha2, sizeof(alpha2), "%c%c",
7842 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
7843 
7844 	if (ifp->if_flags & IFF_DEBUG) {
7845 		printf("%s: firmware has detected regulatory domain '%s' "
7846 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
7847 	}
7848 
7849 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
7850 }
7851 
7852 uint8_t
7853 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
7854 {
7855 	int i;
7856 	uint8_t rval;
7857 
7858 	for (i = 0; i < rs->rs_nrates; i++) {
7859 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
7860 		if (rval == iwm_rates[ridx].rate)
7861 			return rs->rs_rates[i];
7862 	}
7863 
7864 	return 0;
7865 }
7866 
7867 int
7868 iwm_rval2ridx(int rval)
7869 {
7870 	int ridx;
7871 
7872 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
7873 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
7874 			continue;
7875 		if (rval == iwm_rates[ridx].rate)
7876 			break;
7877 	}
7878 
7879 	return ridx;
7880 }
7881 
7882 void
7883 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
7884     int *ofdm_rates)
7885 {
7886 	struct ieee80211_node *ni = &in->in_ni;
7887 	struct ieee80211_rateset *rs = &ni->ni_rates;
7888 	int lowest_present_ofdm = -1;
7889 	int lowest_present_cck = -1;
7890 	uint8_t cck = 0;
7891 	uint8_t ofdm = 0;
7892 	int i;
7893 
7894 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
7895 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
7896 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
7897 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7898 				continue;
7899 			cck |= (1 << i);
7900 			if (lowest_present_cck == -1 || lowest_present_cck > i)
7901 				lowest_present_cck = i;
7902 		}
7903 	}
7904 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
7905 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7906 			continue;
7907 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
7908 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
7909 			lowest_present_ofdm = i;
7910 	}
7911 
7912 	/*
7913 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
7914 	 * variables. This isn't sufficient though, as there might not
7915 	 * be all the right rates in the bitmap. E.g. if the only basic
7916 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
7917 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
7918 	 *
7919 	 *    [...] a STA responding to a received frame shall transmit
7920 	 *    its Control Response frame [...] at the highest rate in the
7921 	 *    BSSBasicRateSet parameter that is less than or equal to the
7922 	 *    rate of the immediately previous frame in the frame exchange
7923 	 *    sequence ([...]) and that is of the same modulation class
7924 	 *    ([...]) as the received frame. If no rate contained in the
7925 	 *    BSSBasicRateSet parameter meets these conditions, then the
7926 	 *    control frame sent in response to a received frame shall be
7927 	 *    transmitted at the highest mandatory rate of the PHY that is
7928 	 *    less than or equal to the rate of the received frame, and
7929 	 *    that is of the same modulation class as the received frame.
7930 	 *
7931 	 * As a consequence, we need to add all mandatory rates that are
7932 	 * lower than all of the basic rates to these bitmaps.
7933 	 */
7934 
7935 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
7936 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
7937 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
7938 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
7939 	/* 6M already there or needed so always add */
7940 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
7941 
7942 	/*
7943 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
7944 	 * Note, however:
7945 	 *  - if no CCK rates are basic, it must be ERP since there must
7946 	 *    be some basic rates at all, so they're OFDM => ERP PHY
7947 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
7948 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
7949 	 *  - if 5.5M is basic, 1M and 2M are mandatory
7950 	 *  - if 2M is basic, 1M is mandatory
7951 	 *  - if 1M is basic, that's the only valid ACK rate.
7952 	 * As a consequence, it's not as complicated as it sounds, just add
7953 	 * any lower rates to the ACK rate bitmap.
7954 	 */
7955 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
7956 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
7957 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
7958 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
7959 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
7960 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
7961 	/* 1M already there or needed so always add */
7962 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
7963 
7964 	*cck_rates = cck;
7965 	*ofdm_rates = ofdm;
7966 }
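/*
 * Worked example: if the AP's only basic rates are 5.5 and 11 Mbps,
 * the loops above set the 5.5 and 11 Mbps CCK bits, and the
 * mandatory-rate fixups then add 2 Mbps and 1 Mbps, yielding a CCK ACK
 * bitmap of { 1, 2, 5.5, 11 }; with no basic OFDM rates present at
 * all, the OFDM bitmap ends up containing only the always-added 6 Mbps.
 */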
7967 
7968 void
7969 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
7970     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
7971 {
7972 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
7973 	struct ieee80211com *ic = &sc->sc_ic;
7974 	struct ieee80211_node *ni = ic->ic_bss;
7975 	int cck_ack_rates, ofdm_ack_rates;
7976 	int i;
7977 
7978 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7979 	    in->in_color));
7980 	cmd->action = htole32(action);
7981 
7982 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7983 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
7984 	else if (ic->ic_opmode == IEEE80211_M_STA)
7985 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
7986 	else
7987 		panic("unsupported operating mode %d", ic->ic_opmode);
7988 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
7989 
7990 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
7991 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7992 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
7993 		return;
7994 	}
7995 
7996 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
7997 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
7998 	cmd->cck_rates = htole32(cck_ack_rates);
7999 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
8000 
8001 	cmd->cck_short_preamble
8002 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
8003 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
8004 	cmd->short_slot
8005 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
8006 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
8007 
8008 	for (i = 0; i < EDCA_NUM_AC; i++) {
8009 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
8010 		int txf = iwm_ac_to_tx_fifo[i];
8011 
8012 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
8013 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
8014 		cmd->ac[txf].aifsn = ac->ac_aifsn;
8015 		cmd->ac[txf].fifos_mask = (1 << txf);
8016 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
8017 	}
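	/*
	 * Example of the conversions above: an ECWmin of 4 becomes a
	 * CWmin of 2^4 - 1 = 15 slots via IWM_EXP2(), and ac_txoplimit,
	 * which net80211 keeps in units of 32 microseconds, is scaled
	 * to microseconds for the firmware.
	 */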
8018 	if (ni->ni_flags & IEEE80211_NODE_QOS)
8019 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
8020 
8021 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8022 		enum ieee80211_htprot htprot =
8023 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
8024 		switch (htprot) {
8025 		case IEEE80211_HTPROT_NONE:
8026 			break;
8027 		case IEEE80211_HTPROT_NONMEMBER:
8028 		case IEEE80211_HTPROT_NONHT_MIXED:
8029 			cmd->protection_flags |=
8030 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8031 			    IWM_MAC_PROT_FLG_FAT_PROT);
8032 			break;
8033 		case IEEE80211_HTPROT_20MHZ:
8034 			if (in->in_phyctxt &&
8035 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8036 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
8037 				cmd->protection_flags |=
8038 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8039 				    IWM_MAC_PROT_FLG_FAT_PROT);
8040 			}
8041 			break;
8042 		default:
8043 			break;
8044 		}
8045 
8046 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
8047 	}
8048 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8049 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
8050 
8051 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
8052 #undef IWM_EXP2
8053 }
8054 
8055 void
8056 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8057     struct iwm_mac_data_sta *sta, int assoc)
8058 {
8059 	struct ieee80211_node *ni = &in->in_ni;
8060 	uint32_t dtim_off;
8061 	uint64_t tsf;
8062 
8063 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
8064 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
8065 	tsf = letoh64(tsf);
8066 
8067 	sta->is_assoc = htole32(assoc);
8068 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
8069 	sta->dtim_tsf = htole64(tsf + dtim_off);
8070 	sta->bi = htole32(ni->ni_intval);
8071 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
8072 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
8073 	sta->dtim_reciprocal = htole32(iwm_reciprocal(ni->ni_intval * ni->ni_dtimperiod));
8074 	sta->listen_interval = htole32(10);
8075 	sta->assoc_id = htole32(ni->ni_associd);
8076 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
8077 }
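/*
 * The *_reciprocal fields hold a fixed-point inverse (iwm_reciprocal()
 * computes 0xffffffff / v) so the firmware can substitute a
 * multiplication for division by the beacon or DTIM interval when
 * tracking timing.
 */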
8078 
8079 int
8080 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8081     int assoc)
8082 {
8083 	struct ieee80211com *ic = &sc->sc_ic;
8084 	struct ieee80211_node *ni = &in->in_ni;
8085 	struct iwm_mac_ctx_cmd cmd;
8086 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8087 
8088 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
8089 		panic("MAC already added");
8090 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
8091 		panic("MAC already removed");
8092 
8093 	memset(&cmd, 0, sizeof(cmd));
8094 
8095 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8096 
8097 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8098 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
8099 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
8100 		    IWM_MAC_FILTER_ACCEPT_GRP |
8101 		    IWM_MAC_FILTER_IN_BEACON |
8102 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
8103 		    IWM_MAC_FILTER_IN_CRC32);
8104 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
8105 		/*
8106 		 * Allow beacons to pass through as long as we are not
8107 		 * associated or we do not have dtim period information.
8108 		 */
8109 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
8110 	else
8111 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8112 
8113 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8114 }
8115 
8116 int
8117 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8118 {
8119 	struct iwm_time_quota_cmd_v1 cmd;
8120 	int i, idx, num_active_macs, quota, quota_rem;
8121 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
8122 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
8123 	uint16_t id;
8124 
8125 	memset(&cmd, 0, sizeof(cmd));
8126 
8127 	/* currently, PHY ID == binding ID */
8128 	if (in && in->in_phyctxt) {
8129 		id = in->in_phyctxt->id;
8130 		KASSERT(id < IWM_MAX_BINDINGS);
8131 		colors[id] = in->in_phyctxt->color;
8132 		if (running)
8133 			n_ifs[id] = 1;
8134 	}
8135 
8136 	/*
8137 	 * The FW's scheduling session consists of
8138 	 * IWM_MAX_QUOTA fragments. Divide these fragments
8139 	 * equally between all the bindings that require quota.
8140 	 */
8141 	num_active_macs = 0;
8142 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8143 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
8144 		num_active_macs += n_ifs[i];
8145 	}
8146 
8147 	quota = 0;
8148 	quota_rem = 0;
8149 	if (num_active_macs) {
8150 		quota = IWM_MAX_QUOTA / num_active_macs;
8151 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
8152 	}
8153 
8154 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8155 		if (colors[i] < 0)
8156 			continue;
8157 
8158 		cmd.quotas[idx].id_and_color =
8159 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
8160 
8161 		if (n_ifs[i] <= 0) {
8162 			cmd.quotas[idx].quota = htole32(0);
8163 			cmd.quotas[idx].max_duration = htole32(0);
8164 		} else {
8165 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8166 			cmd.quotas[idx].max_duration = htole32(0);
8167 		}
8168 		idx++;
8169 	}
8170 
8171 	/* Give the remainder of the session to the first binding */
8172 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
8173 
8174 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8175 		struct iwm_time_quota_cmd cmd_v2;
8176 
8177 		memset(&cmd_v2, 0, sizeof(cmd_v2));
8178 		for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8179 			cmd_v2.quotas[i].id_and_color =
8180 			    cmd.quotas[i].id_and_color;
8181 			cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
8182 			cmd_v2.quotas[i].max_duration =
8183 			    cmd.quotas[i].max_duration;
8184 		}
8185 		return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8186 		    sizeof(cmd_v2), &cmd_v2);
8187 	}
8188 
8189 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8190 }
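/*
 * Illustrative split: with one active binding the loop above grants it
 * quota = IWM_MAX_QUOTA fragments with no remainder; with two bindings
 * each would receive half, and any leftover fragments go to the first
 * binding as done above.
 */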
8191 
8192 void
8193 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8194 {
8195 	int s = splnet();
8196 
8197 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8198 		splx(s);
8199 		return;
8200 	}
8201 
8202 	refcnt_take(&sc->task_refs);
8203 	if (!task_add(taskq, task))
8204 		refcnt_rele_wake(&sc->task_refs);
8205 	splx(s);
8206 }
8207 
8208 void
8209 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8210 {
8211 	if (task_del(taskq, task))
8212 		refcnt_rele(&sc->task_refs);
8213 }
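/*
 * iwm_add_task() and iwm_del_task() keep sc->task_refs in step with the
 * number of queued tasks, so teardown code can refcnt_finalize() the
 * counter and sleep until every pending task has either run or been
 * cancelled.
 */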
8214 
8215 int
8216 iwm_scan(struct iwm_softc *sc)
8217 {
8218 	struct ieee80211com *ic = &sc->sc_ic;
8219 	struct ifnet *ifp = IC2IFP(ic);
8220 	int err;
8221 
8222 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8223 		err = iwm_scan_abort(sc);
8224 		if (err) {
8225 			printf("%s: could not abort background scan\n",
8226 			    DEVNAME(sc));
8227 			return err;
8228 		}
8229 	}
8230 
8231 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8232 		err = iwm_umac_scan(sc, 0);
8233 	else
8234 		err = iwm_lmac_scan(sc, 0);
8235 	if (err) {
8236 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8237 		return err;
8238 	}
8239 
8240 	/*
8241 	 * The current mode might have been fixed during association.
8242 	 * Ensure all channels get scanned.
8243 	 */
8244 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
8245 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8246 
8247 	sc->sc_flags |= IWM_FLAG_SCANNING;
8248 	if (ifp->if_flags & IFF_DEBUG)
8249 		printf("%s: %s -> %s\n", ifp->if_xname,
8250 		    ieee80211_state_name[ic->ic_state],
8251 		    ieee80211_state_name[IEEE80211_S_SCAN]);
8252 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8253 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
8254 		ieee80211_node_cleanup(ic, ic->ic_bss);
8255 	}
8256 	ic->ic_state = IEEE80211_S_SCAN;
8257 	iwm_led_blink_start(sc);
8258 	wakeup(&ic->ic_state); /* wake iwm_init() */
8259 
8260 	return 0;
8261 }
8262 
8263 int
8264 iwm_bgscan(struct ieee80211com *ic)
8265 {
8266 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8267 	int err;
8268 
8269 	if (sc->sc_flags & IWM_FLAG_SCANNING)
8270 		return 0;
8271 
8272 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8273 		err = iwm_umac_scan(sc, 1);
8274 	else
8275 		err = iwm_lmac_scan(sc, 1);
8276 	if (err) {
8277 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8278 		return err;
8279 	}
8280 
8281 	sc->sc_flags |= IWM_FLAG_BGSCAN;
8282 	return 0;
8283 }
8284 
8285 int
8286 iwm_umac_scan_abort(struct iwm_softc *sc)
8287 {
8288 	struct iwm_umac_scan_abort cmd = { 0 };
8289 
8290 	return iwm_send_cmd_pdu(sc,
8291 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
8292 	    0, sizeof(cmd), &cmd);
8293 }
8294 
8295 int
8296 iwm_lmac_scan_abort(struct iwm_softc *sc)
8297 {
8298 	struct iwm_host_cmd cmd = {
8299 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
8300 	};
8301 	int err, status;
8302 
8303 	err = iwm_send_cmd_status(sc, &cmd, &status);
8304 	if (err)
8305 		return err;
8306 
8307 	if (status != IWM_CAN_ABORT_STATUS) {
8308 		/*
8309 		 * The scan abort will return 1 for success or
8310 		 * 2 for "failure".  A failure condition can be
8311 		 * due to simply not being in an active scan, which
8312 		 * can occur if we send the scan abort before the
8313 		 * microcode has notified us that a scan is complete.
8314 		 */
8315 		return EBUSY;
8316 	}
8317 
8318 	return 0;
8319 }
8320 
8321 int
8322 iwm_scan_abort(struct iwm_softc *sc)
8323 {
8324 	int err;
8325 
8326 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8327 		err = iwm_umac_scan_abort(sc);
8328 	else
8329 		err = iwm_lmac_scan_abort(sc);
8330 
8331 	if (err == 0)
8332 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8333 	return err;
8334 }
8335 
8336 int
8337 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8338     struct ieee80211_channel *chan, uint8_t chains_static,
8339     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco)
8340 {
8341 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8342 	int err;
8343 
8344 	if (isset(sc->sc_enabled_capa,
8345 	    IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8346 	    (phyctxt->channel->ic_flags & band_flags) !=
8347 	    (chan->ic_flags & band_flags)) {
8348 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8349 		    chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time, sco);
8350 		if (err) {
8351 			printf("%s: could not remove PHY context "
8352 			    "(error %d)\n", DEVNAME(sc), err);
8353 			return err;
8354 		}
8355 		phyctxt->channel = chan;
8356 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8357 		    chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time, sco);
8358 		if (err) {
8359 			printf("%s: could not add PHY context "
8360 			    "(error %d)\n", DEVNAME(sc), err);
8361 			return err;
8362 		}
8363 	} else {
8364 		phyctxt->channel = chan;
8365 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8366 		    chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time, sco);
8367 		if (err) {
8368 			printf("%s: could not update PHY context (error %d)\n",
8369 			    DEVNAME(sc), err);
8370 			return err;
8371 		}
8372 	}
8373 
8374 	phyctxt->sco = sco;
8375 	return 0;
8376 }
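/*
 * Firmware with the binding CDB capability cannot move a PHY context to
 * a channel on a different band with a MODIFY action, hence the
 * REMOVE/ADD cycle above; same-band channel changes take the cheaper
 * MODIFY path.
 */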
8377 
8378 int
8379 iwm_auth(struct iwm_softc *sc)
8380 {
8381 	struct ieee80211com *ic = &sc->sc_ic;
8382 	struct iwm_node *in = (void *)ic->ic_bss;
8383 	uint32_t duration;
8384 	int generation = sc->sc_generation, err;
8385 
8386 	splassert(IPL_NET);
8387 
8388 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8389 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8390 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
8391 		if (err)
8392 			return err;
8393 	} else {
8394 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8395 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
8396 		if (err)
8397 			return err;
8398 	}
8399 	in->in_phyctxt = &sc->sc_phyctxt[0];
8400 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8401 	iwm_setrates(in, 0);
8402 
8403 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8404 	if (err) {
8405 		printf("%s: could not add MAC context (error %d)\n",
8406 		    DEVNAME(sc), err);
8407 		return err;
8408 	}
8409 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8410 
8411 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8412 	if (err) {
8413 		printf("%s: could not add binding (error %d)\n",
8414 		    DEVNAME(sc), err);
8415 		goto rm_mac_ctxt;
8416 	}
8417 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8418 
8419 	in->tid_disable_ampdu = 0xffff;
8420 	err = iwm_add_sta_cmd(sc, in, 0);
8421 	if (err) {
8422 		printf("%s: could not add sta (error %d)\n",
8423 		    DEVNAME(sc), err);
8424 		goto rm_binding;
8425 	}
8426 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8427 
8428 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8429 		return 0;
8430 
8431 	/*
8432 	 * Prevent the FW from wandering off channel during association
8433 	 * by "protecting" the session with a time event.
8434 	 */
8435 	if (in->in_ni.ni_intval)
8436 		duration = in->in_ni.ni_intval * 2;
8437 	else
8438 		duration = IEEE80211_DUR_TU;
8439 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
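	/*
	 * Illustrative timing: with a typical beacon interval of 100 TU
	 * the session above is protected for 200 TU, with at most 50 TU
	 * of scheduling delay, which covers the auth/assoc exchange
	 * without pinning the firmware to the channel for long.
	 */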
8440 
8441 	return 0;
8442 
8443 rm_binding:
8444 	if (generation == sc->sc_generation) {
8445 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8446 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8447 	}
8448 rm_mac_ctxt:
8449 	if (generation == sc->sc_generation) {
8450 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8451 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8452 	}
8453 	return err;
8454 }
8455 
8456 int
8457 iwm_deauth(struct iwm_softc *sc)
8458 {
8459 	struct ieee80211com *ic = &sc->sc_ic;
8460 	struct iwm_node *in = (void *)ic->ic_bss;
8461 	int err;
8462 
8463 	splassert(IPL_NET);
8464 
8465 	iwm_unprotect_session(sc, in);
8466 
8467 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8468 		err = iwm_flush_sta(sc, in);
8469 		if (err)
8470 			return err;
8471 		err = iwm_rm_sta_cmd(sc, in);
8472 		if (err) {
8473 			printf("%s: could not remove STA (error %d)\n",
8474 			    DEVNAME(sc), err);
8475 			return err;
8476 		}
8477 		in->tid_disable_ampdu = 0xffff;
8478 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8479 		sc->sc_rx_ba_sessions = 0;
8480 		sc->ba_rx.start_tidmask = 0;
8481 		sc->ba_rx.stop_tidmask = 0;
8482 		sc->tx_ba_queue_mask = 0;
8483 		sc->ba_tx.start_tidmask = 0;
8484 		sc->ba_tx.stop_tidmask = 0;
8485 	}
8486 
8487 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8488 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8489 		if (err) {
8490 			printf("%s: could not remove binding (error %d)\n",
8491 			    DEVNAME(sc), err);
8492 			return err;
8493 		}
8494 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8495 	}
8496 
8497 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8498 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8499 		if (err) {
8500 			printf("%s: could not remove MAC context (error %d)\n",
8501 			    DEVNAME(sc), err);
8502 			return err;
8503 		}
8504 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8505 	}
8506 
8507 	/* Move unused PHY context to a default channel. */
8508 	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8509 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
8510 	if (err)
8511 		return err;
8512 
8513 	return 0;
8514 }
8515 
8516 int
8517 iwm_run(struct iwm_softc *sc)
8518 {
8519 	struct ieee80211com *ic = &sc->sc_ic;
8520 	struct iwm_node *in = (void *)ic->ic_bss;
8521 	struct ieee80211_node *ni = &in->in_ni;
8522 	int err;
8523 
8524 	splassert(IPL_NET);
8525 
8526 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8527 		/* Add a MAC context and a sniffing STA. */
8528 		err = iwm_auth(sc);
8529 		if (err)
8530 			return err;
8531 	}
8532 
8533 	/* Configure Rx chains for MIMO and set up any 40 MHz channel. */
8534 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8535 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8536 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8537 		    in->in_phyctxt->channel, chains, chains,
8538 		    0, IEEE80211_HTOP0_SCO_SCN);
8539 		if (err) {
8540 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8541 			return err;
8542 		}
8543 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8544 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8545 		uint8_t sco;
8546 		if (ieee80211_node_supports_ht_chan40(ni))
8547 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8548 		else
8549 			sco = IEEE80211_HTOP0_SCO_SCN;
8550 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8551 		    in->in_phyctxt->channel, chains, chains,
8552 		    0, sco);
8553 		if (err) {
8554 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8555 			return err;
8556 		}
8557 	}
8558 
8559 	/* Update STA again, for HT-related settings such as MIMO. */
8560 	err = iwm_add_sta_cmd(sc, in, 1);
8561 	if (err) {
8562 		printf("%s: could not update STA (error %d)\n",
8563 		    DEVNAME(sc), err);
8564 		return err;
8565 	}
8566 
8567 	/* We have now been assigned an associd by the AP. */
8568 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8569 	if (err) {
8570 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8571 		return err;
8572 	}
8573 
8574 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8575 	if (err) {
8576 		printf("%s: could not set sf full on (error %d)\n",
8577 		    DEVNAME(sc), err);
8578 		return err;
8579 	}
8580 
8581 	err = iwm_allow_mcast(sc);
8582 	if (err) {
8583 		printf("%s: could not allow mcast (error %d)\n",
8584 		    DEVNAME(sc), err);
8585 		return err;
8586 	}
8587 
8588 	err = iwm_power_update_device(sc);
8589 	if (err) {
8590 		printf("%s: could not send power command (error %d)\n",
8591 		    DEVNAME(sc), err);
8592 		return err;
8593 	}
8594 #ifdef notyet
8595 	/*
8596 	 * Disabled for now. Default beacon filter settings
8597 	 * prevent net80211 from getting ERP and HT protection
8598 	 * updates from beacons.
8599 	 */
8600 	err = iwm_enable_beacon_filter(sc, in);
8601 	if (err) {
8602 		printf("%s: could not enable beacon filter\n",
8603 		    DEVNAME(sc));
8604 		return err;
8605 	}
8606 #endif
8607 	err = iwm_power_mac_update_mode(sc, in);
8608 	if (err) {
8609 		printf("%s: could not update MAC power (error %d)\n",
8610 		    DEVNAME(sc), err);
8611 		return err;
8612 	}
8613 
8614 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
8615 		err = iwm_update_quotas(sc, in, 1);
8616 		if (err) {
8617 			printf("%s: could not update quotas (error %d)\n",
8618 			    DEVNAME(sc), err);
8619 			return err;
8620 		}
8621 	}
8622 
8623 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
8624 	ieee80211_ra_node_init(&in->in_rn);
8625 
8626 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8627 		iwm_led_blink_start(sc);
8628 		return 0;
8629 	}
8630 
8631 	/* Start at lowest available bit-rate, AMRR will raise. */
8632 	in->in_ni.ni_txrate = 0;
8633 	in->in_ni.ni_txmcs = 0;
8634 	iwm_setrates(in, 0);
8635 
8636 	timeout_add_msec(&sc->sc_calib_to, 500);
8637 	iwm_led_enable(sc);
8638 
8639 	return 0;
8640 }
8641 
8642 int
8643 iwm_run_stop(struct iwm_softc *sc)
8644 {
8645 	struct ieee80211com *ic = &sc->sc_ic;
8646 	struct iwm_node *in = (void *)ic->ic_bss;
8647 	struct ieee80211_node *ni = &in->in_ni;
8648 	int err, i, tid;
8649 
8650 	splassert(IPL_NET);
8651 
8652 	/*
8653 	 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
8654 	 * for this when moving out of RUN state since it runs in a
8655 	 * separate thread.
8656 	 * Note that in->in_ni (struct ieee80211_node) already represents
8657 	 * our new access point in case we are roaming between APs.
8658 	 * This means we cannot rely on struct ieee80211_node to tell
8659 	 * us which BA sessions exist.
8660 	 */
8661 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8662 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
8663 		if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
8664 			continue;
8665 		err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
8666 		if (err)
8667 			return err;
8668 		iwm_clear_reorder_buffer(sc, rxba);
8669 		if (sc->sc_rx_ba_sessions > 0)
8670 			sc->sc_rx_ba_sessions--;
8671 	}
8672 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
8673 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
8674 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8675 			continue;
8676 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8677 		if (err)
8678 			return err;
8679 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8680 		if (err)
8681 			return err;
8682 		in->tfd_queue_msk &= ~(1 << qid);
8683 	}
8684 	ieee80211_ba_del(ni);
8685 
8686 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8687 		iwm_led_blink_stop(sc);
8688 
8689 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
8690 	if (err)
8691 		return err;
8692 
8693 	iwm_disable_beacon_filter(sc);
8694 
8695 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
8696 		err = iwm_update_quotas(sc, in, 0);
8697 		if (err) {
8698 			printf("%s: could not update quotas (error %d)\n",
8699 			    DEVNAME(sc), err);
8700 			return err;
8701 		}
8702 	}
8703 
8704 	/* Mark station as disassociated. */
8705 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
8706 	if (err) {
8707 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8708 		return err;
8709 	}
8710 
8711 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
8712 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
8713 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8714 		   in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
8715 		if (err) {
8716 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8717 			return err;
8718 		}
8719 	}
8720 
8721 	return 0;
8722 }
8723 
8724 struct ieee80211_node *
8725 iwm_node_alloc(struct ieee80211com *ic)
8726 {
8727 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
8728 }
8729 
8730 int
8731 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
8732     struct ieee80211_key *k)
8733 {
8734 	struct iwm_softc *sc = ic->ic_softc;
8735 	struct iwm_add_sta_key_cmd_v1 cmd;
8736 
8737 	memset(&cmd, 0, sizeof(cmd));
8738 
8739 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
8740 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
8741 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8742 	    IWM_STA_KEY_FLG_KEYID_MSK));
8743 	if (k->k_flags & IEEE80211_KEY_GROUP)
8744 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
8745 
8746 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8747 	cmd.common.key_offset = 0;
8748 	cmd.common.sta_id = IWM_STATION_ID;
8749 
8750 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
8751 	    sizeof(cmd), &cmd);
8752 }
8753 
8754 int
8755 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8756     struct ieee80211_key *k)
8757 {
8758 	struct iwm_softc *sc = ic->ic_softc;
8759 	struct iwm_add_sta_key_cmd cmd;
8760 
8761 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
8762 	    k->k_cipher != IEEE80211_CIPHER_CCMP)  {
8763 		/* Fall back to software crypto for other ciphers. */
8764 		return (ieee80211_set_key(ic, ni, k));
8765 	}
8766 
8767 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
8768 		return iwm_set_key_v1(ic, ni, k);
8769 
8770 	memset(&cmd, 0, sizeof(cmd));
8771 
8772 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
8773 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
8774 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8775 	    IWM_STA_KEY_FLG_KEYID_MSK));
8776 	if (k->k_flags & IEEE80211_KEY_GROUP)
8777 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
8778 
8779 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8780 	cmd.common.key_offset = 0;
8781 	cmd.common.sta_id = IWM_STATION_ID;
8782 
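	/*
	 * Unlike the v1 command, the v2 command carries the current
	 * CCMP packet number (k_tsc) so the firmware can continue the
	 * transmit sequence counter from its current value.
	 */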
8783 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
8784 
8785 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
8786 	    sizeof(cmd), &cmd);
8787 }
8788 
8789 void
8790 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
8791     struct ieee80211_key *k)
8792 {
8793 	struct iwm_softc *sc = ic->ic_softc;
8794 	struct iwm_add_sta_key_cmd_v1 cmd;
8795 
8796 	memset(&cmd, 0, sizeof(cmd));
8797 
8798 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
8799 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
8800 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8801 	    IWM_STA_KEY_FLG_KEYID_MSK));
8802 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8803 	cmd.common.key_offset = 0;
8804 	cmd.common.sta_id = IWM_STATION_ID;
8805 
8806 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
8807 }
8808 
8809 void
8810 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8811     struct ieee80211_key *k)
8812 {
8813 	struct iwm_softc *sc = ic->ic_softc;
8814 	struct iwm_add_sta_key_cmd cmd;
8815 
8816 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
8817 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
8818 		/* Fall back to software crypto for other ciphers. */
8819 		ieee80211_delete_key(ic, ni, k);
8820 		return;
8821 	}
8822 
8823 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
8824 		return iwm_delete_key_v1(ic, ni, k);
8825 
8826 	memset(&cmd, 0, sizeof(cmd));
8827 
8828 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
8829 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
8830 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8831 	    IWM_STA_KEY_FLG_KEYID_MSK));
8832 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8833 	cmd.common.key_offset = 0;
8834 	cmd.common.sta_id = IWM_STATION_ID;
8835 
8836 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
8837 }
8838 
8839 void
8840 iwm_calib_timeout(void *arg)
8841 {
8842 	struct iwm_softc *sc = arg;
8843 	struct ieee80211com *ic = &sc->sc_ic;
8844 	struct iwm_node *in = (void *)ic->ic_bss;
8845 	struct ieee80211_node *ni = &in->in_ni;
8846 	int s;
8847 
8848 	s = splnet();
8849 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
8850 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
8851 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
8852 		int old_txrate = ni->ni_txrate;
8853 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
8854 		/*
8855 		 * If AMRR has chosen a new TX rate we must update
8856 		 * the firmware's LQ rate table.
8857 		 * ni_txrate may change again before the task runs so
8858 		 * cache the chosen rate in the iwm_node structure.
8859 		 */
8860 		if (ni->ni_txrate != old_txrate)
8861 			iwm_setrates(in, 1);
8862 	}
8863 
8864 	splx(s);
8865 
8866 	timeout_add_msec(&sc->sc_calib_to, 500);
8867 }
8868 
8869 void
8870 iwm_setrates(struct iwm_node *in, int async)
8871 {
8872 	struct ieee80211_node *ni = &in->in_ni;
8873 	struct ieee80211com *ic = ni->ni_ic;
8874 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8875 	struct iwm_lq_cmd lqcmd;
8876 	struct ieee80211_rateset *rs = &ni->ni_rates;
8877 	int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
8878 	struct iwm_host_cmd cmd = {
8879 		.id = IWM_LQ_CMD,
8880 		.len = { sizeof(lqcmd), },
8881 	};
8882 
8883 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
8884 
8885 	memset(&lqcmd, 0, sizeof(lqcmd));
8886 	lqcmd.sta_id = IWM_STATION_ID;
8887 
8888 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8889 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
8890 
8891 	/*
8892 	 * Fill the LQ rate selection table with legacy and/or HT rates
8893 	 * in descending order, i.e. with the node's current TX rate first.
8894 	 * In cases where throughput of an HT rate corresponds to a legacy
8895 	 * rate it makes no sense to add both. We rely on the fact that
8896 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
8897 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
8898 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
8899 	 */
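	/*
	 * Illustrative example only: for an HT SISO node currently at
	 * MCS 7, rs_table[] is filled as { MCS7, MCS6, ..., MCS0 } and
	 * padded with the lowest basic rate; the firmware walks this
	 * list downwards as transmissions fail.
	 */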
8900 	j = 0;
8901 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
8902 	mimo = iwm_is_mimo_mcs(ni->ni_txmcs);
8903 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
8904 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
8905 		uint8_t plcp = iwm_rates[ridx].plcp;
8906 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
8907 
8908 		if (j >= nitems(lqcmd.rs_table))
8909 			break;
8910 		tab = 0;
8911 		if (ni->ni_flags & IEEE80211_NODE_HT) {
8912 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
8913 				continue;
8914 			/* Do not mix SISO and MIMO HT rates. */
8915 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
8916 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
8917 				continue;
8918 			for (i = ni->ni_txmcs; i >= 0; i--) {
8919 				if (isclr(ni->ni_rxmcs, i))
8920 					continue;
8921 				if (ridx != iwm_mcs2ridx[i])
8922 					continue;
8923 				tab = ht_plcp;
8924 				tab |= IWM_RATE_MCS_HT_MSK;
8925 				/* First two Tx attempts may use 40MHz/SGI. */
8926 				if (j > 1)
8927 					break;
8928 				if (in->in_phyctxt->sco ==
8929 				    IEEE80211_HTOP0_SCO_SCA ||
8930 				    in->in_phyctxt->sco ==
8931 				    IEEE80211_HTOP0_SCO_SCB) {
8932 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
8933 					tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
8934 				}
8935 				if (ieee80211_ra_use_ht_sgi(ni))
8936 					tab |= IWM_RATE_MCS_SGI_MSK;
8937 				break;
8938 			}
8939 		} else if (plcp != IWM_RATE_INVM_PLCP) {
8940 			for (i = ni->ni_txrate; i >= 0; i--) {
8941 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
8942 				    IEEE80211_RATE_VAL)) {
8943 					tab = plcp;
8944 					break;
8945 				}
8946 			}
8947 		}
8948 
8949 		if (tab == 0)
8950 			continue;
8951 
8952 		if (iwm_is_mimo_ht_plcp(ht_plcp))
8953 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
8954 		else if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
8955 			tab |= IWM_RATE_MCS_ANT_B_MSK;
8956 		else
8957 			tab |= IWM_RATE_MCS_ANT_A_MSK;
8958 
8959 		if (IWM_RIDX_IS_CCK(ridx))
8960 			tab |= IWM_RATE_MCS_CCK_MSK;
8961 		lqcmd.rs_table[j++] = htole32(tab);
8962 	}
8963 
8964 	lqcmd.mimo_delim = (mimo ? j : 0);
8965 
8966 	/* Fill the rest with the lowest possible rate */
8967 	while (j < nitems(lqcmd.rs_table)) {
8968 		tab = iwm_rates[ridx_min].plcp;
8969 		if (IWM_RIDX_IS_CCK(ridx_min))
8970 			tab |= IWM_RATE_MCS_CCK_MSK;
8971 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
8972 			tab |= IWM_RATE_MCS_ANT_B_MSK;
8973 		else
8974 			tab |= IWM_RATE_MCS_ANT_A_MSK;
8975 		lqcmd.rs_table[j++] = htole32(tab);
8976 	}
8977 
8978 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
8979 		lqcmd.single_stream_ant_msk = IWM_ANT_B;
8980 	else
8981 		lqcmd.single_stream_ant_msk = IWM_ANT_A;
8982 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
8983 
8984 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
8985 	lqcmd.agg_disable_start_th = 3;
8986 	lqcmd.agg_frame_cnt_limit = 0x3f;
8987 
8988 	cmd.data[0] = &lqcmd;
8989 	iwm_send_cmd(sc, &cmd);
8990 }
8991 
8992 int
8993 iwm_media_change(struct ifnet *ifp)
8994 {
8995 	struct iwm_softc *sc = ifp->if_softc;
8996 	struct ieee80211com *ic = &sc->sc_ic;
8997 	uint8_t rate, ridx;
8998 	int err;
8999 
9000 	err = ieee80211_media_change(ifp);
9001 	if (err != ENETRESET)
9002 		return err;
9003 
9004 	if (ic->ic_fixed_mcs != -1)
9005 		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
9006 	else if (ic->ic_fixed_rate != -1) {
9007 		rate = ic->ic_sup_rates[ic->ic_curmode].
9008 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
9009 		/* Map 802.11 rate to HW rate index. */
9010 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
9011 			if (iwm_rates[ridx].rate == rate)
9012 				break;
9013 		sc->sc_fixed_ridx = ridx;
9014 	}
9015 
9016 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9017 	    (IFF_UP | IFF_RUNNING)) {
9018 		iwm_stop(ifp);
9019 		err = iwm_init(ifp);
9020 	}
9021 	return err;
9022 }
9023 
9024 void
9025 iwm_newstate_task(void *psc)
9026 {
9027 	struct iwm_softc *sc = (struct iwm_softc *)psc;
9028 	struct ieee80211com *ic = &sc->sc_ic;
9029 	enum ieee80211_state nstate = sc->ns_nstate;
9030 	enum ieee80211_state ostate = ic->ic_state;
9031 	int arg = sc->ns_arg;
9032 	int err = 0, s = splnet();
9033 
9034 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9035 		/* iwm_stop() is waiting for us. */
9036 		refcnt_rele_wake(&sc->task_refs);
9037 		splx(s);
9038 		return;
9039 	}
9040 
9041 	if (ostate == IEEE80211_S_SCAN) {
9042 		if (nstate == ostate) {
9043 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
9044 				refcnt_rele_wake(&sc->task_refs);
9045 				splx(s);
9046 				return;
9047 			}
9048 			/* Firmware is no longer scanning. Do another scan. */
9049 			goto next_scan;
9050 		} else
9051 			iwm_led_blink_stop(sc);
9052 	}
9053 
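	/*
	 * Moving to the same or a lower state tears down state in stages
	 * via the fallthrough cases below; e.g. RUN -> SCAN first calls
	 * iwm_run_stop() and then iwm_deauth() before the SCAN state is
	 * entered.
	 */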
9054 	if (nstate <= ostate) {
9055 		switch (ostate) {
9056 		case IEEE80211_S_RUN:
9057 			err = iwm_run_stop(sc);
9058 			if (err)
9059 				goto out;
9060 			/* FALLTHROUGH */
9061 		case IEEE80211_S_ASSOC:
9062 		case IEEE80211_S_AUTH:
9063 			if (nstate <= IEEE80211_S_AUTH) {
9064 				err = iwm_deauth(sc);
9065 				if (err)
9066 					goto out;
9067 			}
9068 			/* FALLTHROUGH */
9069 		case IEEE80211_S_SCAN:
9070 		case IEEE80211_S_INIT:
9071 			break;
9072 		}
9073 
9074 		/* Die now if iwm_stop() was called while we were sleeping. */
9075 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9076 			refcnt_rele_wake(&sc->task_refs);
9077 			splx(s);
9078 			return;
9079 		}
9080 	}
9081 
9082 	switch (nstate) {
9083 	case IEEE80211_S_INIT:
9084 		break;
9085 
9086 	case IEEE80211_S_SCAN:
9087 next_scan:
9088 		err = iwm_scan(sc);
9089 		if (err)
9090 			break;
9091 		refcnt_rele_wake(&sc->task_refs);
9092 		splx(s);
9093 		return;
9094 
9095 	case IEEE80211_S_AUTH:
9096 		err = iwm_auth(sc);
9097 		break;
9098 
9099 	case IEEE80211_S_ASSOC:
9100 		break;
9101 
9102 	case IEEE80211_S_RUN:
9103 		err = iwm_run(sc);
9104 		break;
9105 	}
9106 
9107 out:
9108 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9109 		if (err)
9110 			task_add(systq, &sc->init_task);
9111 		else
9112 			sc->sc_newstate(ic, nstate, arg);
9113 	}
9114 	refcnt_rele_wake(&sc->task_refs);
9115 	splx(s);
9116 }
9117 
9118 int
9119 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9120 {
9121 	struct ifnet *ifp = IC2IFP(ic);
9122 	struct iwm_softc *sc = ifp->if_softc;
9123 
9124 	/*
9125 	 * Prevent attempts to transition towards the same state, unless
9126 	 * we are scanning in which case a SCAN -> SCAN transition
9127 	 * triggers another scan iteration. And AUTH -> AUTH is needed
9128 	 * to support band-steering.
9129 	 */
9130 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9131 	    nstate != IEEE80211_S_AUTH)
9132 		return 0;
9133 
9134 	if (ic->ic_state == IEEE80211_S_RUN) {
9135 		timeout_del(&sc->sc_calib_to);
9136 		iwm_del_task(sc, systq, &sc->ba_task);
9137 		iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9138 		iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9139 	}
9140 
9141 	sc->ns_nstate = nstate;
9142 	sc->ns_arg = arg;
9143 
9144 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9145 
9146 	return 0;
9147 }
9148 
9149 void
9150 iwm_endscan(struct iwm_softc *sc)
9151 {
9152 	struct ieee80211com *ic = &sc->sc_ic;
9153 
9154 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9155 		return;
9156 
9157 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9158 	ieee80211_end_scan(&ic->ic_if);
9159 }
9160 
9161 /*
9162  * Aging and idle timeouts for the different possible scenarios
9163  * in default configuration
9164  */
9165 static const uint32_t
9166 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9167 	{
9168 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9169 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9170 	},
9171 	{
9172 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
9173 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9174 	},
9175 	{
9176 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
9177 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
9178 	},
9179 	{
9180 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
9181 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
9182 	},
9183 	{
9184 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
9185 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
9186 	},
9187 };
9188 
9189 /*
9190  * Aging and idle timeouts for the different possible scenarios
9191  * in single BSS MAC configuration.
9192  */
9193 static const uint32_t
9194 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9195 	{
9196 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
9197 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
9198 	},
9199 	{
9200 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
9201 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
9202 	},
9203 	{
9204 		htole32(IWM_SF_MCAST_AGING_TIMER),
9205 		htole32(IWM_SF_MCAST_IDLE_TIMER)
9206 	},
9207 	{
9208 		htole32(IWM_SF_BA_AGING_TIMER),
9209 		htole32(IWM_SF_BA_IDLE_TIMER)
9210 	},
9211 	{
9212 		htole32(IWM_SF_TX_RE_AGING_TIMER),
9213 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
9214 	},
9215 };
9216 
9217 void
9218 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9219     struct ieee80211_node *ni)
9220 {
9221 	int i, j, watermark;
9222 
9223 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
9224 
9225 	/*
9226 	 * If we are in association flow - check antenna configuration
9227 	 * capabilities of the AP station, and choose the watermark accordingly.
9228 	 */
9229 	if (ni) {
9230 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9231 			if (ni->ni_rxmcs[1] != 0)
9232 				watermark = IWM_SF_W_MARK_MIMO2;
9233 			else
9234 				watermark = IWM_SF_W_MARK_SISO;
9235 		} else {
9236 			watermark = IWM_SF_W_MARK_LEGACY;
9237 		}
9238 	/* default watermark value for unassociated mode. */
9239 	} else {
9240 		watermark = IWM_SF_W_MARK_MIMO2;
9241 	}
9242 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
9243 
9244 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
9245 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
9246 			sf_cmd->long_delay_timeouts[i][j] =
9247 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
9248 		}
9249 	}
9250 
9251 	if (ni) {
9252 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9253 		       sizeof(iwm_sf_full_timeout));
9254 	} else {
9255 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9256 		       sizeof(iwm_sf_full_timeout_def));
9257 	}
9258 
9259 }
9260 
9261 int
9262 iwm_sf_config(struct iwm_softc *sc, int new_state)
9263 {
9264 	struct ieee80211com *ic = &sc->sc_ic;
9265 	struct iwm_sf_cfg_cmd sf_cmd = {
9266 		.state = htole32(new_state),
9267 	};
9268 	int err = 0;
9269 
9270 #if 0	/* only used for models with sdio interface, in iwlwifi */
9271 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9272 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
9273 #endif
9274 
9275 	switch (new_state) {
9276 	case IWM_SF_UNINIT:
9277 	case IWM_SF_INIT_OFF:
9278 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
9279 		break;
9280 	case IWM_SF_FULL_ON:
9281 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9282 		break;
9283 	default:
9284 		return EINVAL;
9285 	}
9286 
9287 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9288 				   sizeof(sf_cmd), &sf_cmd);
9289 	return err;
9290 }
9291 
9292 int
9293 iwm_send_bt_init_conf(struct iwm_softc *sc)
9294 {
9295 	struct iwm_bt_coex_cmd bt_cmd;
9296 
9297 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
9298 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
9299 
9300 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9301 	    &bt_cmd);
9302 }
9303 
9304 int
9305 iwm_send_soc_conf(struct iwm_softc *sc)
9306 {
9307 	struct iwm_soc_configuration_cmd cmd;
9308 	int err;
9309 	uint32_t cmd_id, flags = 0;
9310 
9311 	memset(&cmd, 0, sizeof(cmd));
9312 
9313 	/*
9314 	 * In VER_1 of this command, the discrete value is considered
9315 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
9316 	 * values in VER_1, this is backwards-compatible with VER_2,
9317 	 * as long as we don't set any other flag bits.
9318 	 */
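	/*
	 * E.g. (bit values assumed from the iwlwifi layout): DISCRETE is
	 * bit 0 and LOW_LATENCY is bit 1, so VER_1 firmware parsing the
	 * VER_2 bitmask still sees only the values 0 or 1 as long as no
	 * other flag bits are set.
	 */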
9319 	if (!sc->sc_integrated) { /* VER_1 */
9320 		flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9321 	} else { /* VER_2 */
9322 		uint8_t scan_cmd_ver;
9323 		if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9324 			flags |= (sc->sc_ltr_delay &
9325 			    IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9326 		scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9327 		    IWM_SCAN_REQ_UMAC);
9328 		if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
9329 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9330 			flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9331 	}
9332 	cmd.flags = htole32(flags);
9333 
9334 	cmd.latency = htole32(sc->sc_xtal_latency);
9335 
9336 	cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
9337 	err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9338 	if (err)
9339 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9340 	return err;
9341 }
9342 
9343 int
9344 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9345 {
9346 	struct iwm_mcc_update_cmd mcc_cmd;
9347 	struct iwm_host_cmd hcmd = {
9348 		.id = IWM_MCC_UPDATE_CMD,
9349 		.flags = IWM_CMD_WANT_RESP,
9350 		.resp_pkt_len = IWM_CMD_RESP_MAX,
9351 		.data = { &mcc_cmd },
9352 	};
9353 	struct iwm_rx_packet *pkt;
9354 	size_t resp_len;
9355 	int err;
9356 	int resp_v3 = isset(sc->sc_enabled_capa,
9357 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);
9358 
9359 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9360 	    !sc->sc_nvm.lar_enabled) {
9361 		return 0;
9362 	}
9363 
9364 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9365 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
9366 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9367 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9368 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
9369 	else
9370 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
9371 
9372 	if (resp_v3) { /* same size as resp_v2 */
9373 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9374 	} else {
9375 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9376 	}
9377 
9378 	err = iwm_send_cmd(sc, &hcmd);
9379 	if (err)
9380 		return err;
9381 
9382 	pkt = hcmd.resp_pkt;
9383 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
9384 		err = EIO;
9385 		goto out;
9386 	}
9387 
9388 	if (resp_v3) {
9389 		struct iwm_mcc_update_resp_v3 *resp;
9390 		resp_len = iwm_rx_packet_payload_len(pkt);
9391 		if (resp_len < sizeof(*resp)) {
9392 			err = EIO;
9393 			goto out;
9394 		}
9395 
9396 		resp = (void *)pkt->data;
9397 		if (resp_len != sizeof(*resp) +
9398 		    resp->n_channels * sizeof(resp->channels[0])) {
9399 			err = EIO;
9400 			goto out;
9401 		}
9402 	} else {
9403 		struct iwm_mcc_update_resp_v1 *resp_v1;
9404 		resp_len = iwm_rx_packet_payload_len(pkt);
9405 		if (resp_len < sizeof(*resp_v1)) {
9406 			err = EIO;
9407 			goto out;
9408 		}
9409 
9410 		resp_v1 = (void *)pkt->data;
9411 		if (resp_len != sizeof(*resp_v1) +
9412 		    resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9413 			err = EIO;
9414 			goto out;
9415 		}
9416 	}
9417 out:
9418 	iwm_free_resp(sc, &hcmd);
9419 	return err;
9420 }
9421 
9422 int
9423 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9424 {
9425 	struct iwm_temp_report_ths_cmd cmd;
9426 	int err;
9427 
9428 	/*
9429 	 * In order to give responsibility for critical-temperature-kill
9430 	 * and TX backoff to FW we need to send an empty temperature
9431 	 * reporting command at init time.
9432 	 */
9433 	memset(&cmd, 0, sizeof(cmd));
9434 
9435 	err = iwm_send_cmd_pdu(sc,
9436 	    IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
9437 	    0, sizeof(cmd), &cmd);
9438 	if (err)
9439 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9440 		    DEVNAME(sc), err);
9441 
9442 	return err;
9443 }
9444 
9445 void
9446 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9447 {
9448 	struct iwm_host_cmd cmd = {
9449 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
9450 		.len = { sizeof(uint32_t), },
9451 		.data = { &backoff, },
9452 	};
9453 
9454 	iwm_send_cmd(sc, &cmd);
9455 }
9456 
9457 void
9458 iwm_free_fw_paging(struct iwm_softc *sc)
9459 {
9460 	int i;
9461 
9462 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9463 		return;
9464 
9465 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
9466 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9467 	}
9468 
9469 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9470 }
9471 
9472 int
9473 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9474 {
9475 	int sec_idx, idx;
9476 	uint32_t offset = 0;
9477 
9478 	/*
9479 	 * Find the start of the paging image. If a CPU2 image exists
9480 	 * and is in paging format, the image is laid out as follows:
9481 	 * CPU1 sections (2 or more)
9482 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
9483 	 * CPU2 sections (not paged)
9484 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
9485 	 * CPU2 sections from the CPU2 paging sections
9486 	 * CPU2 paging CSS
9487 	 * CPU2 paging image (including instructions and data)
9488 	 */
9489 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
9490 		if (image->fw_sect[sec_idx].fws_devoff ==
9491 		    IWM_PAGING_SEPARATOR_SECTION) {
9492 			sec_idx++;
9493 			break;
9494 		}
9495 	}
9496 
9497 	/*
9498 	 * If paging is enabled there should be at least 2 more sections left
9499 	 * (one for CSS and one for Paging data)
9500 	 */
9501 	if (sec_idx >= nitems(image->fw_sect) - 1) {
9502 		printf("%s: Paging: Missing CSS and/or paging sections\n",
9503 		    DEVNAME(sc));
9504 		iwm_free_fw_paging(sc);
9505 		return EINVAL;
9506 	}
9507 
9508 	/* copy the CSS block to the dram */
9509 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
9510 	    DEVNAME(sc), sec_idx));
9511 
9512 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
9513 	    image->fw_sect[sec_idx].fws_data,
9514 	    sc->fw_paging_db[0].fw_paging_size);
9515 
9516 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
9517 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
9518 
9519 	sec_idx++;
9520 
9521 	/*
9522 	 * Copy the paging blocks to DRAM. The loop index starts at 1
9523 	 * because the CSS block (index 0) has already been copied above.
9524 	 * The loop stops before num_of_paging_blk because the last block
9525 	 * is not full and is copied separately below.
9526 	 */
9527 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
9528 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9529 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9530 		    sc->fw_paging_db[idx].fw_paging_size);
9531 
9532 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
9533 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
9534 
9535 		offset += sc->fw_paging_db[idx].fw_paging_size;
9536 	}
9537 
9538 	/* copy the last paging block */
9539 	if (sc->num_of_pages_in_last_blk > 0) {
9540 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9541 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9542 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
9543 
9544 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
9545 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
9546 	}
9547 
9548 	return 0;
9549 }
9550 
9551 int
9552 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9553 {
9554 	int blk_idx = 0;
9555 	int error, num_of_pages;
9556 
9557 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
9558 		int i;
9559 		/* Device got reset, and we setup firmware paging again */
9560 		bus_dmamap_sync(sc->sc_dmat,
9561 		    sc->fw_paging_db[0].fw_paging_block.map,
9562 		    0, IWM_FW_PAGING_SIZE,
9563 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
9564 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
9565 			bus_dmamap_sync(sc->sc_dmat,
9566 			    sc->fw_paging_db[i].fw_paging_block.map,
9567 			    0, IWM_PAGING_BLOCK_SIZE,
9568 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
9569 		}
9570 		return 0;
9571 	}
9572 
9573 	/* Ensure that (1 << IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE. */
9574 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
9575 #error IWM_BLOCK_2_EXP_SIZE must be the base-2 log of IWM_PAGING_BLOCK_SIZE
9576 #endif
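	/*
	 * Illustrative values (assumed from the iwlwifi layout):
	 * IWM_BLOCK_2_EXP_SIZE == 15 gives 1 << 15 == 32768 bytes,
	 * matching the 32K IWM_PAGING_BLOCK_SIZE allocated for each
	 * paging block below.
	 */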
9577 
9578 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
9579 	sc->num_of_paging_blk =
9580 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
9581 
9582 	sc->num_of_pages_in_last_blk =
9583 		num_of_pages -
9584 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
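	/*
	 * Worked example (assuming 4K pages, 8 pages per block): a 340K
	 * paging image holds 85 pages, so num_of_paging_blk is
	 * ((85 - 1) / 8) + 1 = 11 and the last block holds
	 * 85 - 8 * 10 = 5 pages.
	 */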
9585 
9586 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
9587 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
9588 	    sc->num_of_paging_blk,
9589 	    sc->num_of_pages_in_last_blk));
9590 
9591 	/* allocate block of 4Kbytes for paging CSS */
9592 	error = iwm_dma_contig_alloc(sc->sc_dmat,
9593 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
9594 	    4096);
9595 	if (error) {
9596 		/* free all the previous pages since we failed */
9597 		iwm_free_fw_paging(sc);
9598 		return ENOMEM;
9599 	}
9600 
9601 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
9602 
9603 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
9604 	    DEVNAME(sc)));
9605 
9606 	/*
9607 	 * Allocate the paging blocks in DRAM. The CSS block was allocated
9608 	 * into fw_paging_db[0] above, so the loop starts at index 1.
9609 	 */
9610 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
9611 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
9612 		/* XXX Use iwm_dma_contig_alloc for allocating */
9614 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
9615 		    IWM_PAGING_BLOCK_SIZE, 4096);
9616 		if (error) {
9617 			/* free all the previous pages since we failed */
9618 			iwm_free_fw_paging(sc);
9619 			return ENOMEM;
9620 		}
9621 
9622 		sc->fw_paging_db[blk_idx].fw_paging_size =
9623 		    IWM_PAGING_BLOCK_SIZE;
9624 
9625 		DPRINTF((
9626 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
9627 		    DEVNAME(sc)));
9628 	}
9629 
9630 	return 0;
9631 }
9632 
9633 int
9634 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
9635 {
9636 	int ret;
9637 
9638 	ret = iwm_alloc_fw_paging_mem(sc, fw);
9639 	if (ret)
9640 		return ret;
9641 
9642 	return iwm_fill_paging_mem(sc, fw);
9643 }
9644 
9645 /* send paging cmd to FW in case CPU2 has a paging image */
9646 int
9647 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
9648 {
9649 	int blk_idx;
9650 	uint32_t dev_phy_addr;
9651 	struct iwm_fw_paging_cmd fw_paging_cmd = {
9652 		.flags =
9653 			htole32(IWM_PAGING_CMD_IS_SECURED |
9654 				IWM_PAGING_CMD_IS_ENABLED |
9655 				(sc->num_of_pages_in_last_blk <<
9656 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
9657 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
9658 		.block_num = htole32(sc->num_of_paging_blk),
9659 	};
9660 
9661 	/* Loop over all paging blocks plus the CSS block. */
9662 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
9663 		dev_phy_addr = htole32(
9664 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
9665 		    IWM_PAGE_2_EXP_SIZE);
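		/*
		 * The firmware addresses paging blocks in units of
		 * 2^IWM_PAGE_2_EXP_SIZE bytes (presumably 4K pages, i.e.
		 * IWM_PAGE_2_EXP_SIZE == 12), hence the shift above.
		 */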
9666 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
9667 		bus_dmamap_sync(sc->sc_dmat,
9668 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
9669 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
9670 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
9671 	}
9672 
9673 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
9674 					       IWM_LONG_GROUP, 0),
9675 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
9676 }
9677 
9678 int
9679 iwm_init_hw(struct iwm_softc *sc)
9680 {
9681 	struct ieee80211com *ic = &sc->sc_ic;
9682 	int err, i, ac, qid;
9683 
9684 	err = iwm_preinit(sc);
9685 	if (err)
9686 		return err;
9687 
9688 	err = iwm_start_hw(sc);
9689 	if (err) {
9690 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9691 		return err;
9692 	}
9693 
9694 	err = iwm_run_init_mvm_ucode(sc, 0);
9695 	if (err)
9696 		return err;
9697 
9698 	/* Should stop and start HW since INIT image just loaded. */
9699 	iwm_stop_device(sc);
9700 	err = iwm_start_hw(sc);
9701 	if (err) {
9702 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9703 		return err;
9704 	}
9705 
9706 	/* Restart, this time with the regular firmware */
9707 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
9708 	if (err) {
9709 		printf("%s: could not load firmware\n", DEVNAME(sc));
9710 		goto err;
9711 	}
9712 
9713 	if (!iwm_nic_lock(sc))
9714 		return EBUSY;
9715 
9716 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
9717 	if (err) {
9718 		printf("%s: could not init tx ant config (error %d)\n",
9719 		    DEVNAME(sc), err);
9720 		goto err;
9721 	}
9722 
9723 	err = iwm_send_phy_db_data(sc);
9724 	if (err) {
9725 		printf("%s: could not init phy db (error %d)\n",
9726 		    DEVNAME(sc), err);
9727 		goto err;
9728 	}
9729 
9730 	err = iwm_send_phy_cfg_cmd(sc);
9731 	if (err) {
9732 		printf("%s: could not send phy config (error %d)\n",
9733 		    DEVNAME(sc), err);
9734 		goto err;
9735 	}
9736 
9737 	err = iwm_send_bt_init_conf(sc);
9738 	if (err) {
9739 		printf("%s: could not init bt coex (error %d)\n",
9740 		    DEVNAME(sc), err);
9741 		goto err;
9742 	}
9743 
9744 	if (isset(sc->sc_enabled_capa,
9745 	    IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
9746 		err = iwm_send_soc_conf(sc);
9747 		if (err)
9748 			goto err;
9749 	}
9750 
9751 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
9752 		err = iwm_send_dqa_cmd(sc);
9753 		if (err)
9754 			goto err;
9755 	}
9756 
9757 	/* Add auxiliary station for scanning */
9758 	err = iwm_add_aux_sta(sc);
9759 	if (err) {
9760 		printf("%s: could not add aux station (error %d)\n",
9761 		    DEVNAME(sc), err);
9762 		goto err;
9763 	}
9764 
9765 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
9766 		/*
9767 		 * The channel used here isn't relevant as it's
9768 		 * going to be overwritten in the other flows.
9769 		 * For now use the first channel we have.
9770 		 */
9771 		sc->sc_phyctxt[i].id = i;
9772 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
9773 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
9774 		    IWM_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN);
9775 		if (err) {
9776 			printf("%s: could not add phy context %d (error %d)\n",
9777 			    DEVNAME(sc), i, err);
9778 			goto err;
9779 		}
9780 	}
9781 
9782 	/* Initialize tx backoffs to the minimum. */
9783 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
9784 		iwm_tt_tx_backoff(sc, 0);
9785 
9787 	err = iwm_config_ltr(sc);
9788 	if (err) {
9789 		printf("%s: PCIe LTR configuration failed (error %d)\n",
9790 		    DEVNAME(sc), err);
9791 	}
9792 
9793 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
9794 		err = iwm_send_temp_report_ths_cmd(sc);
9795 		if (err)
9796 			goto err;
9797 	}
9798 
9799 	err = iwm_power_update_device(sc);
9800 	if (err) {
9801 		printf("%s: could not send power command (error %d)\n",
9802 		    DEVNAME(sc), err);
9803 		goto err;
9804 	}
9805 
9806 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
9807 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
9808 		if (err) {
9809 			printf("%s: could not init LAR (error %d)\n",
9810 			    DEVNAME(sc), err);
9811 			goto err;
9812 		}
9813 	}
9814 
9815 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
9816 		err = iwm_config_umac_scan(sc);
9817 		if (err) {
9818 			printf("%s: could not configure scan (error %d)\n",
9819 			    DEVNAME(sc), err);
9820 			goto err;
9821 		}
9822 	}
9823 
9824 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9825 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
9826 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
9827 		else
9828 			qid = IWM_AUX_QUEUE;
9829 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
9830 		    iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
9831 		if (err) {
9832 			printf("%s: could not enable monitor inject Tx queue "
9833 			    "(error %d)\n", DEVNAME(sc), err);
9834 			goto err;
9835 		}
9836 	} else {
9837 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
9838 			if (isset(sc->sc_enabled_capa,
9839 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
9840 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
9841 			else
9842 				qid = ac;
9843 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
9844 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
9845 			if (err) {
9846 				printf("%s: could not enable Tx queue %d "
9847 				    "(error %d)\n", DEVNAME(sc), ac, err);
9848 				goto err;
9849 			}
9850 		}
9851 	}
9852 
9853 	err = iwm_disable_beacon_filter(sc);
9854 	if (err) {
9855 		printf("%s: could not disable beacon filter (error %d)\n",
9856 		    DEVNAME(sc), err);
9857 		goto err;
9858 	}
9859 
9860 err:
9861 	iwm_nic_unlock(sc);
9862 	return err;
9863 }
9864 
9865 /* Allow multicast from our BSSID. */
9866 int
9867 iwm_allow_mcast(struct iwm_softc *sc)
9868 {
9869 	struct ieee80211com *ic = &sc->sc_ic;
9870 	struct iwm_node *in = (void *)ic->ic_bss;
9871 	struct iwm_mcast_filter_cmd *cmd;
9872 	size_t size;
9873 	int err;
9874 
9875 	size = roundup(sizeof(*cmd), 4);
9876 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
9877 	if (cmd == NULL)
9878 		return ENOMEM;
9879 	cmd->filter_own = 1;
9880 	cmd->port_id = 0;
9881 	cmd->count = 0;
9882 	cmd->pass_all = 1;
9883 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
9884 
9885 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
9886 	    0, size, cmd);
9887 	free(cmd, M_DEVBUF, size);
9888 	return err;
9889 }
9890 
9891 int
9892 iwm_init(struct ifnet *ifp)
9893 {
9894 	struct iwm_softc *sc = ifp->if_softc;
9895 	struct ieee80211com *ic = &sc->sc_ic;
9896 	int err, generation;
9897 
9898 	rw_assert_wrlock(&sc->ioctl_rwl);
9899 
9900 	generation = ++sc->sc_generation;
9901 
9902 	KASSERT(sc->task_refs.refs == 0);
9903 	refcnt_init(&sc->task_refs);
9904 
9905 	err = iwm_init_hw(sc);
9906 	if (err) {
9907 		if (generation == sc->sc_generation)
9908 			iwm_stop(ifp);
9909 		return err;
9910 	}
9911 
9912 	if (sc->sc_nvm.sku_cap_11n_enable)
9913 		iwm_setup_ht_rates(sc);
9914 
9915 	ifq_clr_oactive(&ifp->if_snd);
9916 	ifp->if_flags |= IFF_RUNNING;
9917 
9918 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9919 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
9920 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9921 		return 0;
9922 	}
9923 
9924 	ieee80211_begin_scan(ifp);
9925 
9926 	/*
9927 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
9928 	 * Wait until the transition to SCAN state has completed.
9929 	 */
9930 	do {
9931 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
9932 		    SEC_TO_NSEC(1));
9933 		if (generation != sc->sc_generation)
9934 			return ENXIO;
9935 		if (err) {
9936 			iwm_stop(ifp);
9937 			return err;
9938 		}
9939 	} while (ic->ic_state != IEEE80211_S_SCAN);
9940 
9941 	return 0;
9942 }
9943 
9944 void
9945 iwm_start(struct ifnet *ifp)
9946 {
9947 	struct iwm_softc *sc = ifp->if_softc;
9948 	struct ieee80211com *ic = &sc->sc_ic;
9949 	struct ieee80211_node *ni;
9950 	struct ether_header *eh;
9951 	struct mbuf *m;
9952 	int ac = EDCA_AC_BE; /* XXX */
9953 
9954 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
9955 		return;
9956 
9957 	for (;;) {
9958 		/* why isn't this done per-queue? */
9959 		if (sc->qfullmsk != 0) {
9960 			ifq_set_oactive(&ifp->if_snd);
9961 			break;
9962 		}
9963 
9964 		/* Don't queue additional frames while flushing Tx queues. */
9965 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
9966 			break;
9967 
9968 		/* need to send management frames even if we're not RUNning */
9969 		m = mq_dequeue(&ic->ic_mgtq);
9970 		if (m) {
9971 			ni = m->m_pkthdr.ph_cookie;
9972 			goto sendit;
9973 		}
9974 
9975 		if (ic->ic_state != IEEE80211_S_RUN ||
9976 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
9977 			break;
9978 
9979 		m = ifq_dequeue(&ifp->if_snd);
9980 		if (!m)
9981 			break;
9982 		if (m->m_len < sizeof (*eh) &&
9983 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
9984 			ifp->if_oerrors++;
9985 			continue;
9986 		}
9987 #if NBPFILTER > 0
9988 		if (ifp->if_bpf != NULL)
9989 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
9990 #endif
9991 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
9992 			ifp->if_oerrors++;
9993 			continue;
9994 		}
9995 
9996  sendit:
9997 #if NBPFILTER > 0
9998 		if (ic->ic_rawbpf != NULL)
9999 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10000 #endif
10001 		if (iwm_tx(sc, m, ni, ac) != 0) {
10002 			ieee80211_release_node(ic, ni);
10003 			ifp->if_oerrors++;
10004 			continue;
10005 		}
10006 
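		/*
		 * Arm the Tx watchdog: iwm_watchdog() runs once per second
		 * while if_timer is set, and schedules init_task to reset
		 * the device if this counter expires (~15s of no progress).
		 */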
10007 		if (ifp->if_flags & IFF_UP) {
10008 			sc->sc_tx_timer = 15;
10009 			ifp->if_timer = 1;
10010 		}
10011 	}
10012 
10013 	return;
10014 }
10015 
10016 void
10017 iwm_stop(struct ifnet *ifp)
10018 {
10019 	struct iwm_softc *sc = ifp->if_softc;
10020 	struct ieee80211com *ic = &sc->sc_ic;
10021 	struct iwm_node *in = (void *)ic->ic_bss;
10022 	int i, s = splnet();
10023 
10024 	rw_assert_wrlock(&sc->ioctl_rwl);
10025 
10026 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10027 
10028 	/* Cancel scheduled tasks and let any stale tasks finish up. */
10029 	task_del(systq, &sc->init_task);
10030 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10031 	iwm_del_task(sc, systq, &sc->ba_task);
10032 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10033 	iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10034 	KASSERT(sc->task_refs.refs >= 1);
10035 	refcnt_finalize(&sc->task_refs, "iwmstop");
10036 
10037 	iwm_stop_device(sc);
10038 
10039 	/* Reset soft state. */
10040 
10041 	sc->sc_generation++;
10042 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10043 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10044 		sc->sc_cmd_resp_pkt[i] = NULL;
10045 		sc->sc_cmd_resp_len[i] = 0;
10046 	}
10047 	ifp->if_flags &= ~IFF_RUNNING;
10048 	ifq_clr_oactive(&ifp->if_snd);
10049 
10050 	in->in_phyctxt = NULL;
10051 	in->tid_disable_ampdu = 0xffff;
10052 	in->tfd_queue_msk = 0;
10053 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10054 
10055 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10056 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10057 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10058 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10059 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10060 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10061 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10062 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10063 
10064 	sc->sc_rx_ba_sessions = 0;
10065 	sc->ba_rx.start_tidmask = 0;
10066 	sc->ba_rx.stop_tidmask = 0;
10067 	sc->tx_ba_queue_mask = 0;
10068 	sc->ba_tx.start_tidmask = 0;
10069 	sc->ba_tx.stop_tidmask = 0;
10070 
10071 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10072 
10073 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10074 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10075 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10076 		iwm_clear_reorder_buffer(sc, rxba);
10077 	}
10078 	iwm_led_blink_stop(sc);
10079 	ifp->if_timer = sc->sc_tx_timer = 0;
10080 
10081 	splx(s);
10082 }
10083 
10084 void
10085 iwm_watchdog(struct ifnet *ifp)
10086 {
10087 	struct iwm_softc *sc = ifp->if_softc;
10088 
10089 	ifp->if_timer = 0;
10090 	if (sc->sc_tx_timer > 0) {
10091 		if (--sc->sc_tx_timer == 0) {
10092 			printf("%s: device timeout\n", DEVNAME(sc));
10093 			if (ifp->if_flags & IFF_DEBUG) {
10094 				iwm_nic_error(sc);
10095 				iwm_dump_driver_status(sc);
10096 			}
10097 			if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10098 				task_add(systq, &sc->init_task);
10099 			ifp->if_oerrors++;
10100 			return;
10101 		}
10102 		ifp->if_timer = 1;
10103 	}
10104 
10105 	ieee80211_watchdog(ifp);
10106 }
10107 
10108 int
10109 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10110 {
10111 	struct iwm_softc *sc = ifp->if_softc;
10112 	int s, err = 0, generation = sc->sc_generation;
10113 
10114 	/*
10115 	 * Prevent processes from entering this function while another
10116 	 * process is tsleep'ing in it.
10117 	 */
10118 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10119 	if (err == 0 && generation != sc->sc_generation) {
10120 		rw_exit(&sc->ioctl_rwl);
10121 		return ENXIO;
10122 	}
10123 	if (err)
10124 		return err;
10125 	s = splnet();
10126 
10127 	switch (cmd) {
10128 	case SIOCSIFADDR:
10129 		ifp->if_flags |= IFF_UP;
10130 		/* FALLTHROUGH */
10131 	case SIOCSIFFLAGS:
10132 		if (ifp->if_flags & IFF_UP) {
10133 			if (!(ifp->if_flags & IFF_RUNNING)) {
10134 				/* Force reload of firmware image from disk. */
10135 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10136 				err = iwm_init(ifp);
10137 			}
10138 		} else {
10139 			if (ifp->if_flags & IFF_RUNNING)
10140 				iwm_stop(ifp);
10141 		}
10142 		break;
10143 
10144 	default:
10145 		err = ieee80211_ioctl(ifp, cmd, data);
10146 	}
10147 
10148 	if (err == ENETRESET) {
10149 		err = 0;
10150 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10151 		    (IFF_UP | IFF_RUNNING)) {
10152 			iwm_stop(ifp);
10153 			err = iwm_init(ifp);
10154 		}
10155 	}
10156 
10157 	splx(s);
10158 	rw_exit(&sc->ioctl_rwl);
10159 
10160 	return err;
10161 }
10162 
10163 /*
10164  * Note: This structure is read from the device with IO accesses,
10165  * and the reading already does the endian conversion. As it is
10166  * read with uint32_t-sized accesses, any members with a different size
10167  * need to be ordered correctly though!
10168  */
10169 struct iwm_error_event_table {
10170 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10171 	uint32_t error_id;		/* type of error */
10172 	uint32_t trm_hw_status0;	/* TRM HW status */
10173 	uint32_t trm_hw_status1;	/* TRM HW status */
10174 	uint32_t blink2;		/* branch link */
10175 	uint32_t ilink1;		/* interrupt link */
10176 	uint32_t ilink2;		/* interrupt link */
10177 	uint32_t data1;		/* error-specific data */
10178 	uint32_t data2;		/* error-specific data */
10179 	uint32_t data3;		/* error-specific data */
10180 	uint32_t bcon_time;		/* beacon timer */
10181 	uint32_t tsf_low;		/* network timestamp function timer */
10182 	uint32_t tsf_hi;		/* network timestamp function timer */
10183 	uint32_t gp1;		/* GP1 timer register */
10184 	uint32_t gp2;		/* GP2 timer register */
10185 	uint32_t fw_rev_type;	/* firmware revision type */
10186 	uint32_t major;		/* uCode version major */
10187 	uint32_t minor;		/* uCode version minor */
10188 	uint32_t hw_ver;		/* HW Silicon version */
10189 	uint32_t brd_ver;		/* HW board version */
10190 	uint32_t log_pc;		/* log program counter */
10191 	uint32_t frame_ptr;		/* frame pointer */
10192 	uint32_t stack_ptr;		/* stack pointer */
10193 	uint32_t hcmd;		/* last host command header */
10194 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
10195 				 * rxtx_flag */
10196 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
10197 				 * host_flag */
10198 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
10199 				 * enc_flag */
10200 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
10201 				 * time_flag */
10202 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
10203 				 * wico interrupt */
10204 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
10205 	uint32_t wait_event;		/* wait event() caller address */
10206 	uint32_t l2p_control;	/* L2pControlField */
10207 	uint32_t l2p_duration;	/* L2pDurationField */
10208 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
10209 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
10210 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
10211 				 * (LMPM_PMG_SEL) */
10212 	uint32_t u_timestamp;	/* date and time of the firmware
10213 				 * compilation */
10214 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
10215 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
10216 
10217 /*
10218  * UMAC error struct - relevant starting from family 8000 chip.
10219  * Note: This structure is read from the device with IO accesses,
10220  * and the reading already does the endian conversion. As it is
10221  * read with u32-sized accesses, any members with a different size
10222  * need to be ordered correctly though!
10223  */
10224 struct iwm_umac_error_event_table {
10225 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10226 	uint32_t error_id;	/* type of error */
10227 	uint32_t blink1;	/* branch link */
10228 	uint32_t blink2;	/* branch link */
10229 	uint32_t ilink1;	/* interrupt link */
10230 	uint32_t ilink2;	/* interrupt link */
10231 	uint32_t data1;		/* error-specific data */
10232 	uint32_t data2;		/* error-specific data */
10233 	uint32_t data3;		/* error-specific data */
10234 	uint32_t umac_major;
10235 	uint32_t umac_minor;
10236 	uint32_t frame_pointer;	/* core register 27*/
10237 	uint32_t stack_pointer;	/* core register 28 */
10238 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
10239 	uint32_t nic_isr_pref;	/* ISR status register */
10240 } __packed;
10241 
10242 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
10243 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
10244 
10245 void
10246 iwm_nic_umac_error(struct iwm_softc *sc)
10247 {
10248 	struct iwm_umac_error_event_table table;
10249 	uint32_t base;
10250 
10251 	base = sc->sc_uc.uc_umac_error_event_table;
10252 
10253 	if (base < 0x800000) {
10254 		printf("%s: Invalid error log pointer 0x%08x\n",
10255 		    DEVNAME(sc), base);
10256 		return;
10257 	}
10258 
10259 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10260 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10261 		return;
10262 	}
10263 
10264 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10265 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10266 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10267 			sc->sc_flags, table.valid);
10268 	}
10269 
10270 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10271 		iwm_desc_lookup(table.error_id));
10272 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10273 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10274 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10275 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10276 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10277 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10278 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10279 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10280 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10281 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10282 	    table.frame_pointer);
10283 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10284 	    table.stack_pointer);
10285 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10286 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10287 	    table.nic_isr_pref);
10288 }
10289 
10290 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10291 static struct {
10292 	const char *name;
10293 	uint8_t num;
10294 } advanced_lookup[] = {
10295 	{ "NMI_INTERRUPT_WDG", 0x34 },
10296 	{ "SYSASSERT", 0x35 },
10297 	{ "UCODE_VERSION_MISMATCH", 0x37 },
10298 	{ "BAD_COMMAND", 0x38 },
10299 	{ "BAD_COMMAND", 0x39 },
10300 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10301 	{ "FATAL_ERROR", 0x3D },
10302 	{ "NMI_TRM_HW_ERR", 0x46 },
10303 	{ "NMI_INTERRUPT_TRM", 0x4C },
10304 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10305 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10306 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10307 	{ "NMI_INTERRUPT_HOST", 0x66 },
10308 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10309 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10310 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10311 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
10312 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
10313 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10314 	{ "ADVANCED_SYSASSERT", 0 },
10315 };
10316 
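/*
 * Map a firmware error id to a symbolic name. The CPU bits covered by
 * IWM_FW_SYSASSERT_CPU_MASK are ignored, so e.g. 0x00000034 and
 * 0x10000034 both resolve to "NMI_INTERRUPT_WDG"; ids with no match
 * fall through to the final "ADVANCED_SYSASSERT" entry.
 */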
10317 const char *
10318 iwm_desc_lookup(uint32_t num)
10319 {
10320 	int i;
10321 
10322 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10323 		if (advanced_lookup[i].num ==
10324 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10325 			return advanced_lookup[i].name;
10326 
10327 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10328 	return advanced_lookup[i].name;
10329 }
10330 
10331 /*
10332  * Support for dumping the error log seemed like a good idea ...
10333  * but it's mostly hex junk and the only sensible thing is the
10334  * hw/ucode revision (which we know anyway).  Since it's here,
10335  * I'll just leave it in, just in case e.g. the Intel guys want to
10336  * help us decipher some "ADVANCED_SYSASSERT" later.
10337  */
10338 void
10339 iwm_nic_error(struct iwm_softc *sc)
10340 {
10341 	struct iwm_error_event_table table;
10342 	uint32_t base;
10343 
10344 	printf("%s: dumping device error log\n", DEVNAME(sc));
10345 	base = sc->sc_uc.uc_error_event_table;
10346 	if (base < 0x800000) {
10347 		printf("%s: Invalid error log pointer 0x%08x\n",
10348 		    DEVNAME(sc), base);
10349 		return;
10350 	}
10351 
10352 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10353 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10354 		return;
10355 	}
10356 
10357 	if (!table.valid) {
10358 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10359 		return;
10360 	}
10361 
10362 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10363 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10364 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10365 		    sc->sc_flags, table.valid);
10366 	}
10367 
10368 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10369 	    iwm_desc_lookup(table.error_id));
10370 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10371 	    table.trm_hw_status0);
10372 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10373 	    table.trm_hw_status1);
10374 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10375 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10376 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10377 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10378 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10379 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10380 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10381 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10382 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10383 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10384 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10385 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10386 	    table.fw_rev_type);
10387 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10388 	    table.major);
10389 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10390 	    table.minor);
10391 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10392 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10393 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10394 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10395 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10396 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10397 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10398 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10399 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10400 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10401 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10402 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10403 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10404 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10405 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10406 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10407 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10408 
10409 	if (sc->sc_uc.uc_umac_error_event_table)
10410 		iwm_nic_umac_error(sc);
10411 }
10412 
10413 void
10414 iwm_dump_driver_status(struct iwm_softc *sc)
10415 {
10416 	int i;
10417 
10418 	printf("driver status:\n");
10419 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
10420 		struct iwm_tx_ring *ring = &sc->txq[i];
10421 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
10422 		    "queued=%-3d\n",
10423 		    i, ring->qid, ring->cur, ring->queued);
10424 	}
10425 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
10426 	printf("  802.11 state %s\n",
10427 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10428 }
10429 
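/*
 * Helper macros for command-response handling (descriptive note): each
 * response payload immediately follows its struct iwm_rx_packet header in
 * the same DMA buffer, so handlers sync the payload bytes past the header
 * and then point a typed pointer at them, e.g.:
 *
 *	struct iwm_alive_resp_v1 *resp1;
 *	SYNC_RESP_STRUCT(resp1, pkt);
 *
 * Both macros assume a variable 'data' with a valid DMA map is in scope
 * at the expansion site.
 */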
10430 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10431 do {									\
10432 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10433 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10434 	_var_ = (void *)((_pkt_)+1);					\
10435 } while (/*CONSTCOND*/0)
10436 
10437 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10438 do {									\
10439 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10440 	    (_len_), BUS_DMASYNC_POSTREAD);				\
10441 	_ptr_ = (void *)((_pkt_)+1);					\
10442 } while (/*CONSTCOND*/0)
10443 
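/*
 * Note: ADVANCE_RXQ expects a local variable 'count' (the RX ring size,
 * which differs between single-queue and multi-queue hardware) to be in
 * scope at the expansion site; see iwm_notif_intr() below.
 */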
10444 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count)
10445 
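/*
 * A packet slot is considered valid unless it is still in its
 * post-initialization state: an all-zero header (qid, idx, and code all
 * zero) or a length word equal to IWM_FH_RSCSR_FRAME_INVALID.
 */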
10446 int
10447 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10448 {
10449 	int qid, idx, code;
10450 
10451 	qid = pkt->hdr.qid & ~0x80;
10452 	idx = pkt->hdr.idx;
10453 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10454 
10455 	return (!(qid == 0 && idx == 0 && code == 0) &&
10456 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10457 }
10458 
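/*
 * Process one RX buffer, which may contain several firmware packets
 * packed back to back, each aligned to IWM_FH_RSCSR_FRAME_ALIGN.
 * When the buffer contains received MPDUs it is taken off the RX ring
 * and its frames are handed to the net80211 stack; command responses
 * and notifications are dispatched to their handlers below.
 */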
10459 void
10460 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10461 {
10462 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10463 	struct iwm_rx_packet *pkt, *nextpkt;
10464 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10465 	struct mbuf *m0, *m;
10466 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10467 	int qid, idx, code, handled = 1;
10468 
10469 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10470 	    BUS_DMASYNC_POSTREAD);
10471 
10472 	m0 = data->m;
10473 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
10474 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
10475 		qid = pkt->hdr.qid;
10476 		idx = pkt->hdr.idx;
10477 
10478 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10479 
10480 		if (!iwm_rx_pkt_valid(pkt))
10481 			break;
10482 
10483 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
10484 		if (len < sizeof(pkt->hdr) ||
10485 		    len > (IWM_RBUF_SIZE - offset - minsz))
10486 			break;
10487 
10488 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10489 			/* Take mbuf m0 off the RX ring. */
10490 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
10491 				ifp->if_ierrors++;
10492 				break;
10493 			}
10494 			KASSERT(data->m != m0);
10495 		}
10496 
10497 		switch (code) {
10498 		case IWM_REPLY_RX_PHY_CMD:
10499 			iwm_rx_rx_phy_cmd(sc, pkt, data);
10500 			break;
10501 
10502 		case IWM_REPLY_RX_MPDU_CMD: {
10503 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
10504 			nextoff = offset +
10505 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10506 			nextpkt = (struct iwm_rx_packet *)
10507 			    (m0->m_data + nextoff);
10508 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
10509 			    !iwm_rx_pkt_valid(nextpkt)) {
10510 				/* No need to copy last frame in buffer. */
10511 				if (offset > 0)
10512 					m_adj(m0, offset);
10513 				if (sc->sc_mqrx_supported)
10514 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
10515 					    maxlen, ml);
10516 				else
10517 					iwm_rx_mpdu(sc, m0, pkt->data,
10518 					    maxlen, ml);
10519 				m0 = NULL; /* stack owns m0 now; abort loop */
10520 			} else {
10521 				/*
10522 				 * Create an mbuf which points to the current
10523 				 * packet. Always copy from offset zero to
10524 				 * preserve m_pkthdr.
10525 				 */
10526 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
10527 				if (m == NULL) {
10528 					ifp->if_ierrors++;
10529 					m_freem(m0);
10530 					m0 = NULL;
10531 					break;
10532 				}
10533 				m_adj(m, offset);
10534 				if (sc->sc_mqrx_supported)
10535 					iwm_rx_mpdu_mq(sc, m, pkt->data,
10536 					    maxlen, ml);
10537 				else
10538 					iwm_rx_mpdu(sc, m, pkt->data,
10539 					    maxlen, ml);
10540 			}
10541 			break;
10542 		}
10543 
10544 		case IWM_TX_CMD:
10545 			iwm_rx_tx_cmd(sc, pkt, data);
10546 			break;
10547 
10548 		case IWM_BA_NOTIF:
10549 			iwm_rx_compressed_ba(sc, pkt, data);
10550 			break;
10551 
10552 		case IWM_MISSED_BEACONS_NOTIFICATION:
10553 			iwm_rx_bmiss(sc, pkt, data);
10554 			break;
10555 
10556 		case IWM_MFUART_LOAD_NOTIFICATION:
10557 			break;
10558 
10559 		case IWM_ALIVE: {
10560 			struct iwm_alive_resp_v1 *resp1;
10561 			struct iwm_alive_resp_v2 *resp2;
10562 			struct iwm_alive_resp_v3 *resp3;
10563 
10564 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
10565 				SYNC_RESP_STRUCT(resp1, pkt);
10566 				sc->sc_uc.uc_error_event_table
10567 				    = le32toh(resp1->error_event_table_ptr);
10568 				sc->sc_uc.uc_log_event_table
10569 				    = le32toh(resp1->log_event_table_ptr);
10570 				sc->sched_base = le32toh(resp1->scd_base_ptr);
10571 				if (resp1->status == IWM_ALIVE_STATUS_OK)
10572 					sc->sc_uc.uc_ok = 1;
10573 				else
10574 					sc->sc_uc.uc_ok = 0;
10575 			}
10576 
10577 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
10578 				SYNC_RESP_STRUCT(resp2, pkt);
10579 				sc->sc_uc.uc_error_event_table
10580 				    = le32toh(resp2->error_event_table_ptr);
10581 				sc->sc_uc.uc_log_event_table
10582 				    = le32toh(resp2->log_event_table_ptr);
10583 				sc->sched_base = le32toh(resp2->scd_base_ptr);
10584 				sc->sc_uc.uc_umac_error_event_table
10585 				    = le32toh(resp2->error_info_addr);
10586 				if (resp2->status == IWM_ALIVE_STATUS_OK)
10587 					sc->sc_uc.uc_ok = 1;
10588 				else
10589 					sc->sc_uc.uc_ok = 0;
10590 			}
10591 
10592 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
10593 				SYNC_RESP_STRUCT(resp3, pkt);
10594 				sc->sc_uc.uc_error_event_table
10595 				    = le32toh(resp3->error_event_table_ptr);
10596 				sc->sc_uc.uc_log_event_table
10597 				    = le32toh(resp3->log_event_table_ptr);
10598 				sc->sched_base = le32toh(resp3->scd_base_ptr);
10599 				sc->sc_uc.uc_umac_error_event_table
10600 				    = le32toh(resp3->error_info_addr);
10601 				if (resp3->status == IWM_ALIVE_STATUS_OK)
10602 					sc->sc_uc.uc_ok = 1;
10603 				else
10604 					sc->sc_uc.uc_ok = 0;
10605 			}
10606 
10607 			sc->sc_uc.uc_intr = 1;
10608 			wakeup(&sc->sc_uc);
10609 			break;
10610 		}
10611 
10612 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
10613 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
10614 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
10615 			iwm_phy_db_set_section(sc, phy_db_notif);
10616 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
10617 			wakeup(&sc->sc_init_complete);
10618 			break;
10619 		}
10620 
10621 		case IWM_STATISTICS_NOTIFICATION: {
10622 			struct iwm_notif_statistics *stats;
10623 			SYNC_RESP_STRUCT(stats, pkt);
10624 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
10625 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
10626 			break;
10627 		}
10628 
10629 		case IWM_MCC_CHUB_UPDATE_CMD: {
10630 			struct iwm_mcc_chub_notif *notif;
10631 			SYNC_RESP_STRUCT(notif, pkt);
10632 			iwm_mcc_update(sc, notif);
10633 			break;
10634 		}
10635 
10636 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
10637 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
10638 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
10639 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
10640 				 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
10641 			break;
10642 
10643 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
10644 		    IWM_CT_KILL_NOTIFICATION): {
10645 			struct iwm_ct_kill_notif *notif;
10646 			SYNC_RESP_STRUCT(notif, pkt);
10647 			printf("%s: device at critical temperature (%u degC), "
10648 			    "stopping device\n",
10649 			    DEVNAME(sc), le16toh(notif->temperature));
10650 			sc->sc_flags |= IWM_FLAG_HW_ERR;
10651 			task_add(systq, &sc->init_task);
10652 			break;
10653 		}
10654 
10655 		case IWM_ADD_STA_KEY:
10656 		case IWM_PHY_CONFIGURATION_CMD:
10657 		case IWM_TX_ANT_CONFIGURATION_CMD:
10658 		case IWM_ADD_STA:
10659 		case IWM_MAC_CONTEXT_CMD:
10660 		case IWM_REPLY_SF_CFG_CMD:
10661 		case IWM_POWER_TABLE_CMD:
10662 		case IWM_LTR_CONFIG:
10663 		case IWM_PHY_CONTEXT_CMD:
10664 		case IWM_BINDING_CONTEXT_CMD:
10665 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
10666 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
10667 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
10668 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
10669 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
10670 		case IWM_REPLY_BEACON_FILTERING_CMD:
10671 		case IWM_MAC_PM_POWER_TABLE:
10672 		case IWM_TIME_QUOTA_CMD:
10673 		case IWM_REMOVE_STA:
10674 		case IWM_TXPATH_FLUSH:
10675 		case IWM_LQ_CMD:
10676 		case IWM_WIDE_ID(IWM_LONG_GROUP,
10677 				 IWM_FW_PAGING_BLOCK_CMD):
10678 		case IWM_BT_CONFIG:
10679 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
10680 		case IWM_NVM_ACCESS_CMD:
10681 		case IWM_MCC_UPDATE_CMD:
10682 		case IWM_TIME_EVENT_CMD: {
10683 			size_t pkt_len;
10684 
10685 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
10686 				break;
10687 
10688 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
10689 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10690 
10691 			pkt_len = sizeof(pkt->len_n_flags) +
10692 			    iwm_rx_packet_len(pkt);
10693 
10694 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
10695 			    pkt_len < sizeof(*pkt) ||
10696 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
10697 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
10698 				    sc->sc_cmd_resp_len[idx]);
10699 				sc->sc_cmd_resp_pkt[idx] = NULL;
10700 				break;
10701 			}
10702 
10703 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
10704 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10705 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
10706 			break;
10707 		}
10708 
10709 		/* ignore */
10710 		case IWM_PHY_DB_CMD:
10711 			break;
10712 
10713 		case IWM_INIT_COMPLETE_NOTIF:
10714 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
10715 			wakeup(&sc->sc_init_complete);
10716 			break;
10717 
10718 		case IWM_SCAN_OFFLOAD_COMPLETE: {
10719 			struct iwm_periodic_scan_complete *notif;
10720 			SYNC_RESP_STRUCT(notif, pkt);
10721 			break;
10722 		}
10723 
10724 		case IWM_SCAN_ITERATION_COMPLETE: {
10725 			struct iwm_lmac_scan_complete_notif *notif;
10726 			SYNC_RESP_STRUCT(notif, pkt);
10727 			iwm_endscan(sc);
10728 			break;
10729 		}
10730 
10731 		case IWM_SCAN_COMPLETE_UMAC: {
10732 			struct iwm_umac_scan_complete *notif;
10733 			SYNC_RESP_STRUCT(notif, pkt);
10734 			iwm_endscan(sc);
10735 			break;
10736 		}
10737 
10738 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
10739 			struct iwm_umac_scan_iter_complete_notif *notif;
10740 			SYNC_RESP_STRUCT(notif, pkt);
10741 			iwm_endscan(sc);
10742 			break;
10743 		}
10744 
10745 		case IWM_REPLY_ERROR: {
10746 			struct iwm_error_resp *resp;
10747 			SYNC_RESP_STRUCT(resp, pkt);
10748 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
10749 				DEVNAME(sc), le32toh(resp->error_type),
10750 				resp->cmd_id);
10751 			break;
10752 		}
10753 
10754 		case IWM_TIME_EVENT_NOTIFICATION: {
10755 			struct iwm_time_event_notif *notif;
10756 			uint32_t action;
10757 			SYNC_RESP_STRUCT(notif, pkt);
10758 
10759 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
10760 				break;
10761 			action = le32toh(notif->action);
10762 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
10763 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10764 			break;
10765 		}
10766 
10767 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
10768 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
10769 		    break;
10770 
10771 		/*
10772 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
10773 		 * messages. Just ignore them for now.
10774 		 */
10775 		case IWM_DEBUG_LOG_MSG:
10776 			break;
10777 
10778 		case IWM_MCAST_FILTER_CMD:
10779 			break;
10780 
10781 		case IWM_SCD_QUEUE_CFG: {
10782 			struct iwm_scd_txq_cfg_rsp *rsp;
10783 			SYNC_RESP_STRUCT(rsp, pkt);
10784 
10785 			break;
10786 		}
10787 
10788 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
10789 			break;
10790 
10791 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
10792 			break;
10793 
10794 		default:
10795 			handled = 0;
10796 			printf("%s: unhandled firmware response 0x%x/0x%x "
10797 			    "rx ring %d[%d]\n",
10798 			    DEVNAME(sc), code, pkt->len_n_flags,
10799 			    (qid & ~0x80), idx);
10800 			break;
10801 		}
10802 
10803 		/*
10804 		 * uCode sets bit 0x80 when it originates the notification,
10805 		 * i.e. when the notification is not a direct response to a
10806 		 * command sent by the driver.
10807 		 * For example, uCode issues IWM_REPLY_RX when it sends a
10808 		 * received frame to the driver.
10809 		 */
10810 		if (handled && !(qid & (1 << 7))) {
10811 			iwm_cmd_done(sc, qid, idx, code);
10812 		}
10813 
10814 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10815 	}
10816 
10817 	if (m0 && m0 != data->m)
10818 		m_freem(m0);
10819 }
10820 
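/*
 * Drain the RX ring: walk from our current index up to the index the
 * firmware last closed (closed_rb_num), process each buffer, hand any
 * received frames to the stack, then write the processed index back
 * (aligned down to a multiple of 8) so the firmware can reuse those
 * buffers.
 */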
10821 void
10822 iwm_notif_intr(struct iwm_softc *sc)
10823 {
10824 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
10825 	uint32_t wreg;
10826 	uint16_t hw;
10827 	int count;
10828 
10829 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
10830 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
10831 
10832 	if (sc->sc_mqrx_supported) {
10833 		count = IWM_RX_MQ_RING_COUNT;
10834 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
10835 	} else {
10836 		count = IWM_RX_RING_COUNT;
10837 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
10838 	}
10839 
10840 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
10841 	hw &= (count - 1);
10842 	while (sc->rxq.cur != hw) {
10843 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
10844 		iwm_rx_pkt(sc, data, &ml);
10845 		ADVANCE_RXQ(sc);
10846 	}
10847 	if_input(&sc->sc_ic.ic_if, &ml);
10848 
10849 	/*
10850 	 * Tell the firmware what we have processed.
10851 	 * The hardware seems to get upset unless we align the write to 8.
10852 	 */
10853 	hw = (hw == 0) ? count - 1 : hw - 1;
10854 	IWM_WRITE(sc, wreg, hw & ~7);
10855 }
10856 
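/*
 * Legacy (INTx/MSI) interrupt handler. Interrupt causes are read either
 * from the in-memory ICT table, when enabled, or directly from the
 * CSR_INT and CSR_FH_INT_STATUS registers.
 */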
10857 int
10858 iwm_intr(void *arg)
10859 {
10860 	struct iwm_softc *sc = arg;
10861 	struct ieee80211com *ic = &sc->sc_ic;
10862 	struct ifnet *ifp = IC2IFP(ic);
10863 	int handled = 0;
10864 	int rv = 0;
10865 	uint32_t r1, r2;
10866 
10867 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
10868 
10869 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
10870 		uint32_t *ict = sc->ict_dma.vaddr;
10871 		int tmp;
10872 
10873 		tmp = htole32(ict[sc->ict_cur]);
10874 		if (!tmp)
10875 			goto out_ena;
10876 
10877 		/*
10878 		 * OK, there was something; keep draining ICT entries until
10878 		 * we have them all.
10879 		 */
10880 		r1 = r2 = 0;
10881 		while (tmp) {
10882 			r1 |= tmp;
10883 			ict[sc->ict_cur] = 0;
10884 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
10885 			tmp = htole32(ict[sc->ict_cur]);
10886 		}
10887 
10888 		/* An all-ones cause word is invalid; ignore it. */
10889 		if (r1 == 0xffffffff)
10890 			r1 = 0;
10891 
10892 		/*
10893 		 * Workaround for hardware bug where bits are falsely cleared
10894 		 * when using interrupt coalescing.  Bit 15 should be set if
10895 		 * bits 18 and 19 are set.
10896 		 */
10897 		if (r1 & 0xc0000)
10898 			r1 |= 0x8000;
10899 
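		/*
		 * Expand the compact ICT encoding back into CSR_INT bit
		 * positions: the low byte stays in bits 0-7 and the high
		 * byte moves up to bits 24-31.
		 */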
10900 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
10901 	} else {
10902 		r1 = IWM_READ(sc, IWM_CSR_INT);
10903 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
10904 	}
10905 	if (r1 == 0 && r2 == 0) {
10906 		goto out_ena;
10907 	}
10908 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
10909 		goto out;
10910 
10911 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
10912 
10913 	/* ignored */
10914 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
10915 
10916 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
10917 		handled |= IWM_CSR_INT_BIT_RF_KILL;
10918 		iwm_check_rfkill(sc);
10919 		task_add(systq, &sc->init_task);
10920 		rv = 1;
10921 		goto out_ena;
10922 	}
10923 
10924 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
10925 		if (ifp->if_flags & IFF_DEBUG) {
10926 			iwm_nic_error(sc);
10927 			iwm_dump_driver_status(sc);
10928 		}
10929 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10930 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10931 			task_add(systq, &sc->init_task);
10932 		rv = 1;
10933 		goto out;
10935 	}
10936 
10937 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
10938 		handled |= IWM_CSR_INT_BIT_HW_ERR;
10939 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
10940 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
10941 			sc->sc_flags |= IWM_FLAG_HW_ERR;
10942 			task_add(systq, &sc->init_task);
10943 		}
10944 		rv = 1;
10945 		goto out;
10946 	}
10947 
10948 	/* firmware chunk loaded */
10949 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
10950 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
10951 		handled |= IWM_CSR_INT_BIT_FH_TX;
10952 
10953 		sc->sc_fw_chunk_done = 1;
10954 		wakeup(&sc->sc_fw);
10955 	}
10956 
10957 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
10958 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
10959 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
10960 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
10961 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
10962 		}
10963 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
10964 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
10965 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
10966 		}
10967 
10968 		/* Disable periodic interrupt; we use it as just a one-shot. */
10969 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
10970 
10971 		/*
10972 		 * Enable periodic interrupt in 8 msec only if we received
10973 		 * real RX interrupt (instead of just periodic int), to catch
10974 		 * any dangling Rx interrupt.  If it was just the periodic
10975 		 * interrupt, there was no dangling Rx activity, and no need
10976 		 * to extend the periodic interrupt; one-shot is enough.
10977 		 */
10978 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
10979 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
10980 			    IWM_CSR_INT_PERIODIC_ENA);
10981 
10982 		iwm_notif_intr(sc);
10983 	}
10984 
10985 	rv = 1;
10986 
10987  out_ena:
10988 	iwm_restore_interrupts(sc);
10989  out:
10990 	return rv;
10991 }
10992 
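/*
 * MSI-X interrupt handler. Pending causes are read from the FH and HW
 * cause registers and acknowledged by writing the same bits back, then
 * masked against the causes we actually enabled.
 */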
10993 int
10994 iwm_intr_msix(void *arg)
10995 {
10996 	struct iwm_softc *sc = arg;
10997 	struct ieee80211com *ic = &sc->sc_ic;
10998 	struct ifnet *ifp = IC2IFP(ic);
10999 	uint32_t inta_fh, inta_hw;
11000 	int vector = 0;
11001 
11002 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11003 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11004 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11005 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11006 	inta_fh &= sc->sc_fh_mask;
11007 	inta_hw &= sc->sc_hw_mask;
11008 
11009 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11010 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11011 		iwm_notif_intr(sc);
11012 	}
11013 
11014 	/* firmware chunk loaded */
11015 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11016 		sc->sc_fw_chunk_done = 1;
11017 		wakeup(&sc->sc_fw);
11018 	}
11019 
11020 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11021 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11022 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11023 		if (ifp->if_flags & IFF_DEBUG) {
11024 			iwm_nic_error(sc);
11025 			iwm_dump_driver_status(sc);
11026 		}
11027 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11028 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11029 			task_add(systq, &sc->init_task);
11030 		return 1;
11031 	}
11032 
11033 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11034 		iwm_check_rfkill(sc);
11035 		task_add(systq, &sc->init_task);
11036 	}
11037 
11038 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11039 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11040 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11041 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11042 			task_add(systq, &sc->init_task);
11043 		}
11044 		return 1;
11045 	}
11046 
11047 	/*
11048 	 * Before delivering an interrupt the HW disables it, to prevent
11049 	 * a nested interrupt, by setting the corresponding bit in the
11050 	 * automask register. After handling the interrupt we re-enable
11051 	 * delivery by clearing that bit. The register is defined as
11052 	 * write-1-clear (W1C), meaning that a bit is cleared by writing
11053 	 * 1 to it.
11054 	 */
11055 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11056 	return 1;
11057 }
11058 
11059 typedef void *iwm_match_t;
11060 
11061 static const struct pci_matchid iwm_devices[] = {
11062 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11063 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11064 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11065 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11066 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11067 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11068 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11069 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11070 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11071 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11072 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11073 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11074 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11075 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11076 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11077 };
11078 
11079 int
11080 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11081 {
11082 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11083 	    nitems(iwm_devices));
11084 }
11085 
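/*
 * iwm_preinit() runs once the root filesystem is mounted, so that the
 * firmware can be loaded from disk. It boots the init firmware once to
 * read the NVM, then prints version info and the MAC address and
 * finishes net80211 setup on the first successful load; on later calls
 * it only refreshes the MAC address.
 */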
11086 int
11087 iwm_preinit(struct iwm_softc *sc)
11088 {
11089 	struct ieee80211com *ic = &sc->sc_ic;
11090 	struct ifnet *ifp = IC2IFP(ic);
11091 	int err;
11092 	static int attached;
11093 
11094 	err = iwm_prepare_card_hw(sc);
11095 	if (err) {
11096 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11097 		return err;
11098 	}
11099 
11100 	if (attached) {
11101 		/* Update MAC in case the upper layers changed it. */
11102 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11103 		    ((struct arpcom *)ifp)->ac_enaddr);
11104 		return 0;
11105 	}
11106 
11107 	err = iwm_start_hw(sc);
11108 	if (err) {
11109 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11110 		return err;
11111 	}
11112 
11113 	err = iwm_run_init_mvm_ucode(sc, 1);
11114 	iwm_stop_device(sc);
11115 	if (err)
11116 		return err;
11117 
11118 	/* Print version info and MAC address on first successful fw load. */
11119 	attached = 1;
11120 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11121 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11122 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11123 
11124 	if (sc->sc_nvm.sku_cap_11n_enable)
11125 		iwm_setup_ht_rates(sc);
11126 
11127 	/* not all hardware can do 5GHz band */
11128 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11129 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11130 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11131 
11132 	/* Configure channel information obtained from firmware. */
11133 	ieee80211_channel_init(ifp);
11134 
11135 	/* Configure MAC address. */
11136 	err = if_setlladdr(ifp, ic->ic_myaddr);
11137 	if (err)
11138 		printf("%s: could not set MAC address (error %d)\n",
11139 		    DEVNAME(sc), err);
11140 
11141 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11142 
11143 	return 0;
11144 }
11145 
11146 void
11147 iwm_attach_hook(struct device *self)
11148 {
11149 	struct iwm_softc *sc = (void *)self;
11150 
11151 	KASSERT(!cold);
11152 
11153 	iwm_preinit(sc);
11154 }
11155 
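/*
 * Autoconf attach: map PCI resources, establish the interrupt (MSI-X,
 * MSI, or INTx, in that order of preference), select firmware image and
 * device parameters by PCI product ID, allocate DMA rings, and register
 * with net80211. Loading the firmware itself is deferred to
 * iwm_attach_hook() via config_mountroot().
 */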
11156 void
11157 iwm_attach(struct device *parent, struct device *self, void *aux)
11158 {
11159 	struct iwm_softc *sc = (void *)self;
11160 	struct pci_attach_args *pa = aux;
11161 	pci_intr_handle_t ih;
11162 	pcireg_t reg, memtype;
11163 	struct ieee80211com *ic = &sc->sc_ic;
11164 	struct ifnet *ifp = &ic->ic_if;
11165 	const char *intrstr;
11166 	int err;
11167 	int txq_i, i, j;
11168 
11169 	sc->sc_pct = pa->pa_pc;
11170 	sc->sc_pcitag = pa->pa_tag;
11171 	sc->sc_dmat = pa->pa_dmat;
11172 
11173 	rw_init(&sc->ioctl_rwl, "iwmioctl");
11174 
11175 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11176 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11177 	if (err == 0) {
11178 		printf("%s: PCIe capability structure not found!\n",
11179 		    DEVNAME(sc));
11180 		return;
11181 	}
11182 
11183 	/* Clear device-specific "PCI retry timeout" register (41h). */
11184 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11185 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11186 
11187 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11188 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11189 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11190 	if (err) {
11191 		printf("%s: can't map mem space\n", DEVNAME(sc));
11192 		return;
11193 	}
11194 
11195 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11196 		sc->sc_msix = 1;
11197 	} else if (pci_intr_map_msi(pa, &ih)) {
11198 		if (pci_intr_map(pa, &ih)) {
11199 			printf("%s: can't map interrupt\n", DEVNAME(sc));
11200 			return;
11201 		}
11202 		/* Hardware bug workaround. */
11203 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11204 		    PCI_COMMAND_STATUS_REG);
11205 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11206 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11207 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11208 		    PCI_COMMAND_STATUS_REG, reg);
11209 	}
11210 
11211 	intrstr = pci_intr_string(sc->sc_pct, ih);
11212 	if (sc->sc_msix)
11213 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11214 		    iwm_intr_msix, sc, DEVNAME(sc));
11215 	else
11216 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11217 		    iwm_intr, sc, DEVNAME(sc));
11218 
11219 	if (sc->sc_ih == NULL) {
11220 		printf("\n");
11221 		printf("%s: can't establish interrupt", DEVNAME(sc));
11222 		if (intrstr != NULL)
11223 			printf(" at %s", intrstr);
11224 		printf("\n");
11225 		return;
11226 	}
11227 	printf(", %s\n", intrstr);
11228 
11229 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11230 	switch (PCI_PRODUCT(pa->pa_id)) {
11231 	case PCI_PRODUCT_INTEL_WL_3160_1:
11232 	case PCI_PRODUCT_INTEL_WL_3160_2:
11233 		sc->sc_fwname = "iwm-3160-17";
11234 		sc->host_interrupt_operation_mode = 1;
11235 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11236 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11237 		sc->sc_nvm_max_section_size = 16384;
11238 		sc->nvm_type = IWM_NVM;
11239 		break;
11240 	case PCI_PRODUCT_INTEL_WL_3165_1:
11241 	case PCI_PRODUCT_INTEL_WL_3165_2:
11242 		sc->sc_fwname = "iwm-7265D-29";
11243 		sc->host_interrupt_operation_mode = 0;
11244 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11245 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11246 		sc->sc_nvm_max_section_size = 16384;
11247 		sc->nvm_type = IWM_NVM;
11248 		break;
11249 	case PCI_PRODUCT_INTEL_WL_3168_1:
11250 		sc->sc_fwname = "iwm-3168-29";
11251 		sc->host_interrupt_operation_mode = 0;
11252 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11253 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11254 		sc->sc_nvm_max_section_size = 16384;
11255 		sc->nvm_type = IWM_NVM_SDP;
11256 		break;
11257 	case PCI_PRODUCT_INTEL_WL_7260_1:
11258 	case PCI_PRODUCT_INTEL_WL_7260_2:
11259 		sc->sc_fwname = "iwm-7260-17";
11260 		sc->host_interrupt_operation_mode = 1;
11261 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11262 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11263 		sc->sc_nvm_max_section_size = 16384;
11264 		sc->nvm_type = IWM_NVM;
11265 		break;
11266 	case PCI_PRODUCT_INTEL_WL_7265_1:
11267 	case PCI_PRODUCT_INTEL_WL_7265_2:
11268 		sc->sc_fwname = "iwm-7265-17";
11269 		sc->host_interrupt_operation_mode = 0;
11270 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11271 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11272 		sc->sc_nvm_max_section_size = 16384;
11273 		sc->nvm_type = IWM_NVM;
11274 		break;
11275 	case PCI_PRODUCT_INTEL_WL_8260_1:
11276 	case PCI_PRODUCT_INTEL_WL_8260_2:
11277 		sc->sc_fwname = "iwm-8000C-36";
11278 		sc->host_interrupt_operation_mode = 0;
11279 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11280 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11281 		sc->sc_nvm_max_section_size = 32768;
11282 		sc->nvm_type = IWM_NVM_EXT;
11283 		break;
11284 	case PCI_PRODUCT_INTEL_WL_8265_1:
11285 		sc->sc_fwname = "iwm-8265-36";
11286 		sc->host_interrupt_operation_mode = 0;
11287 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11288 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11289 		sc->sc_nvm_max_section_size = 32768;
11290 		sc->nvm_type = IWM_NVM_EXT;
11291 		break;
11292 	case PCI_PRODUCT_INTEL_WL_9260_1:
11293 		sc->sc_fwname = "iwm-9260-46";
11294 		sc->host_interrupt_operation_mode = 0;
11295 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11296 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11297 		sc->sc_nvm_max_section_size = 32768;
11298 		sc->sc_mqrx_supported = 1;
11299 		break;
11300 	case PCI_PRODUCT_INTEL_WL_9560_1:
11301 	case PCI_PRODUCT_INTEL_WL_9560_2:
11302 		sc->sc_fwname = "iwm-9000-46";
11303 		sc->host_interrupt_operation_mode = 0;
11304 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11305 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11306 		sc->sc_nvm_max_section_size = 32768;
11307 		sc->sc_mqrx_supported = 1;
11308 		sc->sc_integrated = 1;
11309 		sc->sc_xtal_latency = 650;
11310 		break;
11311 	default:
11312 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11313 		return;
11314 	}
11315 
11316 	/*
11317 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
11318 	 * changed, and the revision step now also includes bits 0-1 (there is
11319 	 * no more "dash" value). To keep hw_rev backwards compatible, we store
11320 	 * it in the old format.
11321 	 */
11322 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11323 		uint32_t hw_step;
11324 
11325 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11326 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11327 
11328 		if (iwm_prepare_card_hw(sc) != 0) {
11329 			printf("%s: could not initialize hardware\n",
11330 			    DEVNAME(sc));
11331 			return;
11332 		}
11333 
11334 		/*
11335 		 * In order to recognize the C step, the driver must read the
11336 		 * chip version ID located at the AUX bus MISC address.
11337 		 */
11338 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11339 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
11340 		DELAY(2);
11341 
11342 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11343 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11344 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11345 				   25000);
11346 		if (!err) {
11347 			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
11348 			return;
11349 		}
11350 
11351 		if (iwm_nic_lock(sc)) {
11352 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11353 			hw_step |= IWM_ENABLE_WFPM;
11354 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11355 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11356 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
11357 			if (hw_step == 0x3)
11358 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11359 						(IWM_SILICON_C_STEP << 2);
11360 			iwm_nic_unlock(sc);
11361 		} else {
11362 			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
11363 			return;
11364 		}
11365 	}
11366 
11367 	/*
11368 	 * Allocate DMA memory for firmware transfers.
11369 	 * Must be aligned on a 16-byte boundary.
11370 	 */
11371 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11372 	    sc->sc_fwdmasegsz, 16);
11373 	if (err) {
11374 		printf("%s: could not allocate memory for firmware\n",
11375 		    DEVNAME(sc));
11376 		return;
11377 	}
11378 
11379 	/* Allocate "Keep Warm" page, used internally by the card. */
11380 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11381 	if (err) {
11382 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
11383 		goto fail1;
11384 	}
11385 
11386 	/* Allocate interrupt cause table (ICT).*/
11387 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11388 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
11389 	if (err) {
11390 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11391 		goto fail2;
11392 	}
11393 
11394 	/* TX scheduler rings must be aligned on a 1KB boundary. */
11395 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
11396 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
11397 	if (err) {
11398 		printf("%s: could not allocate TX scheduler rings\n",
11399 		    DEVNAME(sc));
11400 		goto fail3;
11401 	}
11402 
11403 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11404 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11405 		if (err) {
11406 			printf("%s: could not allocate TX ring %d\n",
11407 			    DEVNAME(sc), txq_i);
11408 			goto fail4;
11409 		}
11410 	}
11411 
11412 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
11413 	if (err) {
11414 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11415 		goto fail4;
11416 	}
11417 
11418 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
11419 	if (sc->sc_nswq == NULL)
11420 		goto fail4;
11421 
11422 	/* Clear pending interrupts. */
11423 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
11424 
11425 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11426 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11427 	ic->ic_state = IEEE80211_S_INIT;
11428 
11429 	/* Set device capabilities. */
11430 	ic->ic_caps =
11431 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11432 	    IEEE80211_C_WEP |		/* WEP */
11433 	    IEEE80211_C_RSN |		/* WPA/RSN */
11434 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11435 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11436 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11437 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11438 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11439 
11440 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11441 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11442 	ic->ic_htcaps |=
11443 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11444 	ic->ic_htxcaps = 0;
11445 	ic->ic_txbfcaps = 0;
11446 	ic->ic_aselcaps = 0;
11447 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11448 
11449 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11450 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11451 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11452 
11453 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11454 		sc->sc_phyctxt[i].id = i;
11455 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11456 	}
11457 
11458 	sc->sc_amrr.amrr_min_success_threshold =  1;
11459 	sc->sc_amrr.amrr_max_success_threshold = 15;
11460 
11461 	/* IBSS channel undefined for now. */
11462 	ic->ic_ibss_chan = &ic->ic_channels[1];
11463 
11464 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
11465 
11466 	ifp->if_softc = sc;
11467 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11468 	ifp->if_ioctl = iwm_ioctl;
11469 	ifp->if_start = iwm_start;
11470 	ifp->if_watchdog = iwm_watchdog;
11471 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11472 
11473 	if_attach(ifp);
11474 	ieee80211_ifattach(ifp);
11475 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11476 
11477 #if NBPFILTER > 0
11478 	iwm_radiotap_attach(sc);
11479 #endif
11480 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
11481 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
11482 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
11483 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
11484 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
11485 		rxba->sc = sc;
11486 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
11487 		    rxba);
11488 		timeout_set(&rxba->reorder_buf.reorder_timer,
11489 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
11490 		for (j = 0; j < nitems(rxba->entries); j++)
11491 			ml_init(&rxba->entries[j].frames);
11492 	}
11493 	task_set(&sc->init_task, iwm_init_task, sc);
11494 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
11495 	task_set(&sc->ba_task, iwm_ba_task, sc);
11496 	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
11497 	task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
11498 
11499 	ic->ic_node_alloc = iwm_node_alloc;
11500 	ic->ic_bgscan_start = iwm_bgscan;
11501 	ic->ic_set_key = iwm_set_key;
11502 	ic->ic_delete_key = iwm_delete_key;
11503 
11504 	/* Override 802.11 state transition machine. */
11505 	sc->sc_newstate = ic->ic_newstate;
11506 	ic->ic_newstate = iwm_newstate;
11507 	ic->ic_updateprot = iwm_updateprot;
11508 	ic->ic_updateslot = iwm_updateslot;
11509 	ic->ic_updateedca = iwm_updateedca;
11510 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
11511 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
11512 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
11513 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
11514 	/*
11515 	 * We cannot read the MAC address without loading the
11516 	 * firmware from disk. Postpone until mountroot is done.
11517 	 */
11518 	config_mountroot(self, iwm_attach_hook);
11519 
11520 	return;
11521 
11522 fail4:	while (--txq_i >= 0)
11523 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
11524 	iwm_free_rx_ring(sc, &sc->rxq);
11525 	iwm_dma_contig_free(&sc->sched_dma);
11526 fail3:	if (sc->ict_dma.vaddr != NULL)
11527 		iwm_dma_contig_free(&sc->ict_dma);
11528 
11529 fail2:	iwm_dma_contig_free(&sc->kw_dma);
11530 fail1:	iwm_dma_contig_free(&sc->fw_dma);
11531 	return;
11532 }
11533 
11534 #if NBPFILTER > 0
11535 void
11536 iwm_radiotap_attach(struct iwm_softc *sc)
11537 {
11538 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
11539 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
11540 
11541 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
11542 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
11543 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
11544 
11545 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
11546 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
11547 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
11548 }
11549 #endif
11550 
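/*
 * init_task restarts the device after a fatal firmware/hardware error or
 * an rfkill transition. The generation counter guards against the device
 * having been torn down while the task was still queued.
 */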
11551 void
11552 iwm_init_task(void *arg1)
11553 {
11554 	struct iwm_softc *sc = arg1;
11555 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11556 	int s = splnet();
11557 	int generation = sc->sc_generation;
11558 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
11559 
11560 	rw_enter_write(&sc->ioctl_rwl);
11561 	if (generation != sc->sc_generation) {
11562 		rw_exit(&sc->ioctl_rwl);
11563 		splx(s);
11564 		return;
11565 	}
11566 
11567 	if (ifp->if_flags & IFF_RUNNING)
11568 		iwm_stop(ifp);
11569 	else
11570 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
11571 
11572 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
11573 		iwm_init(ifp);
11574 
11575 	rw_exit(&sc->ioctl_rwl);
11576 	splx(s);
11577 }
11578 
11579 int
11580 iwm_resume(struct iwm_softc *sc)
11581 {
11582 	pcireg_t reg;
11583 
11584 	/* Clear device-specific "PCI retry timeout" register (41h). */
11585 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11586 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11587 
11588 	if (!sc->sc_msix) {
11589 		/* Hardware bug workaround. */
11590 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11591 		    PCI_COMMAND_STATUS_REG);
11592 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11593 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11594 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11595 		    PCI_COMMAND_STATUS_REG, reg);
11596 	}
11597 
11598 	iwm_disable_interrupts(sc);
11599 
11600 	return iwm_start_hw(sc);
11601 }
11602 
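/*
 * Power-management hook: on quiesce, stop a running interface before
 * suspend; on resume, reapply the PCI workarounds and restart the
 * hardware; on wakeup, schedule init_task to bring the interface back
 * up once the hardware reports ready.
 */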
11603 int
11604 iwm_activate(struct device *self, int act)
11605 {
11606 	struct iwm_softc *sc = (struct iwm_softc *)self;
11607 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11608 	int err = 0;
11609 
11610 	switch (act) {
11611 	case DVACT_QUIESCE:
11612 		if (ifp->if_flags & IFF_RUNNING) {
11613 			rw_enter_write(&sc->ioctl_rwl);
11614 			iwm_stop(ifp);
11615 			rw_exit(&sc->ioctl_rwl);
11616 		}
11617 		break;
11618 	case DVACT_RESUME:
11619 		err = iwm_resume(sc);
11620 		if (err)
11621 			printf("%s: could not initialize hardware\n",
11622 			    DEVNAME(sc));
11623 		break;
11624 	case DVACT_WAKEUP:
11625 		/* Hardware should be up at this point. */
11626 		if (iwm_set_hw_ready(sc))
11627 			task_add(systq, &sc->init_task);
11628 		break;
11629 	}
11630 
11631 	return 0;
11632 }
11633 
11634 struct cfdriver iwm_cd = {
11635 	NULL, "iwm", DV_IFNET
11636 };
11637 
11638 struct cfattach iwm_ca = {
11639 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
11640 	NULL, iwm_activate
11641 };
11642