1 /*	$OpenBSD: if_iwm.c,v 1.400 2022/03/23 09:22:49 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include "bpfilter.h"
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/malloc.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/systm.h>
122 #include <sys/endian.h>
123 
124 #include <sys/refcnt.h>
125 #include <sys/task.h>
126 #include <machine/bus.h>
127 #include <machine/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/if_ether.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_ra.h>
146 #include <net80211/ieee80211_ra_vht.h>
147 #include <net80211/ieee80211_radiotap.h>
148 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
149 #undef DPRINTF /* defined in ieee80211_priv.h */
150 
151 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
152 
153 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
154 
155 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
156 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
157 
158 #ifdef IWM_DEBUG
159 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
160 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
161 int iwm_debug = 1;
162 #else
163 #define DPRINTF(x)	do { ; } while (0)
164 #define DPRINTFN(n, x)	do { ; } while (0)
165 #endif
166 
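/*
 * DPRINTF expands its argument into a printf() call, so the argument
 * list must be wrapped in an extra set of parentheses.  Illustrative
 * (made-up) examples:
 *
 *	DPRINTF(("%s: firmware loaded\n", DEVNAME(sc)));
 *	DPRINTFN(2, ("%s: rx rate %d\n", DEVNAME(sc), rate));
 */
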
167 #include <dev/pci/if_iwmreg.h>
168 #include <dev/pci/if_iwmvar.h>
169 
170 const uint8_t iwm_nvm_channels[] = {
171 	/* 2.4 GHz */
172 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
173 	/* 5 GHz */
174 	36, 40, 44, 48, 52, 56, 60, 64,
175 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
176 	149, 153, 157, 161, 165
177 };
178 
179 const uint8_t iwm_nvm_channels_8000[] = {
180 	/* 2.4 GHz */
181 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182 	/* 5 GHz */
183 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185 	149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 
188 #define IWM_NUM_2GHZ_CHANNELS	14
189 
190 const struct iwm_rate {
191 	uint16_t rate;
192 	uint8_t plcp;
193 	uint8_t ht_plcp;
194 } iwm_rates[] = {
195 		/* Legacy */		/* HT */
196 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
198 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
199 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
200 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
201 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
202 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
203 	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
204 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
205 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
206 	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
207 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
208 	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
209 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
210 	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
211 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
212 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
213 	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
214 	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
215 	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
216 	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
217 };
218 #define IWM_RIDX_CCK	0
219 #define IWM_RIDX_OFDM	4
220 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
221 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
222 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
223 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
224 
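/*
 * Rate values in iwm_rates[] are in units of 500 kbit/s, as used by
 * net80211 rate sets.  For example, { 2, IWM_RATE_1M_PLCP, ... } is
 * 1 Mbit/s CCK and { 12, IWM_RATE_6M_PLCP, ... } is 6 Mbit/s OFDM, so
 * IWM_RVAL_IS_OFDM(12) is true while IWM_RVAL_IS_OFDM(22) (11 Mbit/s
 * CCK) is false.
 */
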
225 /* Convert an MCS index into an iwm_rates[] index. */
226 const int iwm_ht_mcs2ridx[] = {
227 	IWM_RATE_MCS_0_INDEX,
228 	IWM_RATE_MCS_1_INDEX,
229 	IWM_RATE_MCS_2_INDEX,
230 	IWM_RATE_MCS_3_INDEX,
231 	IWM_RATE_MCS_4_INDEX,
232 	IWM_RATE_MCS_5_INDEX,
233 	IWM_RATE_MCS_6_INDEX,
234 	IWM_RATE_MCS_7_INDEX,
235 	IWM_RATE_MCS_8_INDEX,
236 	IWM_RATE_MCS_9_INDEX,
237 	IWM_RATE_MCS_10_INDEX,
238 	IWM_RATE_MCS_11_INDEX,
239 	IWM_RATE_MCS_12_INDEX,
240 	IWM_RATE_MCS_13_INDEX,
241 	IWM_RATE_MCS_14_INDEX,
242 	IWM_RATE_MCS_15_INDEX,
243 };
244 
245 struct iwm_nvm_section {
246 	uint16_t length;
247 	uint8_t *data;
248 };
249 
250 int	iwm_is_mimo_ht_plcp(uint8_t);
251 int	iwm_is_mimo_ht_mcs(int);
252 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
253 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
254 	    uint8_t *, size_t);
255 int	iwm_set_default_calib(struct iwm_softc *, const void *);
256 void	iwm_fw_info_free(struct iwm_fw_info *);
257 void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
258 int	iwm_read_firmware(struct iwm_softc *);
259 uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
260 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
261 void	iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
262 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
263 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
264 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
265 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
266 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
267 int	iwm_nic_lock(struct iwm_softc *);
268 void	iwm_nic_assert_locked(struct iwm_softc *);
269 void	iwm_nic_unlock(struct iwm_softc *);
270 int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
271 	    uint32_t);
272 int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
273 int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
274 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
275 	    bus_size_t);
276 void	iwm_dma_contig_free(struct iwm_dma_info *);
277 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
278 void	iwm_disable_rx_dma(struct iwm_softc *);
279 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
280 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
281 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
282 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
283 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
284 void	iwm_enable_rfkill_int(struct iwm_softc *);
285 int	iwm_check_rfkill(struct iwm_softc *);
286 void	iwm_enable_interrupts(struct iwm_softc *);
287 void	iwm_enable_fwload_interrupt(struct iwm_softc *);
288 void	iwm_restore_interrupts(struct iwm_softc *);
289 void	iwm_disable_interrupts(struct iwm_softc *);
290 void	iwm_ict_reset(struct iwm_softc *);
291 int	iwm_set_hw_ready(struct iwm_softc *);
292 int	iwm_prepare_card_hw(struct iwm_softc *);
293 void	iwm_apm_config(struct iwm_softc *);
294 int	iwm_apm_init(struct iwm_softc *);
295 void	iwm_apm_stop(struct iwm_softc *);
296 int	iwm_allow_mcast(struct iwm_softc *);
297 void	iwm_init_msix_hw(struct iwm_softc *);
298 void	iwm_conf_msix_hw(struct iwm_softc *, int);
299 int	iwm_clear_persistence_bit(struct iwm_softc *);
300 int	iwm_start_hw(struct iwm_softc *);
301 void	iwm_stop_device(struct iwm_softc *);
302 void	iwm_nic_config(struct iwm_softc *);
303 int	iwm_nic_rx_init(struct iwm_softc *);
304 int	iwm_nic_rx_legacy_init(struct iwm_softc *);
305 int	iwm_nic_rx_mq_init(struct iwm_softc *);
306 int	iwm_nic_tx_init(struct iwm_softc *);
307 int	iwm_nic_init(struct iwm_softc *);
308 int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
309 int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
310 	    uint16_t);
311 int	iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
312 int	iwm_post_alive(struct iwm_softc *);
313 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
314 	    uint16_t);
315 int	iwm_phy_db_set_section(struct iwm_softc *,
316 	    struct iwm_calib_res_notif_phy_db *);
317 int	iwm_is_valid_channel(uint16_t);
318 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
319 uint16_t iwm_channel_id_to_papd(uint16_t);
320 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
321 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
322 	    uint16_t *, uint16_t);
323 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
324 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
325 	    uint8_t);
326 int	iwm_send_phy_db_data(struct iwm_softc *);
327 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
328 	    uint32_t);
329 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
330 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
331 	    uint8_t *, uint16_t *);
332 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
333 	    uint16_t *, size_t);
334 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
335 	    const uint8_t *nvm_channels, int nchan);
336 int	iwm_mimo_enabled(struct iwm_softc *);
337 void	iwm_setup_ht_rates(struct iwm_softc *);
338 void	iwm_setup_vht_rates(struct iwm_softc *);
339 void	iwm_mac_ctxt_task(void *);
340 void	iwm_phy_ctxt_task(void *);
341 void	iwm_updateprot(struct ieee80211com *);
342 void	iwm_updateslot(struct ieee80211com *);
343 void	iwm_updateedca(struct ieee80211com *);
344 void	iwm_updatechan(struct ieee80211com *);
345 void	iwm_updatedtim(struct ieee80211com *);
346 void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
347 	    uint16_t);
348 void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
349 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
350 	    uint8_t);
351 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
352 	    uint8_t);
353 void	iwm_rx_ba_session_expired(void *);
354 void	iwm_reorder_timer_expired(void *);
355 int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
356 	    uint16_t, uint16_t, int, int);
357 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
358 	    uint8_t);
359 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
360 	    uint8_t);
361 void	iwm_ba_task(void *);
362 
363 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
364 	    const uint16_t *, const uint16_t *,
365 	    const uint16_t *, const uint16_t *,
366 	    const uint16_t *, int);
367 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
368 	    const uint16_t *, const uint16_t *);
369 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
370 int	iwm_nvm_init(struct iwm_softc *);
371 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
372 	    uint32_t);
373 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
374 	    uint32_t);
375 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
376 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
377 	    int, int *);
378 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
379 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
380 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
381 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
382 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
383 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
384 int	iwm_send_dqa_cmd(struct iwm_softc *);
385 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
386 int	iwm_config_ltr(struct iwm_softc *);
387 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
388 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
389 int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
390 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
391 	    struct iwm_rx_data *);
392 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
393 int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
394 	    struct ieee80211_rxinfo *);
395 int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
396 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
397 void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
398 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
399 void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
400 	    int, uint8_t, int);
401 void	iwm_vht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
402 	    int, int, uint8_t, int);
403 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
404 	    struct iwm_node *, int, int);
405 void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
406 void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
407 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
408 	    struct iwm_rx_data *);
409 void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
410 void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
411 	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
412 void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
413 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
414 	    struct iwm_rx_data *);
415 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
416 uint8_t	iwm_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
417 int	iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
418 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
419 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
420 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
421 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
422 	    struct ieee80211_channel *, uint8_t, uint8_t, uint8_t, uint8_t);
423 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
424 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
425 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
426 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
427 	    const void *);
428 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
429 	    uint32_t *);
430 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
431 	    const void *, uint32_t *);
432 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
433 void	iwm_cmd_done(struct iwm_softc *, int, int, int);
434 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
435 void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
436 uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
437 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
438 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
439 int	iwm_flush_tx_path(struct iwm_softc *, int);
440 int	iwm_wait_tx_queues_empty(struct iwm_softc *);
441 void	iwm_led_enable(struct iwm_softc *);
442 void	iwm_led_disable(struct iwm_softc *);
443 int	iwm_led_is_enabled(struct iwm_softc *);
444 void	iwm_led_blink_timeout(void *);
445 void	iwm_led_blink_start(struct iwm_softc *);
446 void	iwm_led_blink_stop(struct iwm_softc *);
447 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
448 	    struct iwm_beacon_filter_cmd *);
449 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
450 	    struct iwm_beacon_filter_cmd *);
451 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
452 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
453 	    struct iwm_mac_power_cmd *);
454 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
455 int	iwm_power_update_device(struct iwm_softc *);
456 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
457 int	iwm_disable_beacon_filter(struct iwm_softc *);
458 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
459 int	iwm_add_aux_sta(struct iwm_softc *);
460 int	iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
461 int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
462 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
463 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
464 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
465 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
466 	    struct iwm_scan_channel_cfg_lmac *, int, int);
467 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
468 int	iwm_lmac_scan(struct iwm_softc *, int);
469 int	iwm_config_umac_scan(struct iwm_softc *);
470 int	iwm_umac_scan(struct iwm_softc *, int);
471 void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
472 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
473 int	iwm_rval2ridx(int);
474 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
475 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
476 	    struct iwm_mac_ctx_cmd *, uint32_t);
477 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
478 	    struct iwm_mac_data_sta *, int);
479 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
480 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
481 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
482 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
483 int	iwm_scan(struct iwm_softc *);
484 int	iwm_bgscan(struct ieee80211com *);
485 void	iwm_bgscan_done(struct ieee80211com *,
486 	    struct ieee80211_node_switch_bss_arg *, size_t);
487 void	iwm_bgscan_done_task(void *);
488 int	iwm_umac_scan_abort(struct iwm_softc *);
489 int	iwm_lmac_scan_abort(struct iwm_softc *);
490 int	iwm_scan_abort(struct iwm_softc *);
491 int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
492 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
493 	    uint8_t);
494 int	iwm_auth(struct iwm_softc *);
495 int	iwm_deauth(struct iwm_softc *);
496 int	iwm_run(struct iwm_softc *);
497 int	iwm_run_stop(struct iwm_softc *);
498 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
499 int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
500 	    struct ieee80211_key *);
501 int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
502 	    struct ieee80211_key *);
503 void	iwm_delete_key_v1(struct ieee80211com *,
504 	    struct ieee80211_node *, struct ieee80211_key *);
505 void	iwm_delete_key(struct ieee80211com *,
506 	    struct ieee80211_node *, struct ieee80211_key *);
507 void	iwm_calib_timeout(void *);
508 void	iwm_set_rate_table_vht(struct iwm_node *, struct iwm_lq_cmd *);
509 void	iwm_set_rate_table(struct iwm_node *, struct iwm_lq_cmd *);
510 void	iwm_setrates(struct iwm_node *, int);
511 int	iwm_media_change(struct ifnet *);
512 void	iwm_newstate_task(void *);
513 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
514 void	iwm_endscan(struct iwm_softc *);
515 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
516 	    struct ieee80211_node *);
517 int	iwm_sf_config(struct iwm_softc *, int);
518 int	iwm_send_bt_init_conf(struct iwm_softc *);
519 int	iwm_send_soc_conf(struct iwm_softc *);
520 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
521 int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
522 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
523 void	iwm_free_fw_paging(struct iwm_softc *);
524 int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
525 int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
526 int	iwm_init_hw(struct iwm_softc *);
527 int	iwm_init(struct ifnet *);
528 void	iwm_start(struct ifnet *);
529 void	iwm_stop(struct ifnet *);
530 void	iwm_watchdog(struct ifnet *);
531 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
532 const char *iwm_desc_lookup(uint32_t);
533 void	iwm_nic_error(struct iwm_softc *);
534 void	iwm_dump_driver_status(struct iwm_softc *);
535 void	iwm_nic_umac_error(struct iwm_softc *);
536 void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
537 	    struct mbuf_list *);
538 void	iwm_flip_address(uint8_t *);
539 int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
540 	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
541 int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
542 void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
543 	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
544 	    struct mbuf_list *);
545 int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
546 	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
547 int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
548 	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
549 	    struct ieee80211_rxinfo *, struct mbuf_list *);
550 void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
551 	    struct mbuf_list *);
552 int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
553 void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
554 	    struct mbuf_list *);
555 void	iwm_notif_intr(struct iwm_softc *);
556 int	iwm_intr(void *);
557 int	iwm_intr_msix(void *);
558 int	iwm_match(struct device *, void *, void *);
559 int	iwm_preinit(struct iwm_softc *);
560 void	iwm_attach_hook(struct device *);
561 void	iwm_attach(struct device *, struct device *, void *);
562 void	iwm_init_task(void *);
563 int	iwm_activate(struct device *, int);
564 void	iwm_resume(struct iwm_softc *);
565 int	iwm_wakeup(struct iwm_softc *);
566 
567 #if NBPFILTER > 0
568 void	iwm_radiotap_attach(struct iwm_softc *);
569 #endif
570 
571 uint8_t
572 iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
573 {
574 	const struct iwm_fw_cmd_version *entry;
575 	int i;
576 
577 	for (i = 0; i < sc->n_cmd_versions; i++) {
578 		entry = &sc->cmd_versions[i];
579 		if (entry->group == grp && entry->cmd == cmd)
580 			return entry->cmd_ver;
581 	}
582 
583 	return IWM_FW_CMD_VER_UNKNOWN;
584 }
585 
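/*
 * Callers typically use the result to choose between command layouts.
 * An illustrative sketch (the version numbers here are hypothetical):
 *
 *	uint8_t ver = iwm_lookup_cmd_ver(sc, grp, cmd);
 *	if (ver == IWM_FW_CMD_VER_UNKNOWN || ver < 2)
 *		... build and send the version 1 command ...
 *	else
 *		... build and send the version 2 command ...
 */
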
586 int
587 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
588 {
589 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
590 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
591 }
592 
593 int
594 iwm_is_mimo_ht_mcs(int mcs)
595 {
596 	int ridx = iwm_ht_mcs2ridx[mcs];
597 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
598 
599 }
600 
601 int
602 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
603 {
604 	struct iwm_fw_cscheme_list *l = (void *)data;
605 
606 	if (dlen < sizeof(*l) ||
607 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
608 		return EINVAL;
609 
610 	/* we don't actually store anything for now, always use s/w crypto */
611 
612 	return 0;
613 }
614 
615 int
616 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
617     uint8_t *data, size_t dlen)
618 {
619 	struct iwm_fw_sects *fws;
620 	struct iwm_fw_onesect *fwone;
621 
622 	if (type >= IWM_UCODE_TYPE_MAX)
623 		return EINVAL;
624 	if (dlen < sizeof(uint32_t))
625 		return EINVAL;
626 
627 	fws = &sc->sc_fw.fw_sects[type];
628 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
629 		return EINVAL;
630 
631 	fwone = &fws->fw_sect[fws->fw_count];
632 
633 	/* The first 32 bits are the device load offset. */
634 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
635 
636 	/* rest is data */
637 	fwone->fws_data = data + sizeof(uint32_t);
638 	fwone->fws_len = dlen - sizeof(uint32_t);
639 
640 	fws->fw_count++;
641 	fws->fw_totlen += fwone->fws_len;
642 
643 	return 0;
644 }
645 
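/*
 * The resulting in-memory layout of one firmware section, relative to
 * the raw TLV payload parsed above, is:
 *
 *	bytes 0..3	fws_devoff: device load address (little endian)
 *	bytes 4..dlen-1	fws_data: fws_len = dlen - 4 bytes of image data
 */
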
646 #define IWM_DEFAULT_SCAN_CHANNELS	40
647 /* Newer firmware might support more channels. Raise this value if needed. */
648 #define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
649 
650 struct iwm_tlv_calib_data {
651 	uint32_t ucode_type;
652 	struct iwm_tlv_calib_ctrl calib;
653 } __packed;
654 
655 int
656 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
657 {
658 	const struct iwm_tlv_calib_data *def_calib = data;
659 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
660 
661 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
662 		return EINVAL;
663 
664 	sc->sc_default_calib[ucode_type].flow_trigger =
665 	    def_calib->calib.flow_trigger;
666 	sc->sc_default_calib[ucode_type].event_trigger =
667 	    def_calib->calib.event_trigger;
668 
669 	return 0;
670 }
671 
672 void
673 iwm_fw_info_free(struct iwm_fw_info *fw)
674 {
675 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
676 	fw->fw_rawdata = NULL;
677 	fw->fw_rawsize = 0;
678 	/* don't touch fw->fw_status */
679 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
680 }
681 
682 void
683 iwm_fw_version_str(char *buf, size_t bufsize,
684     uint32_t major, uint32_t minor, uint32_t api)
685 {
686 	/*
687 	 * Starting with major version 35 the Linux driver prints the minor
688 	 * version in hexadecimal.
689 	 */
690 	if (major >= 35)
691 		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
692 	else
693 		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
694 }
695 
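/*
 * Worked examples (version numbers made up for illustration):
 * major 34, minor 3125, api 1 formats as "34.3125.1", while
 * major 36, minor 0x77d01142, api 0 formats as "36.77d01142.0",
 * because minor versions print as zero-padded hex from major 35 on.
 */
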
696 int
697 iwm_read_firmware(struct iwm_softc *sc)
698 {
699 	struct iwm_fw_info *fw = &sc->sc_fw;
700 	struct iwm_tlv_ucode_header *uhdr;
701 	struct iwm_ucode_tlv tlv;
702 	uint32_t tlv_type;
703 	uint8_t *data;
704 	uint32_t usniffer_img;
705 	uint32_t paging_mem_size;
706 	int err;
707 	size_t len;
708 
709 	if (fw->fw_status == IWM_FW_STATUS_DONE)
710 		return 0;
711 
712 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
713 		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
714 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
715 
716 	if (fw->fw_rawdata != NULL)
717 		iwm_fw_info_free(fw);
718 
719 	err = loadfirmware(sc->sc_fwname,
720 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
721 	if (err) {
722 		printf("%s: could not read firmware %s (error %d)\n",
723 		    DEVNAME(sc), sc->sc_fwname, err);
724 		goto out;
725 	}
726 
727 	sc->sc_capaflags = 0;
728 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
729 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
730 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
731 	sc->n_cmd_versions = 0;
732 
733 	uhdr = (void *)fw->fw_rawdata;
734 	if (*(uint32_t *)fw->fw_rawdata != 0
735 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
736 		printf("%s: invalid firmware %s\n",
737 		    DEVNAME(sc), sc->sc_fwname);
738 		err = EINVAL;
739 		goto out;
740 	}
741 
742 	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
743 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
744 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
745 	    IWM_UCODE_API(le32toh(uhdr->ver)));
746 
747 	data = uhdr->data;
748 	len = fw->fw_rawsize - sizeof(*uhdr);
749 
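	/*
	 * The remainder of the image is a stream of TLV records.  Each
	 * record is a struct iwm_ucode_tlv header followed by tlv.length
	 * payload bytes, with records padded to 4-byte alignment:
	 *
	 *	[ type:4 | length:4 | data | pad to roundup(length, 4) ] ...
	 */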
750 	while (len >= sizeof(tlv)) {
751 		size_t tlv_len;
752 		void *tlv_data;
753 
754 		memcpy(&tlv, data, sizeof(tlv));
755 		tlv_len = le32toh(tlv.length);
756 		tlv_type = le32toh(tlv.type);
757 
758 		len -= sizeof(tlv);
759 		data += sizeof(tlv);
760 		tlv_data = data;
761 
762 		if (len < tlv_len) {
763 			printf("%s: firmware too short: %zu bytes\n",
764 			    DEVNAME(sc), len);
765 			err = EINVAL;
766 			goto parse_out;
767 		}
768 
769 		switch (tlv_type) {
770 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
771 			if (tlv_len < sizeof(uint32_t)) {
772 				err = EINVAL;
773 				goto parse_out;
774 			}
775 			sc->sc_capa_max_probe_len
776 			    = le32toh(*(uint32_t *)tlv_data);
777 			if (sc->sc_capa_max_probe_len >
778 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
779 				err = EINVAL;
780 				goto parse_out;
781 			}
782 			break;
783 		case IWM_UCODE_TLV_PAN:
784 			if (tlv_len) {
785 				err = EINVAL;
786 				goto parse_out;
787 			}
788 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
789 			break;
790 		case IWM_UCODE_TLV_FLAGS:
791 			if (tlv_len < sizeof(uint32_t)) {
792 				err = EINVAL;
793 				goto parse_out;
794 			}
795 			/*
796 			 * Apparently there can be many flags, but Linux driver
797 			 * parses only the first one, and so do we.
798 			 *
799 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
800 			 * Intentional or a bug?  Observations from
801 			 * current firmware file:
802 			 *  1) TLV_PAN is parsed first
803 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
804 			 * ==> this resets TLV_PAN to itself... hnnnk
805 			 */
806 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
807 			break;
808 		case IWM_UCODE_TLV_CSCHEME:
809 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
810 			if (err)
811 				goto parse_out;
812 			break;
813 		case IWM_UCODE_TLV_NUM_OF_CPU: {
814 			uint32_t num_cpu;
815 			if (tlv_len != sizeof(uint32_t)) {
816 				err = EINVAL;
817 				goto parse_out;
818 			}
819 			num_cpu = le32toh(*(uint32_t *)tlv_data);
820 			if (num_cpu < 1 || num_cpu > 2) {
821 				err = EINVAL;
822 				goto parse_out;
823 			}
824 			break;
825 		}
826 		case IWM_UCODE_TLV_SEC_RT:
827 			err = iwm_firmware_store_section(sc,
828 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
829 			if (err)
830 				goto parse_out;
831 			break;
832 		case IWM_UCODE_TLV_SEC_INIT:
833 			err = iwm_firmware_store_section(sc,
834 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
835 			if (err)
836 				goto parse_out;
837 			break;
838 		case IWM_UCODE_TLV_SEC_WOWLAN:
839 			err = iwm_firmware_store_section(sc,
840 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
841 			if (err)
842 				goto parse_out;
843 			break;
844 		case IWM_UCODE_TLV_DEF_CALIB:
845 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
846 				err = EINVAL;
847 				goto parse_out;
848 			}
849 			err = iwm_set_default_calib(sc, tlv_data);
850 			if (err)
851 				goto parse_out;
852 			break;
853 		case IWM_UCODE_TLV_PHY_SKU:
854 			if (tlv_len != sizeof(uint32_t)) {
855 				err = EINVAL;
856 				goto parse_out;
857 			}
858 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
859 			break;
860 
861 		case IWM_UCODE_TLV_API_CHANGES_SET: {
862 			struct iwm_ucode_api *api;
863 			int idx, i;
864 			if (tlv_len != sizeof(*api)) {
865 				err = EINVAL;
866 				goto parse_out;
867 			}
868 			api = (struct iwm_ucode_api *)tlv_data;
869 			idx = le32toh(api->api_index);
870 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
871 				err = EINVAL;
872 				goto parse_out;
873 			}
874 			for (i = 0; i < 32; i++) {
875 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
876 					continue;
877 				setbit(sc->sc_ucode_api, i + (32 * idx));
878 			}
879 			break;
880 		}
881 
882 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
883 			struct iwm_ucode_capa *capa;
884 			int idx, i;
885 			if (tlv_len != sizeof(*capa)) {
886 				err = EINVAL;
887 				goto parse_out;
888 			}
889 			capa = (struct iwm_ucode_capa *)tlv_data;
890 			idx = le32toh(capa->api_index);
891 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
892 				err = EINVAL; goto parse_out;
893 			}
894 			for (i = 0; i < 32; i++) {
895 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
896 					continue;
897 				setbit(sc->sc_enabled_capa, i + (32 * idx));
898 			}
899 			break;
900 		}
901 
902 		case IWM_UCODE_TLV_CMD_VERSIONS:
903 			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
904 				tlv_len /= sizeof(struct iwm_fw_cmd_version);
905 				tlv_len *= sizeof(struct iwm_fw_cmd_version);
906 			}
907 			if (sc->n_cmd_versions != 0) {
908 				err = EINVAL;
909 				goto parse_out;
910 			}
911 			if (tlv_len > sizeof(sc->cmd_versions)) {
912 				err = EINVAL;
913 				goto parse_out;
914 			}
915 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
916 			sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
917 			break;
918 
919 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
920 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
921 			/* ignore, not used by current driver */
922 			break;
923 
924 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
925 			err = iwm_firmware_store_section(sc,
926 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
927 			    tlv_len);
928 			if (err)
929 				goto parse_out;
930 			break;
931 
932 		case IWM_UCODE_TLV_PAGING:
933 			if (tlv_len != sizeof(uint32_t)) {
934 				err = EINVAL;
935 				goto parse_out;
936 			}
937 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
938 
939 			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
940 			    DEVNAME(sc), paging_mem_size));
941 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
942 				printf("%s: Driver only supports up to %u"
943 				    " bytes for paging image (%u requested)\n",
944 				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
945 				    paging_mem_size);
946 				err = EINVAL;
947 				goto out;
948 			}
949 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
950 				printf("%s: Paging: image isn't a multiple of %u\n",
951 				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
952 				err = EINVAL;
953 				goto out;
954 			}
955 
956 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
957 			    paging_mem_size;
958 			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
959 			fw->fw_sects[usniffer_img].paging_mem_size =
960 			    paging_mem_size;
961 			break;
962 
963 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
964 			if (tlv_len != sizeof(uint32_t)) {
965 				err = EINVAL;
966 				goto parse_out;
967 			}
968 			sc->sc_capa_n_scan_channels =
969 			  le32toh(*(uint32_t *)tlv_data);
970 			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
971 				err = ERANGE;
972 				goto parse_out;
973 			}
974 			break;
975 
976 		case IWM_UCODE_TLV_FW_VERSION:
977 			if (tlv_len != sizeof(uint32_t) * 3) {
978 				err = EINVAL;
979 				goto parse_out;
980 			}
981 
982 			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
983 			    le32toh(((uint32_t *)tlv_data)[0]),
984 			    le32toh(((uint32_t *)tlv_data)[1]),
985 			    le32toh(((uint32_t *)tlv_data)[2]));
986 			break;
987 
988 		case IWM_UCODE_TLV_FW_DBG_DEST:
989 		case IWM_UCODE_TLV_FW_DBG_CONF:
990 		case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
991 		case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
992 		case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
993 		case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
994 		case IWM_UCODE_TLV_TYPE_HCMD:
995 		case IWM_UCODE_TLV_TYPE_REGIONS:
996 		case IWM_UCODE_TLV_TYPE_TRIGGERS:
997 			break;
998 
999 		case IWM_UCODE_TLV_HW_TYPE:
1000 			break;
1001 
1002 		case IWM_UCODE_TLV_FW_MEM_SEG:
1003 			break;
1004 
1005 		/* undocumented TLVs found in iwm-9000-43 image */
1006 		case 0x1000003:
1007 		case 0x1000004:
1008 			break;
1009 
1010 		default:
1011 			err = EINVAL;
1012 			goto parse_out;
1013 		}
1014 
1015 		len -= roundup(tlv_len, 4);
1016 		data += roundup(tlv_len, 4);
1017 	}
1018 
1019 	KASSERT(err == 0);
1020 
1021  parse_out:
1022 	if (err) {
1023 		printf("%s: firmware parse error %d, "
1024 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1025 	}
1026 
1027  out:
1028 	if (err) {
1029 		fw->fw_status = IWM_FW_STATUS_NONE;
1030 		if (fw->fw_rawdata != NULL)
1031 			iwm_fw_info_free(fw);
1032 	} else
1033 		fw->fw_status = IWM_FW_STATUS_DONE;
1034 	wakeup(&sc->sc_fw);
1035 
1036 	return err;
1037 }
1038 
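/*
 * Periphery (PRPH) registers are not mapped into PCI BAR space and are
 * reached indirectly through the HBUS_TARG_PRPH address/data register
 * pairs used below.  The (3 << 24) OR'ed into the address appears to
 * be a byte-enable selecting a full 32-bit access; the Linux iwlwifi
 * driver uses the same magic value.
 */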
1039 uint32_t
1040 iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
1041 {
1042 	IWM_WRITE(sc,
1043 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1044 	IWM_BARRIER_READ_WRITE(sc);
1045 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
1046 }
1047 
1048 uint32_t
1049 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
1050 {
1051 	iwm_nic_assert_locked(sc);
1052 	return iwm_read_prph_unlocked(sc, addr);
1053 }
1054 
1055 void
1056 iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1057 {
1058 	IWM_WRITE(sc,
1059 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1060 	IWM_BARRIER_WRITE(sc);
1061 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
1062 }
1063 
1064 void
1065 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1066 {
1067 	iwm_nic_assert_locked(sc);
1068 	iwm_write_prph_unlocked(sc, addr, val);
1069 }
1070 
1071 void
1072 iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
1073 {
1074 	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1075 	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1076 }
1077 
1078 int
1079 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
1080 {
1081 	int offs, err = 0;
1082 	uint32_t *vals = buf;
1083 
1084 	if (iwm_nic_lock(sc)) {
1085 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1086 		for (offs = 0; offs < dwords; offs++)
1087 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1088 		iwm_nic_unlock(sc);
1089 	} else {
1090 		err = EBUSY;
1091 	}
1092 	return err;
1093 }
1094 
1095 int
1096 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1097 {
1098 	int offs;
1099 	const uint32_t *vals = buf;
1100 
1101 	if (iwm_nic_lock(sc)) {
1102 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1103 		/* WADDR auto-increments */
1104 		for (offs = 0; offs < dwords; offs++) {
1105 			uint32_t val = vals ? vals[offs] : 0;
1106 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1107 		}
1108 		iwm_nic_unlock(sc);
1109 	} else {
1110 		return EBUSY;
1111 	}
1112 	return 0;
1113 }
1114 
1115 int
1116 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1117 {
1118 	return iwm_write_mem(sc, addr, &val, 1);
1119 }
1120 
1121 int
1122 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1123     int timo)
1124 {
1125 	for (;;) {
1126 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1127 			return 1;
1128 		}
1129 		if (timo < 10) {
1130 			return 0;
1131 		}
1132 		timo -= 10;
1133 		DELAY(10);
1134 	}
1135 }
1136 
1137 int
1138 iwm_nic_lock(struct iwm_softc *sc)
1139 {
1140 	if (sc->sc_nic_locks > 0) {
1141 		iwm_nic_assert_locked(sc);
1142 		sc->sc_nic_locks++;
1143 		return 1; /* already locked */
1144 	}
1145 
1146 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1147 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1148 
1149 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1150 		DELAY(2);
1151 
1152 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1153 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1154 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1155 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1156 		sc->sc_nic_locks++;
1157 		return 1;
1158 	}
1159 
1160 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1161 	return 0;
1162 }
1163 
1164 void
1165 iwm_nic_assert_locked(struct iwm_softc *sc)
1166 {
1167 	if (sc->sc_nic_locks <= 0)
1168 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1169 }
1170 
1171 void
1172 iwm_nic_unlock(struct iwm_softc *sc)
1173 {
1174 	if (sc->sc_nic_locks > 0) {
1175 		if (--sc->sc_nic_locks == 0)
1176 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1177 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1178 	} else
1179 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1180 }
1181 
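/*
 * The canonical usage pattern for the locking primitives above is the
 * one followed by iwm_set_bits_mask_prph() just below:
 *
 *	if (iwm_nic_lock(sc)) {
 *		... access periphery or memory registers ...
 *		iwm_nic_unlock(sc);
 *		return 0;
 *	}
 *	return EBUSY;
 */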
1182 int
1183 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1184     uint32_t mask)
1185 {
1186 	uint32_t val;
1187 
1188 	if (iwm_nic_lock(sc)) {
1189 		val = iwm_read_prph(sc, reg) & mask;
1190 		val |= bits;
1191 		iwm_write_prph(sc, reg, val);
1192 		iwm_nic_unlock(sc);
1193 		return 0;
1194 	}
1195 	return EBUSY;
1196 }
1197 
1198 int
1199 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1200 {
1201 	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1202 }
1203 
1204 int
1205 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1206 {
1207 	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1208 }
1209 
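/*
 * iwm_dma_contig_alloc() implements the usual four-step bus_dma(9)
 * sequence for a single physically contiguous buffer: create a DMA map
 * with bus_dmamap_create(), allocate one segment with
 * bus_dmamem_alloc(), map it into kernel virtual memory with
 * bus_dmamem_map(), and load it with bus_dmamap_load() to obtain the
 * bus address.  Any partially constructed state is torn down by
 * iwm_dma_contig_free() on failure.
 */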
1210 int
1211 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1212     bus_size_t size, bus_size_t alignment)
1213 {
1214 	int nsegs, err;
1215 	caddr_t va;
1216 
1217 	dma->tag = tag;
1218 	dma->size = size;
1219 
1220 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1221 	    &dma->map);
1222 	if (err)
1223 		goto fail;
1224 
1225 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1226 	    BUS_DMA_NOWAIT);
1227 	if (err)
1228 		goto fail;
1229 
1230 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1231 	    BUS_DMA_NOWAIT);
1232 	if (err)
1233 		goto fail;
1234 	dma->vaddr = va;
1235 
1236 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1237 	    BUS_DMA_NOWAIT);
1238 	if (err)
1239 		goto fail;
1240 
1241 	memset(dma->vaddr, 0, size);
1242 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1243 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1244 
1245 	return 0;
1246 
1247 fail:	iwm_dma_contig_free(dma);
1248 	return err;
1249 }
1250 
1251 void
1252 iwm_dma_contig_free(struct iwm_dma_info *dma)
1253 {
1254 	if (dma->map != NULL) {
1255 		if (dma->vaddr != NULL) {
1256 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1257 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1258 			bus_dmamap_unload(dma->tag, dma->map);
1259 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1260 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1261 			dma->vaddr = NULL;
1262 		}
1263 		bus_dmamap_destroy(dma->tag, dma->map);
1264 		dma->map = NULL;
1265 	}
1266 }
1267 
1268 int
1269 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1270 {
1271 	bus_size_t size;
1272 	size_t descsz;
1273 	int count, i, err;
1274 
1275 	ring->cur = 0;
1276 
1277 	if (sc->sc_mqrx_supported) {
1278 		count = IWM_RX_MQ_RING_COUNT;
1279 		descsz = sizeof(uint64_t);
1280 	} else {
1281 		count = IWM_RX_RING_COUNT;
1282 		descsz = sizeof(uint32_t);
1283 	}
1284 
1285 	/* Allocate RX descriptors (256-byte aligned). */
1286 	size = count * descsz;
1287 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1288 	if (err) {
1289 		printf("%s: could not allocate RX ring DMA memory\n",
1290 		    DEVNAME(sc));
1291 		goto fail;
1292 	}
1293 	ring->desc = ring->free_desc_dma.vaddr;
1294 
1295 	/* Allocate RX status area (16-byte aligned). */
1296 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1297 	    sizeof(*ring->stat), 16);
1298 	if (err) {
1299 		printf("%s: could not allocate RX status DMA memory\n",
1300 		    DEVNAME(sc));
1301 		goto fail;
1302 	}
1303 	ring->stat = ring->stat_dma.vaddr;
1304 
1305 	if (sc->sc_mqrx_supported) {
1306 		size = count * sizeof(uint32_t);
1307 		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1308 		    size, 256);
1309 		if (err) {
1310 			printf("%s: could not allocate RX ring DMA memory\n",
1311 			    DEVNAME(sc));
1312 			goto fail;
1313 		}
1314 	}
1315 
1316 	for (i = 0; i < count; i++) {
1317 		struct iwm_rx_data *data = &ring->data[i];
1318 
1319 		memset(data, 0, sizeof(*data));
1320 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1321 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1322 		    &data->map);
1323 		if (err) {
1324 			printf("%s: could not create RX buf DMA map\n",
1325 			    DEVNAME(sc));
1326 			goto fail;
1327 		}
1328 
1329 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1330 		if (err)
1331 			goto fail;
1332 	}
1333 	return 0;
1334 
1335 fail:	iwm_free_rx_ring(sc, ring);
1336 	return err;
1337 }
1338 
1339 void
1340 iwm_disable_rx_dma(struct iwm_softc *sc)
1341 {
1342 	int ntries;
1343 
1344 	if (iwm_nic_lock(sc)) {
1345 		if (sc->sc_mqrx_supported) {
1346 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1347 			for (ntries = 0; ntries < 1000; ntries++) {
1348 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1349 				    IWM_RXF_DMA_IDLE)
1350 					break;
1351 				DELAY(10);
1352 			}
1353 		} else {
1354 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1355 			for (ntries = 0; ntries < 1000; ntries++) {
1356 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
1357 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1358 					break;
1359 				DELAY(10);
1360 			}
1361 		}
1362 		iwm_nic_unlock(sc);
1363 	}
1364 }
1365 
1366 void
1367 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1368 {
1369 	ring->cur = 0;
1370 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1371 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1372 	memset(ring->stat, 0, sizeof(*ring->stat));
1373 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1374 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1375 
1376 }
1377 
1378 void
1379 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1380 {
1381 	int count, i;
1382 
1383 	iwm_dma_contig_free(&ring->free_desc_dma);
1384 	iwm_dma_contig_free(&ring->stat_dma);
1385 	iwm_dma_contig_free(&ring->used_desc_dma);
1386 
1387 	if (sc->sc_mqrx_supported)
1388 		count = IWM_RX_MQ_RING_COUNT;
1389 	else
1390 		count = IWM_RX_RING_COUNT;
1391 
1392 	for (i = 0; i < count; i++) {
1393 		struct iwm_rx_data *data = &ring->data[i];
1394 
1395 		if (data->m != NULL) {
1396 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1397 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1398 			bus_dmamap_unload(sc->sc_dmat, data->map);
1399 			m_freem(data->m);
1400 			data->m = NULL;
1401 		}
1402 		if (data->map != NULL)
1403 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1404 	}
1405 }
1406 
1407 int
1408 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1409 {
1410 	bus_addr_t paddr;
1411 	bus_size_t size;
1412 	int i, err;
1413 
1414 	ring->qid = qid;
1415 	ring->queued = 0;
1416 	ring->cur = 0;
1417 	ring->tail = 0;
1418 
1419 	/* Allocate TX descriptors (256-byte aligned). */
1420 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1421 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1422 	if (err) {
1423 		printf("%s: could not allocate TX ring DMA memory\n",
1424 		    DEVNAME(sc));
1425 		goto fail;
1426 	}
1427 	ring->desc = ring->desc_dma.vaddr;
1428 
1429 	/*
1430 	 * There is no need to allocate DMA buffers for unused rings.
1431 	 * 7k/8k/9k hardware supports up to 31 Tx rings, which is more
1432 	 * than we currently need.
1433 	 *
1434 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1435 	 * The command queue is sc->txq[0], and the 4 mgmt/data frame queues
1436 	 * are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1437 	 * in order to provide one queue per EDCA category.
1438 	 * Tx aggregation requires additional queues, one queue per TID for
1439 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1440 	 *
1441 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1442 	 * and Tx aggregation is not supported.
1443 	 *
1444 	 * Unfortunately, we cannot tell if DQA will be used until the
1445 	 * firmware gets loaded later, so just allocate sufficient rings
1446 	 * in order to satisfy both cases.
1447 	 */
1448 	if (qid > IWM_LAST_AGG_TX_QUEUE)
1449 		return 0;
1450 
1451 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1452 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1453 	if (err) {
1454 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1455 		goto fail;
1456 	}
1457 	ring->cmd = ring->cmd_dma.vaddr;
1458 
1459 	paddr = ring->cmd_dma.paddr;
1460 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1461 		struct iwm_tx_data *data = &ring->data[i];
1462 		size_t mapsize;
1463 
1464 		data->cmd_paddr = paddr;
1465 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1466 		    + offsetof(struct iwm_tx_cmd, scratch);
1467 		paddr += sizeof(struct iwm_device_cmd);
1468 
1469 		/* FW commands may require more mapped space than packets. */
1470 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1471 			mapsize = (sizeof(struct iwm_cmd_header) +
1472 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1473 		else
1474 			mapsize = MCLBYTES;
1475 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1476 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1477 		    &data->map);
1478 		if (err) {
1479 			printf("%s: could not create TX buf DMA map\n",
1480 			    DEVNAME(sc));
1481 			goto fail;
1482 		}
1483 	}
1484 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1485 	return 0;
1486 
1487 fail:	iwm_free_tx_ring(sc, ring);
1488 	return err;
1489 }
1490 
1491 void
1492 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1493 {
1494 	int i;
1495 
1496 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1497 		struct iwm_tx_data *data = &ring->data[i];
1498 
1499 		if (data->m != NULL) {
1500 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1501 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1502 			bus_dmamap_unload(sc->sc_dmat, data->map);
1503 			m_freem(data->m);
1504 			data->m = NULL;
1505 		}
1506 	}
1507 	/* Clear TX descriptors. */
1508 	memset(ring->desc, 0, ring->desc_dma.size);
1509 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1510 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1511 	sc->qfullmsk &= ~(1 << ring->qid);
1512 	sc->qenablemsk &= ~(1 << ring->qid);
1513 	/* 7000 family NICs are locked while commands are in progress. */
1514 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1515 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1516 			iwm_nic_unlock(sc);
1517 	}
1518 	ring->queued = 0;
1519 	ring->cur = 0;
1520 	ring->tail = 0;
1521 }
1522 
1523 void
1524 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1525 {
1526 	int i;
1527 
1528 	iwm_dma_contig_free(&ring->desc_dma);
1529 	iwm_dma_contig_free(&ring->cmd_dma);
1530 
1531 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1532 		struct iwm_tx_data *data = &ring->data[i];
1533 
1534 		if (data->m != NULL) {
1535 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1536 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1537 			bus_dmamap_unload(sc->sc_dmat, data->map);
1538 			m_freem(data->m);
1539 			data->m = NULL;
1540 		}
1541 		if (data->map != NULL)
1542 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1543 	}
1544 }
1545 
1546 void
1547 iwm_enable_rfkill_int(struct iwm_softc *sc)
1548 {
1549 	if (!sc->sc_msix) {
1550 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1551 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1552 	} else {
1553 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1554 		    sc->sc_fh_init_mask);
1555 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1556 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1557 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1558 	}
1559 
1560 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1561 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1562 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1563 }
1564 
1565 int
1566 iwm_check_rfkill(struct iwm_softc *sc)
1567 {
1568 	uint32_t v;
1569 	int rv;
1570 
1571 	/*
1572 	 * "documentation" is not really helpful here:
1573 	 *  27:	HW_RF_KILL_SW
1574 	 *	Indicates state of (platform's) hardware RF-Kill switch
1575 	 *
1576 	 * But apparently when it's off, it's on ...
1577 	 */
1578 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1579 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1580 	if (rv) {
1581 		sc->sc_flags |= IWM_FLAG_RFKILL;
1582 	} else {
1583 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1584 	}
1585 
1586 	return rv;
1587 }
1588 
1589 void
1590 iwm_enable_interrupts(struct iwm_softc *sc)
1591 {
1592 	if (!sc->sc_msix) {
1593 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1594 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1595 	} else {
1596 		/*
1597 		 * fh/hw_mask keeps all the unmasked causes.
1598 		 * Unlike MSI, an MSI-X cause is enabled while its mask bit is clear.
1599 		 */
1600 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1601 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1602 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1603 		    ~sc->sc_fh_mask);
1604 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1605 		    ~sc->sc_hw_mask);
1606 	}
1607 }
1608 
1609 void
1610 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1611 {
1612 	if (!sc->sc_msix) {
1613 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1614 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1615 	} else {
1616 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1617 		    sc->sc_hw_init_mask);
1618 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1619 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1620 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1621 	}
1622 }
1623 
1624 void
1625 iwm_restore_interrupts(struct iwm_softc *sc)
1626 {
1627 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1628 }
1629 
1630 void
1631 iwm_disable_interrupts(struct iwm_softc *sc)
1632 {
1633 	if (!sc->sc_msix) {
1634 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1635 
1636 		/* acknowledge all interrupts */
1637 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1638 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1639 	} else {
1640 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1641 		    sc->sc_fh_init_mask);
1642 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1643 		    sc->sc_hw_init_mask);
1644 	}
1645 }
1646 
1647 void
1648 iwm_ict_reset(struct iwm_softc *sc)
1649 {
1650 	iwm_disable_interrupts(sc);
1651 
1652 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1653 	sc->ict_cur = 0;
1654 
1655 	/* Set physical address of ICT (4KB aligned). */
1656 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1657 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1658 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1659 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1660 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1661 
1662 	/* Switch to ICT interrupt mode in driver. */
1663 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1664 
1665 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1666 	iwm_enable_interrupts(sc);
1667 }
1668 
1669 #define IWM_HW_READY_TIMEOUT 50
1670 int
1671 iwm_set_hw_ready(struct iwm_softc *sc)
1672 {
1673 	int ready;
1674 
1675 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1676 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1677 
1678 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1679 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1680 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1681 	    IWM_HW_READY_TIMEOUT);
1682 	if (ready)
1683 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1684 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1685 
1686 	return ready;
1687 }
1688 #undef IWM_HW_READY_TIMEOUT
1689 
1690 int
1691 iwm_prepare_card_hw(struct iwm_softc *sc)
1692 {
1693 	int t = 0;
1694 	int ntries;
1695 
1696 	if (iwm_set_hw_ready(sc))
1697 		return 0;
1698 
1699 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1700 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1701 	DELAY(1000);
1702 
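	/*
	 * Note that 't' accumulates across rounds: only the first round
	 * polls for the full 150ms; each later round performs a single
	 * readiness check, separated by 25ms pauses.
	 */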
1703 	for (ntries = 0; ntries < 10; ntries++) {
1704 		/* If HW is not ready, prepare the conditions to check again */
1705 		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1706 		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1707 
1708 		do {
1709 			if (iwm_set_hw_ready(sc))
1710 				return 0;
1711 			DELAY(200);
1712 			t += 200;
1713 		} while (t < 150000);
1714 		DELAY(25000);
1715 	}
1716 
1717 	return ETIMEDOUT;
1718 }
1719 
1720 void
1721 iwm_apm_config(struct iwm_softc *sc)
1722 {
1723 	pcireg_t lctl, cap;
1724 
1725 	/*
1726 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1727 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1728 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1729 	 *    costs negligible amount of power savings.
1730 	 * If not (unlikely), enable L0S, so there is at least some
1731 	 *    power savings, even without L1.
1732 	 */
1733 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1734 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1735 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1736 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1737 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1738 	} else {
1739 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1740 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1741 	}
1742 
1743 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1744 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1745 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1746 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1747 	    DEVNAME(sc),
1748 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1749 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1750 }
1751 
1752 /*
1753  * Start up the NIC's basic functionality after it has been reset,
1754  * e.g. after platform boot or shutdown.
1755  * NOTE: this does not load uCode nor start the embedded processor.
1756  */
1757 int
1758 iwm_apm_init(struct iwm_softc *sc)
1759 {
1760 	int err = 0;
1761 
1762 	/* Disable L0S exit timer (platform NMI workaround) */
1763 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1764 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1765 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1766 
1767 	/*
1768 	 * Disable L0s without affecting L1;
1769 	 *  don't wait for ICH L0s (ICH bug W/A)
1770 	 */
1771 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1772 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1773 
1774 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1775 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1776 
1777 	/*
1778 	 * Enable HAP INTA (interrupt from management bus) to
1779 	 * wake device's PCI Express link L1a -> L0s
1780 	 */
1781 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1782 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1783 
1784 	iwm_apm_config(sc);
1785 
1786 #if 0 /* not for 7k/8k */
1787 	/* Configure analog phase-lock-loop before activating to D0A */
1788 	if (trans->cfg->base_params->pll_cfg_val)
1789 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1790 		    trans->cfg->base_params->pll_cfg_val);
1791 #endif
1792 
1793 	/*
1794 	 * Set "initialization complete" bit to move adapter from
1795 	 * D0U* --> D0A* (powered-up active) state.
1796 	 */
1797 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1798 
1799 	/*
1800 	 * Wait for clock stabilization; once stabilized, access to
1801 	 * device-internal resources is supported, e.g. iwm_write_prph()
1802 	 * and accesses to uCode SRAM.
1803 	 */
1804 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1805 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1806 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1807 		printf("%s: timeout waiting for clock stabilization\n",
1808 		    DEVNAME(sc));
1809 		err = ETIMEDOUT;
1810 		goto out;
1811 	}
1812 
1813 	if (sc->host_interrupt_operation_mode) {
1814 		/*
1815 		 * This is a bit of an abuse: the oscillator workaround below is
1816 		 * needed on 7260/3160 only, so we key off
1817 		 * host_interrupt_operation_mode even though it is unrelated.
1818 		 *
1819 		 * Enable the oscillator to count wake up time for L1 exit. This
1820 		 * consumes slightly more power (100uA) - but allows to be sure
1821 		 * that we wake up from L1 on time.
1822 		 *
1823 		 * This looks weird: read twice the same register, discard the
1824 		 * value, set a bit, and yet again, read that same register
1825 		 * just to discard the value. But that's the way the hardware
1826 		 * seems to like it.
1827 		 */
1828 		if (iwm_nic_lock(sc)) {
1829 			iwm_read_prph(sc, IWM_OSC_CLK);
1830 			iwm_read_prph(sc, IWM_OSC_CLK);
1831 			iwm_nic_unlock(sc);
1832 		}
1833 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1834 		    IWM_OSC_CLK_FORCE_CONTROL);
1835 		if (err)
1836 			goto out;
1837 		if (iwm_nic_lock(sc)) {
1838 			iwm_read_prph(sc, IWM_OSC_CLK);
1839 			iwm_read_prph(sc, IWM_OSC_CLK);
1840 			iwm_nic_unlock(sc);
1841 		}
1842 	}
1843 
1844 	/*
1845 	 * Enable DMA clock and wait for it to stabilize.
1846 	 *
1847 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1848 	 * do not disable clocks.  This preserves any hardware bits already
1849 	 * set by default in "CLK_CTRL_REG" after reset.
1850 	 */
1851 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1852 		if (iwm_nic_lock(sc)) {
1853 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1854 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1855 			iwm_nic_unlock(sc);
1856 		}
1857 		DELAY(20);
1858 
1859 		/* Disable L1-Active */
1860 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1861 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1862 		if (err)
1863 			goto out;
1864 
1865 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1866 		if (iwm_nic_lock(sc)) {
1867 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1868 			    IWM_APMG_RTC_INT_STT_RFKILL);
1869 			iwm_nic_unlock(sc);
1870 		}
1871 	}
1872  out:
1873 	if (err)
1874 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1875 	return err;
1876 }
1877 
1878 void
1879 iwm_apm_stop(struct iwm_softc *sc)
1880 {
1881 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1882 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1883 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1884 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1885 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1886 	DELAY(1000);
1887 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1888 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1889 	DELAY(5000);
1890 
1891 	/* stop device's busmaster DMA activity */
1892 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1893 
1894 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1895 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1896 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1897 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1898 
1899 	/*
1900 	 * Clear "initialization complete" bit to move adapter from
1901 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1902 	 */
1903 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1904 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1905 }
1906 
1907 void
1908 iwm_init_msix_hw(struct iwm_softc *sc)
1909 {
1910 	iwm_conf_msix_hw(sc, 0);
1911 
1912 	if (!sc->sc_msix)
1913 		return;
1914 
1915 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1916 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1917 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1918 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1919 }
1920 
1921 void
1922 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1923 {
1924 	int vector = 0;
1925 
1926 	if (!sc->sc_msix) {
1927 		/* Newer chips default to MSI-X; switch them back to MSI explicitly. */
1928 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1929 			iwm_write_prph(sc, IWM_UREG_CHICK,
1930 			    IWM_UREG_CHICK_MSI_ENABLE);
1931 			iwm_nic_unlock(sc);
1932 		}
1933 		return;
1934 	}
1935 
1936 	if (!stopped && iwm_nic_lock(sc)) {
1937 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1938 		iwm_nic_unlock(sc);
1939 	}
1940 
1941 	/* Disable all interrupts */
1942 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1943 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1944 
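	/*
	 * All causes below are routed to MSI-X vector 0; the non-auto-clear
	 * flag means a cause bit stays set until software acknowledges it.
	 */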
1945 	/* Map fallback-queue (command/mgmt) to a single vector */
1946 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1947 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1948 	/* Map RSS queue (data) to the same vector */
1949 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1950 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1951 
1952 	/* Enable interrupt causes for the RX queues. */
1953 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1954 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1955 
1956 	/* Map non-RX causes to the same vector */
1957 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1958 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1959 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1960 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1961 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1962 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1963 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1964 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1965 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1966 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1967 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1968 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1969 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1970 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1971 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1972 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1973 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1974 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1975 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1976 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1977 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1978 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1979 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1980 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1981 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1982 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1983 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1984 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1985 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1986 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1987 
1988 	/* Enable interrupts for the non-RX causes. */
1989 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1990 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1991 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1992 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1993 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1994 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1995 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1996 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
1997 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
1998 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
1999 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2000 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2001 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2002 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
2003 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2004 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2005 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2006 }
2007 
2008 int
2009 iwm_clear_persistence_bit(struct iwm_softc *sc)
2010 {
2011 	uint32_t hpm, wprot;
2012 
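	/*
	 * A device that was not cleanly shut down may still have the
	 * persistence bit set; it must be cleared before reset can work.
	 * A read of 0xa5a5a5a0 appears to indicate an unreadable register
	 * and is ignored.
	 */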
2013 	hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2014 	if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2015 		wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2016 		if (wprot & IWM_PREG_WFPM_ACCESS) {
2017 			printf("%s: cannot clear persistence bit\n",
2018 			    DEVNAME(sc));
2019 			return EPERM;
2020 		}
2021 		iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2022 		    hpm & ~IWM_HPM_PERSISTENCE_BIT);
2023 	}
2024 
2025 	return 0;
2026 }
2027 
2028 int
2029 iwm_start_hw(struct iwm_softc *sc)
2030 {
2031 	int err;
2032 
2033 	err = iwm_prepare_card_hw(sc);
2034 	if (err)
2035 		return err;
2036 
2037 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2038 		err = iwm_clear_persistence_bit(sc);
2039 		if (err)
2040 			return err;
2041 	}
2042 
2043 	/* Reset the entire device */
2044 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2045 	DELAY(5000);
2046 
2047 	err = iwm_apm_init(sc);
2048 	if (err)
2049 		return err;
2050 
2051 	iwm_init_msix_hw(sc);
2052 
2053 	iwm_enable_rfkill_int(sc);
2054 	iwm_check_rfkill(sc);
2055 
2056 	return 0;
2057 }
2058 
2059 
2060 void
2061 iwm_stop_device(struct iwm_softc *sc)
2062 {
2063 	int chnl, ntries;
2064 	int qid;
2065 
2066 	iwm_disable_interrupts(sc);
2067 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2068 
2069 	/* Stop all DMA channels. */
2070 	if (iwm_nic_lock(sc)) {
2071 		/* Deactivate TX scheduler. */
2072 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2073 
2074 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2075 			IWM_WRITE(sc,
2076 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2077 			for (ntries = 0; ntries < 200; ntries++) {
2078 				uint32_t r;
2079 
2080 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2081 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2082 				    chnl))
2083 					break;
2084 				DELAY(20);
2085 			}
2086 		}
2087 		iwm_nic_unlock(sc);
2088 	}
2089 	iwm_disable_rx_dma(sc);
2090 
2091 	iwm_reset_rx_ring(sc, &sc->rxq);
2092 
2093 	for (qid = 0; qid < nitems(sc->txq); qid++)
2094 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
2095 
2096 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2097 		if (iwm_nic_lock(sc)) {
2098 			/* Power-down device's busmaster DMA clocks */
2099 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2100 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2101 			iwm_nic_unlock(sc);
2102 		}
2103 		DELAY(5);
2104 	}
2105 
2106 	/* Make sure (redundantly) that we've released our request to stay awake. */
2107 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2108 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2109 	if (sc->sc_nic_locks > 0)
2110 		printf("%s: %d active NIC locks forcefully cleared\n",
2111 		    DEVNAME(sc), sc->sc_nic_locks);
2112 	sc->sc_nic_locks = 0;
2113 
2114 	/* Stop the device, and put it in low power state */
2115 	iwm_apm_stop(sc);
2116 
2117 	/* Reset the on-board processor. */
2118 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2119 	DELAY(5000);
2120 
2121 	/*
2122 	 * Upon stop, the IVAR table gets erased, so MSI-X won't
2123 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2124 	 * that enables radio won't fire on the correct irq, and the
2125 	 * driver won't be able to handle the interrupt.
2126 	 * Configure the IVAR table again after reset.
2127 	 */
2128 	iwm_conf_msix_hw(sc, 1);
2129 
2130 	/*
2131 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2132 	 * Clear the interrupt again.
2133 	 */
2134 	iwm_disable_interrupts(sc);
2135 
2136 	/* Even though we stop the HW we still want the RF kill interrupt. */
2137 	iwm_enable_rfkill_int(sc);
2138 	iwm_check_rfkill(sc);
2139 
2140 	iwm_prepare_card_hw(sc);
2141 }
2142 
2143 void
2144 iwm_nic_config(struct iwm_softc *sc)
2145 {
2146 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2147 	uint32_t mask, val, reg_val = 0;
2148 
2149 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2150 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2151 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2152 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2153 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2154 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2155 
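	/*
	 * Pack the MAC step/dash (from the HW revision) and the radio
	 * type/step/dash into a single HW_IF_CONFIG register image.
	 */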
2156 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2157 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2158 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2159 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2160 
2161 	/* radio configuration */
2162 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2163 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2164 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2165 
2166 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2167 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2168 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2169 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2170 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2171 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2172 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2173 
2174 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2175 	val &= ~mask;
2176 	val |= reg_val;
2177 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2178 
2179 	/*
2180 	 * W/A: the NIC gets stuck in a reset state after early PCIe power off
2181 	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
2182 	 * to lose ownership and become unable to obtain it back.
2183 	 */
2184 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2185 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2186 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2187 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2188 }
2189 
2190 int
2191 iwm_nic_rx_init(struct iwm_softc *sc)
2192 {
2193 	if (sc->sc_mqrx_supported)
2194 		return iwm_nic_rx_mq_init(sc);
2195 	else
2196 		return iwm_nic_rx_legacy_init(sc);
2197 }
2198 
2199 int
2200 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2201 {
2202 	int enabled;
2203 
2204 	if (!iwm_nic_lock(sc))
2205 		return EBUSY;
2206 
2207 	/* Stop RX DMA. */
2208 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2209 	/* Disable RX used and free queue operation. */
2210 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2211 
2212 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2213 	    sc->rxq.free_desc_dma.paddr);
2214 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2215 	    sc->rxq.used_desc_dma.paddr);
2216 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2217 	    sc->rxq.stat_dma.paddr);
2218 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2219 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2220 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2221 
2222 	/* We configure only queue 0 for now. */
2223 	enabled = ((1 << 0) << 16) | (1 << 0);
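	/*
	 * The low halfword enables RX queue 0; the matching bit in the
	 * high halfword appears to enable its paired used-BD tracking.
	 */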
2224 
2225 	/* Enable RX DMA, 4KB buffer size. */
2226 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2227 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2228 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2229 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2230 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2231 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2232 
2233 	/* Enable RX DMA snooping. */
2234 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2235 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2236 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2237 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2238 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2239 
2240 	/* Enable the configured queue(s). */
2241 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2242 
2243 	iwm_nic_unlock(sc);
2244 
2245 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2246 
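	/*
	 * Tell the hardware the first 8 receive buffers are ready, matching
	 * the write-pointer convention of the legacy init path below.
	 */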
2247 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2248 
2249 	return 0;
2250 }
2251 
2252 int
2253 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2254 {
2255 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2256 
2257 	iwm_disable_rx_dma(sc);
2258 
2259 	if (!iwm_nic_lock(sc))
2260 		return EBUSY;
2261 
2262 	/* reset and flush pointers */
2263 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2264 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2265 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2266 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2267 
2268 	/* Set physical address of RX ring (256-byte aligned). */
2269 	IWM_WRITE(sc,
2270 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2271 
2272 	/* Set physical address of RX status (16-byte aligned). */
2273 	IWM_WRITE(sc,
2274 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2275 
2276 	/* Enable RX. */
2277 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2278 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2279 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2280 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2281 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2282 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2283 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2284 
2285 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2286 
2287 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2288 	if (sc->host_interrupt_operation_mode)
2289 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2290 
2291 	iwm_nic_unlock(sc);
2292 
2293 	/*
2294 	 * This value should initially be 0 (before preparing any RBs),
2295 	 * and should be 8 after preparing the first 8 RBs (for example).
2296 	 */
2297 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2298 
2299 	return 0;
2300 }
2301 
2302 int
2303 iwm_nic_tx_init(struct iwm_softc *sc)
2304 {
2305 	int qid, err;
2306 
2307 	if (!iwm_nic_lock(sc))
2308 		return EBUSY;
2309 
2310 	/* Deactivate TX scheduler. */
2311 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2312 
2313 	/* Set physical address of "keep warm" page (16-byte aligned). */
2314 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2315 
2316 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2317 		struct iwm_tx_ring *txq = &sc->txq[qid];
2318 
2319 		/* Set physical address of TX ring (256-byte aligned). */
2320 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2321 		    txq->desc_dma.paddr >> 8);
2322 	}
2323 
2324 	err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2325 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2326 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2327 
2328 	iwm_nic_unlock(sc);
2329 
2330 	return err;
2331 }
2332 
2333 int
2334 iwm_nic_init(struct iwm_softc *sc)
2335 {
2336 	int err;
2337 
2338 	iwm_apm_init(sc);
2339 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2340 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2341 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2342 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2343 
2344 	iwm_nic_config(sc);
2345 
2346 	err = iwm_nic_rx_init(sc);
2347 	if (err)
2348 		return err;
2349 
2350 	err = iwm_nic_tx_init(sc);
2351 	if (err)
2352 		return err;
2353 
2354 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2355 
2356 	return 0;
2357 }
2358 
2359 /* Map a TID to an ieee80211_edca_ac category. */
2360 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2361 	EDCA_AC_BE,
2362 	EDCA_AC_BK,
2363 	EDCA_AC_BK,
2364 	EDCA_AC_BE,
2365 	EDCA_AC_VI,
2366 	EDCA_AC_VI,
2367 	EDCA_AC_VO,
2368 	EDCA_AC_VO,
2369 };
2370 
2371 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2372 const uint8_t iwm_ac_to_tx_fifo[] = {
2373 	IWM_TX_FIFO_BE,
2374 	IWM_TX_FIFO_BK,
2375 	IWM_TX_FIFO_VI,
2376 	IWM_TX_FIFO_VO,
2377 };
2378 
2379 int
2380 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2381 {
2382 	int err;
2383 	iwm_nic_assert_locked(sc);
2384 
2385 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2386 
2387 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2388 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2389 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2390 
2391 	err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2392 	if (err)
2393 		return err;
2395 
2396 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2397 
2398 	iwm_write_mem32(sc,
2399 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2400 
2401 	/* Set scheduler window size and frame limit. */
2402 	iwm_write_mem32(sc,
2403 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2404 	    sizeof(uint32_t),
2405 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2406 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2407 	    ((IWM_FRAME_LIMIT
2408 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2409 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2410 
2411 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2412 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2413 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2414 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2415 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2416 
2417 	if (qid == sc->cmdqid)
2418 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2419 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2420 
2421 	return 0;
2422 }
2423 
2424 int
2425 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2426     int aggregate, uint8_t tid, uint16_t ssn)
2427 {
2428 	struct iwm_tx_ring *ring = &sc->txq[qid];
2429 	struct iwm_scd_txq_cfg_cmd cmd;
2430 	int err, idx, scd_bug;
2431 
2432 	iwm_nic_assert_locked(sc);
2433 
2434 	/*
2435 	 * If we need to move the SCD write pointer by steps of
2436 	 * 0x40, 0x80 or 0xc0, it gets stuck.
2437 	 * This is really ugly, but this is the easiest way out for
2438 	 * this sad hardware issue.
2439 	 * This bug has been fixed on devices 9000 and up.
2440 	 */
2441 	scd_bug = !sc->sc_mqrx_supported &&
2442 	    !((ssn - ring->cur) & 0x3f) &&
2443 	    (ssn != ring->cur);
2444 	if (scd_bug)
2445 		ssn = (ssn + 1) & 0xfff;
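	/* Advancing the SSN by one avoids the stuck write-pointer steps. */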
2446 
2447 	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2448 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2449 	ring->cur = idx;
2450 	ring->tail = idx;
2451 
2452 	memset(&cmd, 0, sizeof(cmd));
2453 	cmd.tid = tid;
2454 	cmd.scd_queue = qid;
2455 	cmd.enable = 1;
2456 	cmd.sta_id = sta_id;
2457 	cmd.tx_fifo = fifo;
2458 	cmd.aggregate = aggregate;
2459 	cmd.ssn = htole16(ssn);
2460 	cmd.window = IWM_FRAME_LIMIT;
2461 
2462 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2463 	    sizeof(cmd), &cmd);
2464 	if (err)
2465 		return err;
2466 
2467 	sc->qenablemsk |= (1 << qid);
2468 	return 0;
2469 }
2470 
2471 int
2472 iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2473 {
2474 	struct iwm_scd_txq_cfg_cmd cmd;
2475 	int err;
2476 
2477 	memset(&cmd, 0, sizeof(cmd));
2478 	cmd.tid = tid;
2479 	cmd.scd_queue = qid;
2480 	cmd.enable = 0;
2481 	cmd.sta_id = sta_id;
2482 
2483 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2484 	if (err)
2485 		return err;
2486 
2487 	sc->qenablemsk &= ~(1 << qid);
2488 	return 0;
2489 }
2490 
2491 int
2492 iwm_post_alive(struct iwm_softc *sc)
2493 {
2494 	int nwords;
2495 	int err, chnl;
2496 	uint32_t base;
2497 
2498 	if (!iwm_nic_lock(sc))
2499 		return EBUSY;
2500 
2501 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2502 
2503 	iwm_ict_reset(sc);
2504 
2505 	iwm_nic_unlock(sc);
2506 
2507 	/* Clear TX scheduler state in SRAM. */
2508 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2509 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2510 	    / sizeof(uint32_t);
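	/* A NULL buffer makes iwm_write_mem() fill the region with zeros. */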
2511 	err = iwm_write_mem(sc,
2512 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2513 	    NULL, nwords);
2514 	if (err)
2515 		return err;
2516 
2517 	if (!iwm_nic_lock(sc))
2518 		return EBUSY;
2519 
2520 	/* Set physical address of TX scheduler rings (1KB aligned). */
2521 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2522 
2523 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2524 
2525 	/* enable command channel */
2526 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2527 	if (err) {
2528 		iwm_nic_unlock(sc);
2529 		return err;
2530 	}
2531 
2532 	/* Activate TX scheduler. */
2533 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2534 
2535 	/* Enable DMA channels. */
2536 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2537 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2538 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2539 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2540 	}
2541 
2542 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2543 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2544 
2545 	iwm_nic_unlock(sc);
2546 
2547 	/* Enable L1-Active */
2548 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2549 		err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2550 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2551 	}
2552 
2553 	return err;
2554 }
2555 
2556 struct iwm_phy_db_entry *
2557 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2558 {
2559 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2560 
2561 	if (type >= IWM_PHY_DB_MAX)
2562 		return NULL;
2563 
2564 	switch (type) {
2565 	case IWM_PHY_DB_CFG:
2566 		return &phy_db->cfg;
2567 	case IWM_PHY_DB_CALIB_NCH:
2568 		return &phy_db->calib_nch;
2569 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2570 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2571 			return NULL;
2572 		return &phy_db->calib_ch_group_papd[chg_id];
2573 	case IWM_PHY_DB_CALIB_CHG_TXP:
2574 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2575 			return NULL;
2576 		return &phy_db->calib_ch_group_txp[chg_id];
2577 	default:
2578 		return NULL;
2579 	}
2580 	return NULL;
2581 }
2582 
2583 int
2584 iwm_phy_db_set_section(struct iwm_softc *sc,
2585     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2586 {
2587 	uint16_t type = le16toh(phy_db_notif->type);
2588 	uint16_t size  = le16toh(phy_db_notif->length);
2589 	struct iwm_phy_db_entry *entry;
2590 	uint16_t chg_id = 0;
2591 
2592 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2593 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2594 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2595 
2596 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2597 	if (!entry)
2598 		return EINVAL;
2599 
2600 	if (entry->data)
2601 		free(entry->data, M_DEVBUF, entry->size);
2602 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2603 	if (!entry->data) {
2604 		entry->size = 0;
2605 		return ENOMEM;
2606 	}
2607 	memcpy(entry->data, phy_db_notif->data, size);
2608 	entry->size = size;
2609 
2610 	return 0;
2611 }
2612 
2613 int
2614 iwm_is_valid_channel(uint16_t ch_id)
2615 {
2616 	if (ch_id <= 14 ||
2617 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2618 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2619 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2620 		return 1;
2621 	return 0;
2622 }
2623 
2624 uint8_t
2625 iwm_ch_id_to_ch_index(uint16_t ch_id)
2626 {
2627 	if (!iwm_is_valid_channel(ch_id))
2628 		return 0xff;
2629 
2630 	if (ch_id <= 14)
2631 		return ch_id - 1;
2632 	if (ch_id <= 64)
2633 		return (ch_id + 20) / 4;
2634 	if (ch_id <= 140)
2635 		return (ch_id - 12) / 4;
2636 	return (ch_id - 13) / 4;
2637 }
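
/*
 * The mapping above yields contiguous indices: channels 1-14 map to
 * indices 0-13, channel 36 to (36+20)/4 = 14, channel 64 to 21,
 * channel 100 to (100-12)/4 = 22, and channel 165 to (165-13)/4 = 38.
 */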
2638 
2639 
2640 uint16_t
2641 iwm_channel_id_to_papd(uint16_t ch_id)
2642 {
2643 	if (!iwm_is_valid_channel(ch_id))
2644 		return 0xff;
2645 
2646 	if (1 <= ch_id && ch_id <= 14)
2647 		return 0;
2648 	if (36 <= ch_id && ch_id <= 64)
2649 		return 1;
2650 	if (100 <= ch_id && ch_id <= 140)
2651 		return 2;
2652 	return 3;
2653 }
2654 
2655 uint16_t
2656 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2657 {
2658 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2659 	struct iwm_phy_db_chg_txp *txp_chg;
2660 	int i;
2661 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2662 
2663 	if (ch_index == 0xff)
2664 		return 0xff;
2665 
2666 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2667 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2668 		if (!txp_chg)
2669 			return 0xff;
2670 		/*
2671 		 * Look for the first channel group whose maximum channel
2672 		 * index is at least the requested channel's index.
2673 		 */
2674 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2675 			return i;
2676 	}
2677 	return 0xff;
2678 }
2679 
2680 int
2681 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2682     uint16_t *size, uint16_t ch_id)
2683 {
2684 	struct iwm_phy_db_entry *entry;
2685 	uint16_t ch_group_id = 0;
2686 
2687 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2688 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2689 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2690 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2691 
2692 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2693 	if (!entry)
2694 		return EINVAL;
2695 
2696 	*data = entry->data;
2697 	*size = entry->size;
2698 
2699 	return 0;
2700 }
2701 
2702 int
2703 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2704     void *data)
2705 {
2706 	struct iwm_phy_db_cmd phy_db_cmd;
2707 	struct iwm_host_cmd cmd = {
2708 		.id = IWM_PHY_DB_CMD,
2709 		.flags = IWM_CMD_ASYNC,
2710 	};
2711 
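	/*
	 * le16toh() here is effectively htole16(): both are the identity on
	 * little-endian hosts and a byte swap on big-endian ones, so the
	 * firmware receives little-endian values either way.
	 */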
2712 	phy_db_cmd.type = le16toh(type);
2713 	phy_db_cmd.length = le16toh(length);
2714 
2715 	cmd.data[0] = &phy_db_cmd;
2716 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2717 	cmd.data[1] = data;
2718 	cmd.len[1] = length;
2719 
2720 	return iwm_send_cmd(sc, &cmd);
2721 }
2722 
2723 int
2724 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2725     uint8_t max_ch_groups)
2726 {
2727 	uint16_t i;
2728 	int err;
2729 	struct iwm_phy_db_entry *entry;
2730 
2731 	for (i = 0; i < max_ch_groups; i++) {
2732 		entry = iwm_phy_db_get_section(sc, type, i);
2733 		if (!entry)
2734 			return EINVAL;
2735 
2736 		if (!entry->size)
2737 			continue;
2738 
2739 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2740 		if (err)
2741 			return err;
2742 
2743 		DELAY(1000);
2744 	}
2745 
2746 	return 0;
2747 }
2748 
2749 int
2750 iwm_send_phy_db_data(struct iwm_softc *sc)
2751 {
2752 	uint8_t *data = NULL;
2753 	uint16_t size = 0;
2754 	int err;
2755 
2756 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2757 	if (err)
2758 		return err;
2759 
2760 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2761 	if (err)
2762 		return err;
2763 
2764 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2765 	    &data, &size, 0);
2766 	if (err)
2767 		return err;
2768 
2769 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2770 	if (err)
2771 		return err;
2772 
2773 	err = iwm_phy_db_send_all_channel_groups(sc,
2774 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2775 	if (err)
2776 		return err;
2777 
2778 	err = iwm_phy_db_send_all_channel_groups(sc,
2779 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2780 	if (err)
2781 		return err;
2782 
2783 	return 0;
2784 }
2785 
2786 /*
2787  * For the high priority TE use a time event type that has similar priority to
2788  * the FW's action scan priority.
2789  */
2790 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2791 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2792 
2793 int
2794 iwm_send_time_event_cmd(struct iwm_softc *sc,
2795     const struct iwm_time_event_cmd *cmd)
2796 {
2797 	struct iwm_rx_packet *pkt;
2798 	struct iwm_time_event_resp *resp;
2799 	struct iwm_host_cmd hcmd = {
2800 		.id = IWM_TIME_EVENT_CMD,
2801 		.flags = IWM_CMD_WANT_RESP,
2802 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2803 	};
2804 	uint32_t resp_len;
2805 	int err;
2806 
2807 	hcmd.data[0] = cmd;
2808 	hcmd.len[0] = sizeof(*cmd);
2809 	err = iwm_send_cmd(sc, &hcmd);
2810 	if (err)
2811 		return err;
2812 
2813 	pkt = hcmd.resp_pkt;
2814 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2815 		err = EIO;
2816 		goto out;
2817 	}
2818 
2819 	resp_len = iwm_rx_packet_payload_len(pkt);
2820 	if (resp_len != sizeof(*resp)) {
2821 		err = EIO;
2822 		goto out;
2823 	}
2824 
2825 	resp = (void *)pkt->data;
2826 	if (le32toh(resp->status) == 0)
2827 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2828 	else
2829 		err = EIO;
2830 out:
2831 	iwm_free_resp(sc, &hcmd);
2832 	return err;
2833 }
2834 
2835 void
2836 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2837     uint32_t duration, uint32_t max_delay)
2838 {
2839 	struct iwm_time_event_cmd time_cmd;
2840 
2841 	/* Do nothing if a time event is already scheduled. */
2842 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2843 		return;
2844 
2845 	memset(&time_cmd, 0, sizeof(time_cmd));
2846 
2847 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2848 	time_cmd.id_and_color =
2849 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2850 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2851 
2852 	time_cmd.apply_time = htole32(0);
2853 
2854 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2855 	time_cmd.max_delay = htole32(max_delay);
2856 	/* TODO: why do we need to set an interval if the event is not periodic? */
2857 	time_cmd.interval = htole32(1);
2858 	time_cmd.duration = htole32(duration);
2859 	time_cmd.repeat = 1;
2860 	time_cmd.policy = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2861 	    IWM_TE_V2_NOTIF_HOST_EVENT_END |
2862 	    IWM_T2_V2_START_IMMEDIATELY);
2864 
2865 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2866 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2867 
2868 	DELAY(100);
2869 }
2870 
2871 void
2872 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2873 {
2874 	struct iwm_time_event_cmd time_cmd;
2875 
2876 	/* Do nothing if the time event has already ended. */
2877 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2878 		return;
2879 
2880 	memset(&time_cmd, 0, sizeof(time_cmd));
2881 
2882 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2883 	time_cmd.id_and_color =
2884 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2885 	time_cmd.id = htole32(sc->sc_time_event_uid);
2886 
2887 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2888 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2889 
2890 	DELAY(100);
2891 }
2892 
2893 /*
2894  * NVM read access and content parsing.  We do not support
2895  * external NVM or writing NVM.
2896  */
2897 
2898 /* list of NVM sections we are allowed/need to read */
2899 const int iwm_nvm_to_read[] = {
2900 	IWM_NVM_SECTION_TYPE_HW,
2901 	IWM_NVM_SECTION_TYPE_SW,
2902 	IWM_NVM_SECTION_TYPE_REGULATORY,
2903 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2904 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2905 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2906 	IWM_NVM_SECTION_TYPE_HW_8000,
2907 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2908 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2909 };
2910 
2911 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2912 
2913 #define IWM_NVM_WRITE_OPCODE 1
2914 #define IWM_NVM_READ_OPCODE 0
2915 
2916 int
2917 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2918     uint16_t length, uint8_t *data, uint16_t *len)
2919 {
2921 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2922 		.offset = htole16(offset),
2923 		.length = htole16(length),
2924 		.type = htole16(section),
2925 		.op_code = IWM_NVM_READ_OPCODE,
2926 	};
2927 	struct iwm_nvm_access_resp *nvm_resp;
2928 	struct iwm_rx_packet *pkt;
2929 	struct iwm_host_cmd cmd = {
2930 		.id = IWM_NVM_ACCESS_CMD,
2931 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2932 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2933 		.data = { &nvm_access_cmd, },
2934 	};
2935 	int err, offset_read;
2936 	size_t bytes_read;
2937 	uint8_t *resp_data;
2938 
2939 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2940 
2941 	err = iwm_send_cmd(sc, &cmd);
2942 	if (err)
2943 		return err;
2944 
2945 	pkt = cmd.resp_pkt;
2946 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2947 		err = EIO;
2948 		goto exit;
2949 	}
2950 
2951 	/* Extract NVM response */
2952 	nvm_resp = (void *)pkt->data;
2953 	if (nvm_resp == NULL) {
2954 		err = EIO;
		goto exit;
	}
2955 
2956 	err = le16toh(nvm_resp->status);
2957 	bytes_read = le16toh(nvm_resp->length);
2958 	offset_read = le16toh(nvm_resp->offset);
2959 	resp_data = nvm_resp->data;
2960 	if (err) {
2961 		err = EINVAL;
2962 		goto exit;
2963 	}
2964 
2965 	if (offset_read != offset) {
2966 		err = EINVAL;
2967 		goto exit;
2968 	}
2969 
2970 	if (bytes_read > length) {
2971 		err = EINVAL;
2972 		goto exit;
2973 	}
2974 
2975 	memcpy(data + offset, resp_data, bytes_read);
2976 	*len = bytes_read;
2977 
2978  exit:
2979 	iwm_free_resp(sc, &cmd);
2980 	return err;
2981 }
2982 
2983 /*
2984  * Reads an NVM section completely.
2985  * NICs prior to the 7000 family don't have a real NVM; they just read
2986  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2987  * by the uCode, we need to manually check in this case that we don't
2988  * overflow and try to read more than the EEPROM size.
2989  */
2990 int
2991 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2992     uint16_t *len, size_t max_len)
2993 {
2994 	uint16_t chunklen, seglen;
2995 	int err = 0;
2996 
2997 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2998 	*len = 0;
2999 
3000 	/* Read NVM chunks until exhausted (reading less than requested) */
3001 	while (seglen == chunklen && *len < max_len) {
3002 		err = iwm_nvm_read_chunk(sc,
3003 		    section, *len, chunklen, data, &seglen);
3004 		if (err)
3005 			return err;
3006 
3007 		*len += seglen;
3008 	}
3009 
3010 	return err;
3011 }
3012 
3013 uint8_t
3014 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3015 {
3016 	uint8_t tx_ant;
3017 
3018 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3019 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3020 
3021 	if (sc->sc_nvm.valid_tx_ant)
3022 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3023 
3024 	return tx_ant;
3025 }
3026 
3027 uint8_t
3028 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3029 {
3030 	uint8_t rx_ant;
3031 
3032 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3033 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3034 
3035 	if (sc->sc_nvm.valid_rx_ant)
3036 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3037 
3038 	return rx_ant;
3039 }
3040 
3041 void
3042 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3043     const uint8_t *nvm_channels, int nchan)
3044 {
3045 	struct ieee80211com *ic = &sc->sc_ic;
3046 	struct iwm_nvm_data *data = &sc->sc_nvm;
3047 	int ch_idx;
3048 	struct ieee80211_channel *channel;
3049 	uint16_t ch_flags;
3050 	int is_5ghz;
3051 	int flags, hw_value;
3052 
3053 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3054 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3055 
3056 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3057 		    !data->sku_cap_band_52GHz_enable)
3058 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3059 
3060 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3061 			continue;
3062 
3063 		hw_value = nvm_channels[ch_idx];
3064 		channel = &ic->ic_channels[hw_value];
3065 
3066 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3067 		if (!is_5ghz) {
3068 			flags = IEEE80211_CHAN_2GHZ;
3069 			channel->ic_flags
3070 			    = IEEE80211_CHAN_CCK
3071 			    | IEEE80211_CHAN_OFDM
3072 			    | IEEE80211_CHAN_DYN
3073 			    | IEEE80211_CHAN_2GHZ;
3074 		} else {
3075 			flags = IEEE80211_CHAN_5GHZ;
3076 			channel->ic_flags =
3077 			    IEEE80211_CHAN_A;
3078 		}
3079 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3080 
3081 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3082 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3083 
3084 		if (data->sku_cap_11n_enable) {
3085 			channel->ic_flags |= IEEE80211_CHAN_HT;
3086 			if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3087 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3088 		}
3089 
3090 		if (is_5ghz && data->sku_cap_11ac_enable) {
3091 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3092 			if (ch_flags & IWM_NVM_CHANNEL_80MHZ)
3093 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3094 		}
3095 	}
3096 }
3097 
3098 int
3099 iwm_mimo_enabled(struct iwm_softc *sc)
3100 {
3101 	struct ieee80211com *ic = &sc->sc_ic;
3102 
3103 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3104 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3105 }
3106 
3107 void
3108 iwm_setup_ht_rates(struct iwm_softc *sc)
3109 {
3110 	struct ieee80211com *ic = &sc->sc_ic;
3111 	uint8_t rx_ant;
3112 
3113 	/* TX is supported with the same MCS as RX. */
3114 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3115 
3116 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3117 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3118 
3119 	if (!iwm_mimo_enabled(sc))
3120 		return;
3121 
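	/* Either two-antenna combination (A+B or B+C) can receive two spatial streams. */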
3122 	rx_ant = iwm_fw_valid_rx_ant(sc);
3123 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3124 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3125 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3126 }
3127 
3128 void
3129 iwm_setup_vht_rates(struct iwm_softc *sc)
3130 {
3131 	struct ieee80211com *ic = &sc->sc_ic;
3132 	uint8_t rx_ant = iwm_fw_valid_rx_ant(sc);
3133 	int n;
3134 
3135 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3136 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3137 
3138 	if (iwm_mimo_enabled(sc) &&
3139 	    ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3140 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)) {
3141 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3142 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3143 	} else {
3144 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3145 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3146 	}
3147 
3148 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3149 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3150 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3151 	}
3152 
3153 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3154 }
3155 
3156 void
3157 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3158     uint16_t ssn, uint16_t buf_size)
3159 {
3160 	reorder_buf->head_sn = ssn;
3161 	reorder_buf->num_stored = 0;
3162 	reorder_buf->buf_size = buf_size;
3163 	reorder_buf->last_amsdu = 0;
3164 	reorder_buf->last_sub_index = 0;
3165 	reorder_buf->removed = 0;
3166 	reorder_buf->valid = 0;
3167 	reorder_buf->consec_oldsn_drops = 0;
3168 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3169 	reorder_buf->consec_oldsn_prev_drop = 0;
3170 }
3171 
3172 void
3173 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3174 {
3175 	int i;
3176 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3177 	struct iwm_reorder_buf_entry *entry;
3178 
3179 	for (i = 0; i < reorder_buf->buf_size; i++) {
3180 		entry = &rxba->entries[i];
3181 		ml_purge(&entry->frames);
3182 		timerclear(&entry->reorder_time);
3183 	}
3184 
3185 	reorder_buf->removed = 1;
3186 	timeout_del(&reorder_buf->reorder_timer);
3187 	timerclear(&rxba->last_rx);
3188 	timeout_del(&rxba->session_timer);
3189 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3190 }
3191 
3192 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3193 
3194 void
3195 iwm_rx_ba_session_expired(void *arg)
3196 {
3197 	struct iwm_rxba_data *rxba = arg;
3198 	struct iwm_softc *sc = rxba->sc;
3199 	struct ieee80211com *ic = &sc->sc_ic;
3200 	struct ieee80211_node *ni = ic->ic_bss;
3201 	struct timeval now, timeout, expiry;
3202 	int s;
3203 
3204 	s = splnet();
3205 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3206 	    ic->ic_state == IEEE80211_S_RUN &&
3207 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3208 		getmicrouptime(&now);
3209 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3210 		timeradd(&rxba->last_rx, &timeout, &expiry);
3211 		if (timercmp(&now, &expiry, <)) {
3212 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3213 		} else {
3214 			ic->ic_stats.is_ht_rx_ba_timeout++;
3215 			ieee80211_delba_request(ic, ni,
3216 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3217 		}
3218 	}
3219 	splx(s);
3220 }
3221 
3222 void
3223 iwm_reorder_timer_expired(void *arg)
3224 {
3225 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3226 	struct iwm_reorder_buffer *buf = arg;
3227 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3228 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3229 	struct iwm_softc *sc = rxba->sc;
3230 	struct ieee80211com *ic = &sc->sc_ic;
3231 	struct ieee80211_node *ni = ic->ic_bss;
3232 	int i, s;
3233 	uint16_t sn = 0, index = 0;
3234 	int expired = 0;
3235 	int cont = 0;
3236 	struct timeval now, timeout, expiry;
3237 
3238 	if (!buf->num_stored || buf->removed)
3239 		return;
3240 
3241 	s = splnet();
3242 	getmicrouptime(&now);
3243 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3244 
3245 	for (i = 0; i < buf->buf_size ; i++) {
3246 		index = (buf->head_sn + i) % buf->buf_size;
3247 
3248 		if (ml_empty(&entries[index].frames)) {
3249 			/*
3250 			 * If there is a hole and the next frame didn't expire
3251 			 * we want to break and not advance SN.
3252 			 */
3253 			cont = 0;
3254 			continue;
3255 		}
3256 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3257 		if (!cont && timercmp(&now, &expiry, <))
3258 			break;
3259 
3260 		expired = 1;
3261 		/* continue until next hole after this expired frame */
3262 		cont = 1;
3263 		sn = (buf->head_sn + (i + 1)) & 0xfff;
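		/* 802.11 sequence numbers are 12 bits wide, hence the 0xfff mask. */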
3264 	}
3265 
3266 	if (expired) {
3267 		/* SN is set to the last expired frame + 1 */
3268 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3269 		if_input(&sc->sc_ic.ic_if, &ml);
3270 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3271 	} else {
3272 		/*
3273 		 * If no frame expired and there are stored frames, index is now
3274 		 * pointing to the first unexpired frame - modify reorder timeout
3275 		 * accordingly.
3276 		 */
3277 		timeout_add_usec(&buf->reorder_timer,
3278 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3279 	}
3280 
3281 	splx(s);
3282 }
3283 
3284 #define IWM_MAX_RX_BA_SESSIONS 16
3285 
3286 int
3287 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3288     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3289 {
3290 	struct ieee80211com *ic = &sc->sc_ic;
3291 	struct iwm_add_sta_cmd cmd;
3292 	struct iwm_node *in = (void *)ni;
3293 	int err, s;
3294 	uint32_t status;
3295 	size_t cmdsize;
3296 	struct iwm_rxba_data *rxba = NULL;
3297 	uint8_t baid = 0;
3298 
3299 	s = splnet();
3300 
3301 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3302 		ieee80211_addba_req_refuse(ic, ni, tid);
3303 		splx(s);
3304 		return 0;
3305 	}
3306 
3307 	memset(&cmd, 0, sizeof(cmd));
3308 
3309 	cmd.sta_id = IWM_STATION_ID;
3310 	cmd.mac_id_n_color
3311 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3312 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3313 
3314 	if (start) {
3315 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3316 		cmd.add_immediate_ba_ssn = ssn;
3317 		cmd.rx_ba_window = winsize;
3318 	} else {
3319 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3320 	}
3321 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3322 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3323 
3324 	status = IWM_ADD_STA_SUCCESS;
3325 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3326 		cmdsize = sizeof(cmd);
3327 	else
3328 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3329 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3330 	    &status);
3331 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3332 		err = EIO;
3333 	if (err) {
3334 		if (start)
3335 			ieee80211_addba_req_refuse(ic, ni, tid);
3336 		splx(s);
3337 		return err;
3338 	}
3339 
3340 	if (sc->sc_mqrx_supported) {
3341 		/* Deaggregation is done in hardware. */
3342 		if (start) {
3343 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3344 				ieee80211_addba_req_refuse(ic, ni, tid);
3345 				splx(s);
3346 				return EIO;
3347 			}
3348 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3349 			    IWM_ADD_STA_BAID_SHIFT;
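			/* The firmware-assigned BAID indexes our reorder-buffer state. */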
3350 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3351 			    baid >= nitems(sc->sc_rxba_data)) {
3352 				ieee80211_addba_req_refuse(ic, ni, tid);
3353 				splx(s);
3354 				return EIO;
3355 			}
3356 			rxba = &sc->sc_rxba_data[baid];
3357 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3358 				ieee80211_addba_req_refuse(ic, ni, tid);
3359 				splx(s);
3360 				return 0;
3361 			}
3362 			rxba->sta_id = IWM_STATION_ID;
3363 			rxba->tid = tid;
3364 			rxba->baid = baid;
3365 			rxba->timeout = timeout_val;
3366 			getmicrouptime(&rxba->last_rx);
3367 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3368 			    winsize);
3369 			if (timeout_val != 0) {
3370 				struct ieee80211_rx_ba *ba;
3371 				timeout_add_usec(&rxba->session_timer,
3372 				    timeout_val);
3373 				/* XXX disable net80211's BA timeout handler */
3374 				ba = &ni->ni_rx_ba[tid];
3375 				ba->ba_timeout_val = 0;
3376 			}
3377 		} else {
3378 			int i;
3379 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3380 				rxba = &sc->sc_rxba_data[i];
3381 				if (rxba->baid ==
3382 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3383 					continue;
3384 				if (rxba->tid != tid)
3385 					continue;
3386 				iwm_clear_reorder_buffer(sc, rxba);
3387 				break;
3388 			}
3389 		}
3390 	}
3391 
3392 	if (start) {
3393 		sc->sc_rx_ba_sessions++;
3394 		ieee80211_addba_req_accept(ic, ni, tid);
3395 	} else if (sc->sc_rx_ba_sessions > 0)
3396 		sc->sc_rx_ba_sessions--;
3397 
3398 	splx(s);
3399 	return 0;
3400 }
3401 
3402 void
3403 iwm_mac_ctxt_task(void *arg)
3404 {
3405 	struct iwm_softc *sc = arg;
3406 	struct ieee80211com *ic = &sc->sc_ic;
3407 	struct iwm_node *in = (void *)ic->ic_bss;
3408 	int err, s = splnet();
3409 
3410 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3411 	    ic->ic_state != IEEE80211_S_RUN) {
3412 		refcnt_rele_wake(&sc->task_refs);
3413 		splx(s);
3414 		return;
3415 	}
3416 
3417 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3418 	if (err)
3419 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3420 
3421 	iwm_unprotect_session(sc, in);
3422 
3423 	refcnt_rele_wake(&sc->task_refs);
3424 	splx(s);
3425 }
3426 
3427 void
3428 iwm_updateprot(struct ieee80211com *ic)
3429 {
3430 	struct iwm_softc *sc = ic->ic_softc;
3431 
3432 	if (ic->ic_state == IEEE80211_S_RUN &&
3433 	    !task_pending(&sc->newstate_task))
3434 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3435 }
3436 
3437 void
3438 iwm_updateslot(struct ieee80211com *ic)
3439 {
3440 	struct iwm_softc *sc = ic->ic_softc;
3441 
3442 	if (ic->ic_state == IEEE80211_S_RUN &&
3443 	    !task_pending(&sc->newstate_task))
3444 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3445 }
3446 
3447 void
3448 iwm_updateedca(struct ieee80211com *ic)
3449 {
3450 	struct iwm_softc *sc = ic->ic_softc;
3451 
3452 	if (ic->ic_state == IEEE80211_S_RUN &&
3453 	    !task_pending(&sc->newstate_task))
3454 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3455 }
3456 
3457 void
3458 iwm_phy_ctxt_task(void *arg)
3459 {
3460 	struct iwm_softc *sc = arg;
3461 	struct ieee80211com *ic = &sc->sc_ic;
3462 	struct iwm_node *in = (void *)ic->ic_bss;
3463 	struct ieee80211_node *ni = &in->in_ni;
3464 	uint8_t chains, sco, vht_chan_width;
3465 	int err, s = splnet();
3466 
3467 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3468 	    ic->ic_state != IEEE80211_S_RUN ||
3469 	    in->in_phyctxt == NULL) {
3470 		refcnt_rele_wake(&sc->task_refs);
3471 		splx(s);
3472 		return;
3473 	}
3474 
3475 	chains = iwm_mimo_enabled(sc) ? 2 : 1;
3476 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3477 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3478 	    ieee80211_node_supports_ht_chan40(ni))
3479 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3480 	else
3481 		sco = IEEE80211_HTOP0_SCO_SCN;
3482 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3483 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3484 	    ieee80211_node_supports_vht_chan80(ni))
3485 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3486 	else
3487 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3488 	if (in->in_phyctxt->sco != sco ||
3489 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3490 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3491 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3492 		    vht_chan_width);
3493 		if (err)
3494 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3495 		iwm_setrates(in, 0);
3496 	}
3497 
3498 	refcnt_rele_wake(&sc->task_refs);
3499 	splx(s);
3500 }
3501 
3502 void
3503 iwm_updatechan(struct ieee80211com *ic)
3504 {
3505 	struct iwm_softc *sc = ic->ic_softc;
3506 
3507 	if (ic->ic_state == IEEE80211_S_RUN &&
3508 	    !task_pending(&sc->newstate_task))
3509 		iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3510 }
3511 
3512 void
3513 iwm_updatedtim(struct ieee80211com *ic)
3514 {
3515 	struct iwm_softc *sc = ic->ic_softc;
3516 
3517 	if (ic->ic_state == IEEE80211_S_RUN &&
3518 	    !task_pending(&sc->newstate_task))
3519 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3520 }
3521 
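/*
 * Start or stop a Tx aggregation session for the given TID: update the
 * firmware's station entry accordingly and, on start, enable the
 * corresponding aggregation Tx queue.  On stop, pending frames are
 * flushed but the queue stays enabled; firmware panics if the queue
 * is disabled here.
 */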
3522 int
3523 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3524     uint16_t ssn, uint16_t winsize, int start)
3525 {
3526 	struct iwm_add_sta_cmd cmd;
3527 	struct ieee80211com *ic = &sc->sc_ic;
3528 	struct iwm_node *in = (void *)ni;
3529 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3530 	struct iwm_tx_ring *ring;
3531 	enum ieee80211_edca_ac ac;
3532 	int fifo;
3533 	uint32_t status;
3534 	int err;
3535 	size_t cmdsize;
3536 
3537 	/* Ensure we can map this TID to an aggregation queue. */
3538 	if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3539 		return ENOSPC;
3540 
3541 	if (start) {
3542 		if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3543 			return 0;
3544 	} else {
3545 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3546 			return 0;
3547 	}
3548 
3549 	ring = &sc->txq[qid];
3550 	ac = iwm_tid_to_ac[tid];
3551 	fifo = iwm_ac_to_tx_fifo[ac];
3552 
3553 	memset(&cmd, 0, sizeof(cmd));
3554 
3555 	cmd.sta_id = IWM_STATION_ID;
3556 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3557 	    in->in_color));
3558 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3559 
3560 	if (start) {
3561 		/* Enable Tx aggregation for this queue. */
3562 		in->tid_disable_ampdu &= ~(1 << tid);
3563 		in->tfd_queue_msk |= (1 << qid);
3564 	} else {
3565 		in->tid_disable_ampdu |= (1 << tid);
3566 		/*
3567 		 * Queue remains enabled in the TFD queue mask
3568 		 * until we leave RUN state.
3569 		 */
3570 		err = iwm_flush_sta(sc, in);
3571 		if (err)
3572 			return err;
3573 	}
3574 
3575 	cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3576 	cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3577 	cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3578 	    IWM_STA_MODIFY_TID_DISABLE_TX);
3579 
3580 	if (start && (sc->qenablemsk & (1 << qid)) == 0) {
		if (!iwm_nic_lock(sc)) {
			ieee80211_addba_resp_refuse(ic, ni, tid,
			    IEEE80211_STATUS_UNSPECIFIED);
			return EBUSY;
		}
3587 		err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3588 		    ssn);
3589 		iwm_nic_unlock(sc);
3590 		if (err) {
3591 			printf("%s: could not enable Tx queue %d (error %d)\n",
3592 			    DEVNAME(sc), qid, err);
			ieee80211_addba_resp_refuse(ic, ni, tid,
			    IEEE80211_STATUS_UNSPECIFIED);
3596 			return err;
3597 		}
3598 		/*
3599 		 * If iwm_enable_txq() employed the SCD hardware bug
3600 		 * workaround we must skip the frame with seqnum SSN.
3601 		 */
3602 		if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3603 			ssn = (ssn + 1) & 0xfff;
3604 			KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3605 			ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3606 			ni->ni_qos_txseqs[tid] = ssn;
3607 		}
3608 	}
3609 
3610 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3611 		cmdsize = sizeof(cmd);
3612 	else
3613 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3614 
3615 	status = 0;
3616 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3617 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3618 		err = EIO;
3619 	if (err) {
3620 		printf("%s: could not update sta (error %d)\n",
3621 		    DEVNAME(sc), err);
3622 		if (start)
3623 			ieee80211_addba_resp_refuse(ic, ni, tid,
3624 			    IEEE80211_STATUS_UNSPECIFIED);
3625 		return err;
3626 	}
3627 
3628 	if (start) {
3629 		sc->tx_ba_queue_mask |= (1 << qid);
3630 		ieee80211_addba_resp_accept(ic, ni, tid);
3631 	} else {
3632 		sc->tx_ba_queue_mask &= ~(1 << qid);
3633 
3634 		/*
3635 		 * Clear pending frames but keep the queue enabled.
3636 		 * Firmware panics if we disable the queue here.
3637 		 */
3638 		iwm_txq_advance(sc, ring, ring->cur);
3639 		iwm_clear_oactive(sc, ring);
3640 	}
3641 
3642 	return 0;
3643 }
3644 
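/*
 * Process any pending Block Ack session start/stop requests recorded
 * in the ba_rx/ba_tx tidmasks.  This runs in a task context because
 * the firmware commands involved may sleep.
 */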
3645 void
3646 iwm_ba_task(void *arg)
3647 {
3648 	struct iwm_softc *sc = arg;
3649 	struct ieee80211com *ic = &sc->sc_ic;
3650 	struct ieee80211_node *ni = ic->ic_bss;
3651 	int s = splnet();
3652 	int tid, err = 0;
3653 
3654 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3655 	    ic->ic_state != IEEE80211_S_RUN) {
3656 		refcnt_rele_wake(&sc->task_refs);
3657 		splx(s);
3658 		return;
3659 	}
3660 
3661 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3662 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3663 			break;
3664 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3665 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3666 			err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3667 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3668 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3669 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3670 			err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3671 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3672 		}
3673 	}
3674 
3675 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3676 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3677 			break;
3678 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3679 			struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3680 			err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3681 			    ba->ba_winsize, 1);
3682 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3683 		} else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3684 			err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3685 			sc->ba_tx.stop_tidmask &= ~(1 << tid);
3686 		}
3687 	}
3688 
3689 	/*
3690 	 * We "recover" from failure to start or stop a BA session
3691 	 * by resetting the device.
3692 	 */
3693 	if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3694 		task_add(systq, &sc->init_task);
3695 
3696 	refcnt_rele_wake(&sc->task_refs);
3697 	splx(s);
3698 }
3699 
3700 /*
3701  * This function is called by upper layer when an ADDBA request is received
3702  * from another STA and before the ADDBA response is sent.
3703  */
3704 int
3705 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3706     uint8_t tid)
3707 {
3708 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3709 
3710 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3711 	    tid > IWM_MAX_TID_COUNT)
3712 		return ENOSPC;
3713 
3714 	if (sc->ba_rx.start_tidmask & (1 << tid))
3715 		return EBUSY;
3716 
3717 	sc->ba_rx.start_tidmask |= (1 << tid);
3718 	iwm_add_task(sc, systq, &sc->ba_task);
3719 
3720 	return EBUSY;
3721 }
3722 
3723 /*
3724  * This function is called by upper layer on teardown of an HT-immediate
3725  * Block Ack agreement (eg. upon receipt of a DELBA frame).
3726  */
3727 void
3728 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3729     uint8_t tid)
3730 {
3731 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3732 
3733 	if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3734 		return;
3735 
3736 	sc->ba_rx.stop_tidmask |= (1 << tid);
3737 	iwm_add_task(sc, systq, &sc->ba_task);
3738 }
3739 
3740 int
3741 iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3742     uint8_t tid)
3743 {
3744 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3745 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3746 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3747 
3748 	/* We only implement Tx aggregation with DQA-capable firmware. */
3749 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3750 		return ENOTSUP;
3751 
3752 	/* Ensure we can map this TID to an aggregation queue. */
3753 	if (tid >= IWM_MAX_TID_COUNT)
3754 		return EINVAL;
3755 
3756 	/* We only support a fixed Tx aggregation window size, for now. */
3757 	if (ba->ba_winsize != IWM_FRAME_LIMIT)
3758 		return ENOTSUP;
3759 
3760 	/* Is firmware already using Tx aggregation on this queue? */
3761 	if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3762 		return ENOSPC;
3763 
3764 	/* Are we already processing an ADDBA request? */
3765 	if (sc->ba_tx.start_tidmask & (1 << tid))
3766 		return EBUSY;
3767 
3768 	sc->ba_tx.start_tidmask |= (1 << tid);
3769 	iwm_add_task(sc, systq, &sc->ba_task);
3770 
3771 	return EBUSY;
3772 }
3773 
3774 void
3775 iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3776     uint8_t tid)
3777 {
3778 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3779 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3780 
3781 	if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3782 		return;
3783 
3784 	/* Is firmware currently using Tx aggregation on this queue? */
3785 	if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3786 		return;
3787 
3788 	sc->ba_tx.stop_tidmask |= (1 << tid);
3789 	iwm_add_task(sc, systq, &sc->ba_task);
3790 }
3791 
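/*
 * Determine the hardware address on 8000-family devices: prefer a
 * valid address from the MAC-override NVM section, and fall back to
 * the address stored in the WFMP registers otherwise.
 */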
3792 void
3793 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3794     const uint16_t *mac_override, const uint16_t *nvm_hw)
3795 {
3796 	const uint8_t *hw_addr;
3797 
3798 	if (mac_override) {
3799 		static const uint8_t reserved_mac[] = {
3800 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3801 		};
3802 
3803 		hw_addr = (const uint8_t *)(mac_override +
3804 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3805 
3806 		/*
3807 		 * Store the MAC address from MAO section.
3808 		 * No byte swapping is required in MAO section
3809 		 */
3810 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3811 
3812 		/*
3813 		 * Force the use of the OTP MAC address in case of reserved MAC
3814 		 * address in the NVM, or if address is given but invalid.
3815 		 */
3816 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3817 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3818 		    sizeof(etherbroadcastaddr)) != 0) &&
3819 		    (memcmp(etheranyaddr, data->hw_addr,
3820 		    sizeof(etheranyaddr)) != 0) &&
3821 		    !ETHER_IS_MULTICAST(data->hw_addr))
3822 			return;
3823 	}
3824 
3825 	if (nvm_hw) {
3826 		/* Read the mac address from WFMP registers. */
3827 		uint32_t mac_addr0, mac_addr1;
3828 
3829 		if (!iwm_nic_lock(sc))
3830 			goto out;
3831 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3832 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3833 		iwm_nic_unlock(sc);
3834 
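		/*
		 * The most significant byte of the address sits in the
		 * high byte of IWM_WFMP_MAC_ADDR_0; e.g. register values
		 * 0x00112233 and 0x00004455 yield 00:11:22:33:44:55.
		 */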
3835 		hw_addr = (const uint8_t *)&mac_addr0;
3836 		data->hw_addr[0] = hw_addr[3];
3837 		data->hw_addr[1] = hw_addr[2];
3838 		data->hw_addr[2] = hw_addr[1];
3839 		data->hw_addr[3] = hw_addr[0];
3840 
3841 		hw_addr = (const uint8_t *)&mac_addr1;
3842 		data->hw_addr[4] = hw_addr[1];
3843 		data->hw_addr[5] = hw_addr[0];
3844 
3845 		return;
3846 	}
3847 out:
3848 	printf("%s: mac address not found\n", DEVNAME(sc));
3849 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3850 }
3851 
3852 int
3853 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3854     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3855     const uint16_t *mac_override, const uint16_t *phy_sku,
3856     const uint16_t *regulatory, int n_regulatory)
3857 {
3858 	struct iwm_nvm_data *data = &sc->sc_nvm;
3859 	uint8_t hw_addr[ETHER_ADDR_LEN];
3860 	uint32_t sku;
3861 	uint16_t lar_config;
3862 
3863 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3864 
3865 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3866 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3867 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3868 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3869 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3870 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3871 
3872 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3873 	} else {
3874 		uint32_t radio_cfg =
3875 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3876 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3877 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3878 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3879 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3880 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3881 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3882 
3883 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3884 	}
3885 
3886 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3887 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3888 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3889 	data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE;
3890 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3891 
3892 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3893 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3894 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3895 				       IWM_NVM_LAR_OFFSET_8000;
3896 
3897 		lar_config = le16_to_cpup(regulatory + lar_offset);
3898 		data->lar_enabled = !!(lar_config &
3899 				       IWM_NVM_LAR_ENABLED_8000);
3900 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3901 	} else
		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

3905 	/* The byte order is little endian 16 bit, meaning 214365 */
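	/* E.g. NVM bytes 00:11:22:33:44:55 yield hw_addr 11:00:33:22:55:44. */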
3906 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3907 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3908 		data->hw_addr[0] = hw_addr[1];
3909 		data->hw_addr[1] = hw_addr[0];
3910 		data->hw_addr[2] = hw_addr[3];
3911 		data->hw_addr[3] = hw_addr[2];
3912 		data->hw_addr[4] = hw_addr[5];
3913 		data->hw_addr[5] = hw_addr[4];
3914 	} else
3915 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3916 
3917 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3918 		if (sc->nvm_type == IWM_NVM_SDP) {
3919 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3920 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3921 		} else {
3922 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3923 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3924 		}
3925 	} else
3926 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3927 		    iwm_nvm_channels_8000,
3928 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3929 
	data->calib_version = 255;   /* TODO: this value prevents some
					checks from failing; we need to
					determine whether this field is
					still needed and, if so, where
					it lives in the NVM */
3935 
3936 	return 0;
3937 }
3938 
3939 int
3940 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3941 {
3942 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3943 	const uint16_t *regulatory = NULL;
3944 	int n_regulatory = 0;
3945 
	/* Check for required sections. */
3947 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3948 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3949 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3950 			return ENOENT;
3951 		}
3952 
3953 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3954 
3955 		if (sc->nvm_type == IWM_NVM_SDP) {
3956 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3957 				return ENOENT;
3958 			regulatory = (const uint16_t *)
3959 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3960 			n_regulatory =
3961 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3962 		}
3963 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3964 		/* SW and REGULATORY sections are mandatory */
3965 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3966 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3967 			return ENOENT;
3968 		}
3969 		/* MAC_OVERRIDE or at least HW section must exist */
3970 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3971 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3972 			return ENOENT;
3973 		}
3974 
3975 		/* PHY_SKU section is mandatory in B0 */
3976 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3977 			return ENOENT;
3978 		}
3979 
3980 		regulatory = (const uint16_t *)
3981 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3982 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
3983 		hw = (const uint16_t *)
3984 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3985 		mac_override =
3986 			(const uint16_t *)
3987 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3988 		phy_sku = (const uint16_t *)
3989 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3990 	} else {
3991 		panic("unknown device family %d", sc->sc_device_family);
3992 	}
3993 
3994 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3995 	calib = (const uint16_t *)
3996 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3997 
3998 	/* XXX should pass in the length of every section */
3999 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
4000 	    phy_sku, regulatory, n_regulatory);
4001 }
4002 
4003 int
4004 iwm_nvm_init(struct iwm_softc *sc)
4005 {
4006 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
4007 	int i, section, err;
4008 	uint16_t len;
4009 	uint8_t *buf;
4010 	const size_t bufsz = sc->sc_nvm_max_section_size;
4011 
4012 	memset(nvm_sections, 0, sizeof(nvm_sections));
4013 
4014 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
4015 	if (buf == NULL)
4016 		return ENOMEM;
4017 
4018 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
4019 		section = iwm_nvm_to_read[i];
		KASSERT(section < nitems(nvm_sections));
4021 
4022 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
4023 		if (err) {
4024 			err = 0;
4025 			continue;
4026 		}
4027 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
4028 		if (nvm_sections[section].data == NULL) {
4029 			err = ENOMEM;
4030 			break;
4031 		}
4032 		memcpy(nvm_sections[section].data, buf, len);
4033 		nvm_sections[section].length = len;
4034 	}
4035 	free(buf, M_DEVBUF, bufsz);
4036 	if (err == 0)
4037 		err = iwm_parse_nvm_sections(sc, nvm_sections);
4038 
4039 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
4040 		if (nvm_sections[i].data != NULL)
4041 			free(nvm_sections[i].data, M_DEVBUF,
4042 			    nvm_sections[i].length);
4043 	}
4044 
4045 	return err;
4046 }
4047 
4048 int
4049 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
4050     const uint8_t *section, uint32_t byte_cnt)
4051 {
4052 	int err = EINVAL;
4053 	uint32_t chunk_sz, offset;
4054 
4055 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
4056 
4057 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
4058 		uint32_t addr, len;
4059 		const uint8_t *data;
4060 
4061 		addr = dst_addr + offset;
4062 		len = MIN(chunk_sz, byte_cnt - offset);
4063 		data = section + offset;
4064 
4065 		err = iwm_firmware_load_chunk(sc, addr, data, len);
4066 		if (err)
4067 			break;
4068 	}
4069 
4070 	return err;
4071 }
4072 
4073 int
4074 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4075     const uint8_t *chunk, uint32_t byte_cnt)
4076 {
4077 	struct iwm_dma_info *dma = &sc->fw_dma;
4078 	int err;
4079 
4080 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
4081 	memcpy(dma->vaddr, chunk, byte_cnt);
4082 	bus_dmamap_sync(sc->sc_dmat,
4083 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4084 
4085 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4086 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4087 		err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4088 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4089 		if (err)
4090 			return err;
4091 	}
4092 
4093 	sc->sc_fw_chunk_done = 0;
4094 
4095 	if (!iwm_nic_lock(sc))
4096 		return EBUSY;
4097 
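	/*
	 * Program the FH service channel to transfer the chunk: pause
	 * the channel, set the SRAM destination and DRAM source, set
	 * the byte count, mark the single TFD buffer valid, and then
	 * re-enable the channel.  A firmware-load interrupt sets
	 * sc_fw_chunk_done and wakes us up when the DMA completes.
	 */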
4098 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4099 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4100 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4101 	    dst_addr);
4102 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4103 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4104 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4105 	    (iwm_get_dma_hi_addr(dma->paddr)
4106 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4107 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4108 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4109 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4110 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4111 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4112 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
4113 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4114 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4115 
4116 	iwm_nic_unlock(sc);
4117 
4118 	/* Wait for this segment to load. */
4119 	err = 0;
4120 	while (!sc->sc_fw_chunk_done) {
4121 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4122 		if (err)
4123 			break;
4124 	}
4125 
4126 	if (!sc->sc_fw_chunk_done)
4127 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4128 		    DEVNAME(sc), dst_addr, byte_cnt);
4129 
4130 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4131 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4132 		int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4133 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4134 		if (!err)
4135 			err = err2;
4136 	}
4137 
4138 	return err;
4139 }
4140 
4141 int
4142 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4143 {
4144 	struct iwm_fw_sects *fws;
4145 	int err, i;
4146 	void *data;
4147 	uint32_t dlen;
4148 	uint32_t offset;
4149 
4150 	fws = &sc->sc_fw.fw_sects[ucode_type];
4151 	for (i = 0; i < fws->fw_count; i++) {
4152 		data = fws->fw_sect[i].fws_data;
4153 		dlen = fws->fw_sect[i].fws_len;
4154 		offset = fws->fw_sect[i].fws_devoff;
		if (dlen > sc->sc_fwdmasegsz)
			err = EFBIG;
		else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4159 		if (err) {
4160 			printf("%s: could not load firmware chunk %u of %u\n",
4161 			    DEVNAME(sc), i, fws->fw_count);
4162 			return err;
4163 		}
4164 	}
4165 
4166 	iwm_enable_interrupts(sc);
4167 
4168 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
4169 
4170 	return 0;
4171 }
4172 
4173 int
4174 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4175     int cpu, int *first_ucode_section)
4176 {
4177 	int shift_param;
4178 	int i, err = 0, sec_num = 0x1;
4179 	uint32_t val, last_read_idx = 0;
4180 	void *data;
4181 	uint32_t dlen;
4182 	uint32_t offset;
4183 
4184 	if (cpu == 1) {
4185 		shift_param = 0;
4186 		*first_ucode_section = 0;
4187 	} else {
4188 		shift_param = 16;
4189 		(*first_ucode_section)++;
4190 	}
4191 
4192 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4193 		last_read_idx = i;
4194 		data = fws->fw_sect[i].fws_data;
4195 		dlen = fws->fw_sect[i].fws_len;
4196 		offset = fws->fw_sect[i].fws_devoff;
4197 
4198 		/*
4199 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
4200 		 * CPU1 to CPU2.
4201 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
4202 		 * CPU2 non paged to CPU2 paging sec.
4203 		 */
4204 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4205 		    offset == IWM_PAGING_SEPARATOR_SECTION)
4206 			break;
4207 
		if (dlen > sc->sc_fwdmasegsz)
			err = EFBIG;
		else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4212 		if (err) {
4213 			printf("%s: could not load firmware chunk %d "
4214 			    "(error %d)\n", DEVNAME(sc), i, err);
4215 			return err;
4216 		}
4217 
4218 		/* Notify the ucode of the loaded section number and status */
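		/*
		 * The load-status register accumulates a bitmask of loaded
		 * sections: the value written for the k-th section has its
		 * low k bits set, shifted into the upper 16 bits for CPU2.
		 */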
4219 		if (iwm_nic_lock(sc)) {
4220 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4221 			val = val | (sec_num << shift_param);
4222 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4223 			sec_num = (sec_num << 1) | 0x1;
4224 			iwm_nic_unlock(sc);
4225 		} else {
4226 			err = EBUSY;
4227 			printf("%s: could not load firmware chunk %d "
4228 			    "(error %d)\n", DEVNAME(sc), i, err);
4229 			return err;
4230 		}
4231 	}
4232 
4233 	*first_ucode_section = last_read_idx;
4234 
4235 	if (iwm_nic_lock(sc)) {
4236 		if (cpu == 1)
4237 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4238 		else
4239 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4240 		iwm_nic_unlock(sc);
4241 	} else {
4242 		err = EBUSY;
4243 		printf("%s: could not finalize firmware loading (error %d)\n",
4244 		    DEVNAME(sc), err);
4245 		return err;
4246 	}
4247 
4248 	return 0;
4249 }
4250 
4251 int
4252 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4253 {
4254 	struct iwm_fw_sects *fws;
4255 	int err = 0;
4256 	int first_ucode_section;
4257 
4258 	fws = &sc->sc_fw.fw_sects[ucode_type];
4259 
4260 	/* configure the ucode to be ready to get the secured image */
4261 	/* release CPU reset */
4262 	if (iwm_nic_lock(sc)) {
4263 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4264 		    IWM_RELEASE_CPU_RESET_BIT);
4265 		iwm_nic_unlock(sc);
4266 	}
4267 
	/* Load the secured binary sections of CPU1 into the firmware. */
4269 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4270 	if (err)
4271 		return err;
4272 
	/* Load the binary sections of CPU2 into the firmware. */
4274 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4275 	if (err)
4276 		return err;
4277 
4278 	iwm_enable_interrupts(sc);
4279 	return 0;
4280 }
4281 
4282 int
4283 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4284 {
4285 	int err;
4286 
4287 	splassert(IPL_NET);
4288 
4289 	sc->sc_uc.uc_intr = 0;
4290 	sc->sc_uc.uc_ok = 0;
4291 
4292 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4293 		err = iwm_load_firmware_8000(sc, ucode_type);
4294 	else
4295 		err = iwm_load_firmware_7000(sc, ucode_type);
4296 
4297 	if (err)
4298 		return err;
4299 
4300 	/* wait for the firmware to load */
4301 	err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4302 	if (err || !sc->sc_uc.uc_ok)
4303 		printf("%s: could not load firmware\n", DEVNAME(sc));
4304 
4305 	return err;
4306 }
4307 
4308 int
4309 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4310 {
4311 	int err;
4312 
4313 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4314 
4315 	err = iwm_nic_init(sc);
4316 	if (err) {
4317 		printf("%s: unable to init nic\n", DEVNAME(sc));
4318 		return err;
4319 	}
4320 
4321 	/* make sure rfkill handshake bits are cleared */
4322 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4323 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4324 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4325 
4326 	/* clear (again), then enable firmware load interrupt */
4327 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4328 	iwm_enable_fwload_interrupt(sc);
4329 
4330 	/* really make sure rfkill handshake bits are cleared */
4331 	/* maybe we should write a few times more?  just to make sure */
4332 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4333 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4334 
4335 	return iwm_load_firmware(sc, ucode_type);
4336 }
4337 
4338 int
4339 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4340 {
4341 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4342 		.valid = htole32(valid_tx_ant),
4343 	};
4344 
4345 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4346 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4347 }
4348 
4349 int
4350 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4351 {
4352 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
4353 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4354 
4355 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4356 	    sc->sc_extra_phy_config);
4357 	phy_cfg_cmd.calib_control.event_trigger =
4358 	    sc->sc_default_calib[ucode_type].event_trigger;
4359 	phy_cfg_cmd.calib_control.flow_trigger =
4360 	    sc->sc_default_calib[ucode_type].flow_trigger;
4361 
4362 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4363 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4364 }
4365 
4366 int
4367 iwm_send_dqa_cmd(struct iwm_softc *sc)
4368 {
4369 	struct iwm_dqa_enable_cmd dqa_cmd = {
4370 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4371 	};
4372 	uint32_t cmd_id;
4373 
4374 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4375 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4376 }
4377 
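/*
 * Load a ucode image and wait for the firmware's "alive" notification,
 * then set up firmware paging if the image makes use of it.  The
 * command queue ID depends on whether the firmware supports DQA.
 */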
4378 int
4379 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4380 	enum iwm_ucode_type ucode_type)
4381 {
4382 	enum iwm_ucode_type old_type = sc->sc_uc_current;
4383 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4384 	int err;
4385 
4386 	err = iwm_read_firmware(sc);
4387 	if (err)
4388 		return err;
4389 
4390 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4391 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
4392 	else
4393 		sc->cmdqid = IWM_CMD_QUEUE;
4394 
4395 	sc->sc_uc_current = ucode_type;
4396 	err = iwm_start_fw(sc, ucode_type);
4397 	if (err) {
4398 		sc->sc_uc_current = old_type;
4399 		return err;
4400 	}
4401 
4402 	err = iwm_post_alive(sc);
4403 	if (err)
4404 		return err;
4405 
4406 	/*
4407 	 * configure and operate fw paging mechanism.
4408 	 * driver configures the paging flow only once, CPU2 paging image
4409 	 * included in the IWM_UCODE_INIT image.
4410 	 */
4411 	if (fw->paging_mem_size) {
4412 		err = iwm_save_fw_paging(sc, fw);
4413 		if (err) {
4414 			printf("%s: failed to save the FW paging image\n",
4415 			    DEVNAME(sc));
4416 			return err;
4417 		}
4418 
4419 		err = iwm_send_paging_cmd(sc, fw);
4420 		if (err) {
4421 			printf("%s: failed to send the paging cmd\n",
4422 			    DEVNAME(sc));
4423 			iwm_free_fw_paging(sc);
4424 			return err;
4425 		}
4426 	}
4427 
4428 	return 0;
4429 }
4430 
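/*
 * Run the init firmware image.  With justnvm set, only read the NVM
 * and derive our MAC address; otherwise trigger the init image's
 * internal calibrations and wait for the init-complete and PHY DB
 * notifications from the firmware.
 */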
4431 int
4432 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4433 {
4434 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4435 	int err, s;
4436 
4437 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4438 		printf("%s: radio is disabled by hardware switch\n",
4439 		    DEVNAME(sc));
4440 		return EPERM;
4441 	}
4442 
4443 	s = splnet();
4444 	sc->sc_init_complete = 0;
4445 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4446 	if (err) {
4447 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4448 		splx(s);
4449 		return err;
4450 	}
4451 
4452 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4453 		err = iwm_send_bt_init_conf(sc);
4454 		if (err) {
4455 			printf("%s: could not init bt coex (error %d)\n",
4456 			    DEVNAME(sc), err);
4457 			splx(s);
4458 			return err;
4459 		}
4460 	}
4461 
4462 	if (justnvm) {
4463 		err = iwm_nvm_init(sc);
4464 		if (err) {
4465 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4466 			splx(s);
4467 			return err;
4468 		}
4469 
4470 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4471 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4472 			    sc->sc_nvm.hw_addr);
4473 
4474 		splx(s);
4475 		return 0;
4476 	}
4477 
4478 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4479 	if (err) {
4480 		splx(s);
4481 		return err;
4482 	}
4483 
4484 	/* Send TX valid antennas before triggering calibrations */
4485 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4486 	if (err) {
4487 		splx(s);
4488 		return err;
4489 	}
4490 
4491 	/*
4492 	 * Send phy configurations command to init uCode
4493 	 * to start the 16.0 uCode init image internal calibrations.
4494 	 */
4495 	err = iwm_send_phy_cfg_cmd(sc);
4496 	if (err) {
4497 		splx(s);
4498 		return err;
4499 	}
4500 
4501 	/*
4502 	 * Nothing to do but wait for the init complete and phy DB
4503 	 * notifications from the firmware.
4504 	 */
4505 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4506 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4507 		    SEC_TO_NSEC(2));
4508 		if (err)
4509 			break;
4510 	}
4511 
4512 	splx(s);
4513 	return err;
4514 }
4515 
4516 int
4517 iwm_config_ltr(struct iwm_softc *sc)
4518 {
4519 	struct iwm_ltr_config_cmd cmd = {
4520 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4521 	};
4522 
4523 	if (!sc->sc_ltr_enabled)
4524 		return 0;
4525 
4526 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4527 }
4528 
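/*
 * Allocate a fresh RX buffer for ring slot idx, map it for DMA, and
 * write its bus address into the RX descriptor: MQ hardware takes a
 * 64-bit address, older hardware a 32-bit address in 256-byte units.
 */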
4529 int
4530 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4531 {
4532 	struct iwm_rx_ring *ring = &sc->rxq;
4533 	struct iwm_rx_data *data = &ring->data[idx];
4534 	struct mbuf *m;
4535 	int err;
4536 	int fatal = 0;
4537 
4538 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4539 	if (m == NULL)
4540 		return ENOBUFS;
4541 
4542 	if (size <= MCLBYTES) {
4543 		MCLGET(m, M_DONTWAIT);
4544 	} else {
4545 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4546 	}
4547 	if ((m->m_flags & M_EXT) == 0) {
4548 		m_freem(m);
4549 		return ENOBUFS;
4550 	}
4551 
4552 	if (data->m != NULL) {
4553 		bus_dmamap_unload(sc->sc_dmat, data->map);
4554 		fatal = 1;
4555 	}
4556 
4557 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4558 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4559 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4560 	if (err) {
4561 		/* XXX */
4562 		if (fatal)
4563 			panic("iwm: could not load RX mbuf");
4564 		m_freem(m);
4565 		return err;
4566 	}
4567 	data->m = m;
4568 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4569 
4570 	/* Update RX descriptor. */
4571 	if (sc->sc_mqrx_supported) {
4572 		((uint64_t *)ring->desc)[idx] =
4573 		    htole64(data->map->dm_segs[0].ds_addr);
4574 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4575 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4576 		    BUS_DMASYNC_PREWRITE);
4577 	} else {
4578 		((uint32_t *)ring->desc)[idx] =
4579 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4580 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4581 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4582 		    BUS_DMASYNC_PREWRITE);
4583 	}
4584 
4585 	return 0;
4586 }
4587 
4588 /*
4589  * RSSI values are reported by the FW as positive values - need to negate
4590  * to obtain their dBM.  Account for missing antennas by replacing 0
4591  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
4592  */
4593 int
4594 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4595 {
4596 	int energy_a, energy_b, energy_c, max_energy;
4597 	uint32_t val;
4598 
4599 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4600 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4601 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4602 	energy_a = energy_a ? -energy_a : -256;
4603 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4604 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4605 	energy_b = energy_b ? -energy_b : -256;
4606 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4607 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4608 	energy_c = energy_c ? -energy_c : -256;
4609 	max_energy = MAX(energy_a, energy_b);
4610 	max_energy = MAX(max_energy, energy_c);
4611 
4612 	return max_energy;
4613 }
4614 
4615 int
4616 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4617     struct iwm_rx_mpdu_desc *desc)
4618 {
4619 	int energy_a, energy_b;
4620 
4621 	energy_a = desc->v1.energy_a;
4622 	energy_b = desc->v1.energy_b;
4623 	energy_a = energy_a ? -energy_a : -256;
4624 	energy_b = energy_b ? -energy_b : -256;
4625 	return MAX(energy_a, energy_b);
4626 }
4627 
4628 void
4629 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4630     struct iwm_rx_data *data)
4631 {
4632 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4633 
4634 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4635 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4636 
4637 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4638 }
4639 
4640 /*
4641  * Retrieve the average noise (in dBm) among receivers.
4642  */
4643 int
4644 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4645 {
4646 	int i, total, nbant, noise;
4647 
4648 	total = nbant = noise = 0;
4649 	for (i = 0; i < 3; i++) {
4650 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4651 		if (noise) {
4652 			total += noise;
4653 			nbant++;
4654 		}
4655 	}
4656 
4657 	/* There should be at least one antenna but check anyway. */
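	/*
	 * The beacon silence RSSI is reported as a positive quantity;
	 * the -107 offset used to convert the average into dBm and the
	 * -127 fallback follow the Linux iwlwifi reference code.
	 */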
4658 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4659 }
4660 
4661 int
4662 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4663     struct ieee80211_rxinfo *rxi)
4664 {
4665 	struct ieee80211com *ic = &sc->sc_ic;
4666 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4667 	struct ieee80211_frame *wh;
4668 	uint64_t pn, *prsc;
4669 	uint8_t *ivp;
4670 	uint8_t tid;
4671 	int hdrlen, hasqos;
4672 
4673 	wh = mtod(m, struct ieee80211_frame *);
4674 	hdrlen = ieee80211_get_hdrlen(wh);
4675 	ivp = (uint8_t *)wh + hdrlen;
4676 
4677 	/* Check that ExtIV bit is set. */
4678 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4679 		return 1;
4680 
4681 	hasqos = ieee80211_has_qos(wh);
4682 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4683 	prsc = &k->k_rsc[tid];
4684 
4685 	/* Extract the 48-bit PN from the CCMP header. */
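	/*
	 * The CCMP header bytes are PN0, PN1, reserved, key ID/ExtIV,
	 * PN2, PN3, PN4, PN5, where PN0 is the least significant byte
	 * of the 48-bit packet number.
	 */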
4686 	pn = (uint64_t)ivp[0]       |
4687 	     (uint64_t)ivp[1] <<  8 |
4688 	     (uint64_t)ivp[4] << 16 |
4689 	     (uint64_t)ivp[5] << 24 |
4690 	     (uint64_t)ivp[6] << 32 |
4691 	     (uint64_t)ivp[7] << 40;
4692 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4693 		if (pn < *prsc) {
4694 			ic->ic_stats.is_ccmp_replays++;
4695 			return 1;
4696 		}
4697 	} else if (pn <= *prsc) {
4698 		ic->ic_stats.is_ccmp_replays++;
4699 		return 1;
4700 	}
4701 	/* Last seen packet number is updated in ieee80211_inputm(). */
4702 
4703 	/*
4704 	 * Some firmware versions strip the MIC, and some don't. It is not
4705 	 * clear which of the capability flags could tell us what to expect.
4706 	 * For now, keep things simple and just leave the MIC in place if
4707 	 * it is present.
4708 	 *
4709 	 * The IV will be stripped by ieee80211_inputm().
4710 	 */
4711 	return 0;
4712 }
4713 
4714 int
4715 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4716     struct ieee80211_rxinfo *rxi)
4717 {
4718 	struct ieee80211com *ic = &sc->sc_ic;
4719 	struct ifnet *ifp = IC2IFP(ic);
4720 	struct ieee80211_frame *wh;
4721 	struct ieee80211_node *ni;
4722 	int ret = 0;
4723 	uint8_t type, subtype;
4724 
4725 	wh = mtod(m, struct ieee80211_frame *);
4726 
4727 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4728 	if (type == IEEE80211_FC0_TYPE_CTL)
4729 		return 0;
4730 
4731 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4732 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4733 		return 0;
4734 
4735 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4736 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4737 		return 0;
4738 
4739 	ni = ieee80211_find_rxnode(ic, wh);
4740 	/* Handle hardware decryption. */
4741 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4742 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4743 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4744 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4745 			ic->ic_stats.is_ccmp_dec_errs++;
4746 			ret = 1;
4747 			goto out;
4748 		}
4749 		/* Check whether decryption was successful or not. */
4750 		if ((rx_pkt_status &
4751 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4752 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4753 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4754 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4755 			ic->ic_stats.is_ccmp_dec_errs++;
4756 			ret = 1;
4757 			goto out;
4758 		}
4759 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4760 	}
4761 out:
4762 	if (ret)
4763 		ifp->if_ierrors++;
4764 	ieee80211_release_node(ic, ni);
4765 	return ret;
4766 }
4767 
4768 void
4769 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4770     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4771     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4772     struct mbuf_list *ml)
4773 {
4774 	struct ieee80211com *ic = &sc->sc_ic;
4775 	struct ifnet *ifp = IC2IFP(ic);
4776 	struct ieee80211_frame *wh;
4777 	struct ieee80211_node *ni;
4778 
4779 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4780 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4781 
4782 	wh = mtod(m, struct ieee80211_frame *);
4783 	ni = ieee80211_find_rxnode(ic, wh);
4784 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4785 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4786 		ifp->if_ierrors++;
4787 		m_freem(m);
4788 		ieee80211_release_node(ic, ni);
4789 		return;
4790 	}
4791 
4792 #if NBPFILTER > 0
4793 	if (sc->sc_drvbpf != NULL) {
4794 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4795 		uint16_t chan_flags;
4796 
4797 		tap->wr_flags = 0;
4798 		if (is_shortpre)
4799 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4800 		tap->wr_chan_freq =
4801 		    htole16(ic->ic_channels[chanidx].ic_freq);
4802 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4803 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4804 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4805 			chan_flags &= ~IEEE80211_CHAN_HT;
4806 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4807 		}
4808 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4809 			chan_flags &= ~IEEE80211_CHAN_VHT;
4810 		tap->wr_chan_flags = htole16(chan_flags);
4811 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4812 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4813 		tap->wr_tsft = device_timestamp;
4814 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4815 			uint8_t mcs = (rate_n_flags &
4816 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4817 			    IWM_RATE_HT_MCS_NSS_MSK));
4818 			tap->wr_rate = (0x80 | mcs);
4819 		} else {
4820 			uint8_t rate = (rate_n_flags &
4821 			    IWM_RATE_LEGACY_RATE_MSK);
4822 			switch (rate) {
4823 			/* CCK rates. */
4824 			case  10: tap->wr_rate =   2; break;
4825 			case  20: tap->wr_rate =   4; break;
4826 			case  55: tap->wr_rate =  11; break;
4827 			case 110: tap->wr_rate =  22; break;
4828 			/* OFDM rates. */
4829 			case 0xd: tap->wr_rate =  12; break;
4830 			case 0xf: tap->wr_rate =  18; break;
4831 			case 0x5: tap->wr_rate =  24; break;
4832 			case 0x7: tap->wr_rate =  36; break;
4833 			case 0x9: tap->wr_rate =  48; break;
4834 			case 0xb: tap->wr_rate =  72; break;
4835 			case 0x1: tap->wr_rate =  96; break;
4836 			case 0x3: tap->wr_rate = 108; break;
4837 			/* Unknown rate: should not happen. */
4838 			default:  tap->wr_rate =   0;
4839 			}
4840 		}
4841 
4842 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4843 		    m, BPF_DIRECTION_IN);
4844 	}
4845 #endif
4846 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4847 	ieee80211_release_node(ic, ni);
4848 }
4849 
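/*
 * Process an MPDU received on the legacy (non-MQ) RX path: validate
 * the frame length and CRC/overrun status bits, handle hardware
 * decryption, and hand the frame to iwm_rx_frame() together with PHY
 * metadata cached from the preceding RX_PHY_CMD notification.
 */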
4850 void
4851 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4852     size_t maxlen, struct mbuf_list *ml)
4853 {
4854 	struct ieee80211com *ic = &sc->sc_ic;
4855 	struct ieee80211_rxinfo rxi;
4856 	struct iwm_rx_phy_info *phy_info;
4857 	struct iwm_rx_mpdu_res_start *rx_res;
4858 	int device_timestamp;
4859 	uint16_t phy_flags;
4860 	uint32_t len;
4861 	uint32_t rx_pkt_status;
4862 	int rssi, chanidx, rate_n_flags;
4863 
4864 	memset(&rxi, 0, sizeof(rxi));
4865 
4866 	phy_info = &sc->sc_last_phy_info;
4867 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4868 	len = le16toh(rx_res->byte_count);
4869 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4870 		/* Allow control frames in monitor mode. */
4871 		if (len < sizeof(struct ieee80211_frame_cts)) {
4872 			ic->ic_stats.is_rx_tooshort++;
4873 			IC2IFP(ic)->if_ierrors++;
4874 			m_freem(m);
4875 			return;
4876 		}
4877 	} else if (len < sizeof(struct ieee80211_frame)) {
4878 		ic->ic_stats.is_rx_tooshort++;
4879 		IC2IFP(ic)->if_ierrors++;
4880 		m_freem(m);
4881 		return;
4882 	}
4883 	if (len > maxlen - sizeof(*rx_res)) {
4884 		IC2IFP(ic)->if_ierrors++;
4885 		m_freem(m);
4886 		return;
4887 	}
4888 
4889 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4890 		m_freem(m);
4891 		return;
4892 	}
4893 
4894 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4895 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4896 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4897 		m_freem(m);
4898 		return; /* drop */
4899 	}
4900 
4901 	m->m_data = pktdata + sizeof(*rx_res);
4902 	m->m_pkthdr.len = m->m_len = len;
4903 
4904 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4905 		m_freem(m);
4906 		return;
4907 	}
4908 
4909 	chanidx = letoh32(phy_info->channel);
4910 	device_timestamp = le32toh(phy_info->system_timestamp);
4911 	phy_flags = letoh16(phy_info->phy_flags);
4912 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4913 
4914 	rssi = iwm_get_signal_strength(sc, phy_info);
4915 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4916 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4917 
4918 	rxi.rxi_rssi = rssi;
4919 	rxi.rxi_tstamp = device_timestamp;
4920 	rxi.rxi_chan = chanidx;
4921 
4922 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4923 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4924 	    rate_n_flags, device_timestamp, &rxi, ml);
4925 }
4926 
4927 void
4928 iwm_flip_address(uint8_t *addr)
4929 {
4930 	int i;
4931 	uint8_t mac_addr[ETHER_ADDR_LEN];
4932 
4933 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4934 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4935 	IEEE80211_ADDR_COPY(addr, mac_addr);
4936 }
4937 
4938 /*
4939  * Drop duplicate 802.11 retransmissions
4940  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4941  * and handle pseudo-duplicate frames which result from deaggregation
4942  * of A-MSDU frames in hardware.
4943  */
4944 int
4945 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4946     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4947 {
4948 	struct ieee80211com *ic = &sc->sc_ic;
4949 	struct iwm_node *in = (void *)ic->ic_bss;
4950 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4951 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4952 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4953 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4954 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4955 	int hasqos = ieee80211_has_qos(wh);
4956 	uint16_t seq;
4957 
4958 	if (type == IEEE80211_FC0_TYPE_CTL ||
4959 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4960 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4961 		return 0;
4962 
4963 	if (hasqos) {
4964 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4965 		if (tid > IWM_MAX_TID_COUNT)
4966 			tid = IWM_MAX_TID_COUNT;
4967 	}
4968 
	/* If this was not part of an A-MSDU, the subframe index will be 0. */
4970 	subframe_idx = desc->amsdu_info &
4971 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4972 
4973 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4974 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4975 	    dup_data->last_seq[tid] == seq &&
4976 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4977 		return 1;
4978 
4979 	/*
4980 	 * Allow the same frame sequence number for all A-MSDU subframes
4981 	 * following the first subframe.
4982 	 * Otherwise these subframes would be discarded as replays.
4983 	 */
4984 	if (dup_data->last_seq[tid] == seq &&
4985 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4986 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
4987 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4988 	}
4989 
4990 	dup_data->last_seq[tid] = seq;
4991 	dup_data->last_sub_frame[tid] = subframe_idx;
4992 
4993 	return 0;
4994 }
4995 
4996 /*
4997  * Returns true if sn2 - buffer_size < sn1 < sn2.
4998  * To be used only in order to compare reorder buffer head with NSSN.
4999  * We fully trust NSSN unless it is behind us due to reorder timeout.
5000  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
5001  */
5002 int
5003 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
5004 {
5005 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
5006 }
5007 
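/*
 * Release frames from the reorder buffer, in sequence, up to (but not
 * including) the new SSN, and re-arm or cancel the reorder timer
 * depending on whether frames remain buffered.
 */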
5008 void
5009 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
5010     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
5011     uint16_t nssn, struct mbuf_list *ml)
5012 {
5013 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
5014 	uint16_t ssn = reorder_buf->head_sn;
5015 
	/* Ignore an NSSN smaller than head_sn; this can happen due to a timeout. */
5017 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
5018 		goto set_timer;
5019 
5020 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
5021 		int index = ssn % reorder_buf->buf_size;
5022 		struct mbuf *m;
5023 		int chanidx, is_shortpre;
5024 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
5025 		struct ieee80211_rxinfo *rxi;
5026 
5027 		/* This data is the same for all A-MSDU subframes. */
5028 		chanidx = entries[index].chanidx;
5029 		rx_pkt_status = entries[index].rx_pkt_status;
5030 		is_shortpre = entries[index].is_shortpre;
5031 		rate_n_flags = entries[index].rate_n_flags;
5032 		device_timestamp = entries[index].device_timestamp;
5033 		rxi = &entries[index].rxi;
5034 
5035 		/*
5036 		 * Empty the list. Will have more than one frame for A-MSDU.
5037 		 * Empty list is valid as well since nssn indicates frames were
5038 		 * received.
5039 		 */
5040 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
5041 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
5042 			    rate_n_flags, device_timestamp, rxi, ml);
5043 			reorder_buf->num_stored--;
5044 
5045 			/*
5046 			 * Allow the same frame sequence number and CCMP PN for
5047 			 * all A-MSDU subframes following the first subframe.
5048 			 * Otherwise they would be discarded as replays.
5049 			 */
5050 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5051 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5052 		}
5053 
5054 		ssn = (ssn + 1) & 0xfff;
5055 	}
5056 	reorder_buf->head_sn = nssn;
5057 
5058 set_timer:
5059 	if (reorder_buf->num_stored && !reorder_buf->removed) {
5060 		timeout_add_usec(&reorder_buf->reorder_timer,
5061 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5062 	} else
5063 		timeout_del(&reorder_buf->reorder_timer);
5064 }
5065 
5066 int
5067 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5068     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5069 {
5070 	struct ieee80211com *ic = &sc->sc_ic;
5071 
5072 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5073 		/* we have a new (A-)MPDU ... */
5074 
5075 		/*
5076 		 * reset counter to 0 if we didn't have any oldsn in
5077 		 * the last A-MPDU (as detected by GP2 being identical)
5078 		 */
5079 		if (!buffer->consec_oldsn_prev_drop)
5080 			buffer->consec_oldsn_drops = 0;
5081 
5082 		/* either way, update our tracking state */
5083 		buffer->consec_oldsn_ampdu_gp2 = gp2;
5084 	} else if (buffer->consec_oldsn_prev_drop) {
5085 		/*
5086 		 * tracking state didn't change, and we had an old SN
5087 		 * indication before - do nothing in this case, we
5088 		 * already noted this one down and are waiting for the
5089 		 * next A-MPDU (by GP2)
5090 		 */
5091 		return 0;
5092 	}
5093 
5094 	/* return unless this MPDU has old SN */
5095 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5096 		return 0;
5097 
5098 	/* update state */
5099 	buffer->consec_oldsn_prev_drop = 1;
5100 	buffer->consec_oldsn_drops++;
5101 
5102 	/* if limit is reached, send del BA and reset state */
5103 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5104 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5105 		    0, tid);
5106 		buffer->consec_oldsn_prev_drop = 0;
5107 		buffer->consec_oldsn_drops = 0;
5108 		return 1;
5109 	}
5110 
5111 	return 0;
5112 }
5113 
5114 /*
5115  * Handle re-ordering of frames which were de-aggregated in hardware.
5116  * Returns 1 if the MPDU was consumed (buffered or dropped).
5117  * Returns 0 if the MPDU should be passed to upper layer.
5118  */
5119 int
5120 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5121     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5122     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5123     struct mbuf_list *ml)
5124 {
5125 	struct ieee80211com *ic = &sc->sc_ic;
5126 	struct ieee80211_frame *wh;
5127 	struct ieee80211_node *ni;
5128 	struct iwm_rxba_data *rxba;
5129 	struct iwm_reorder_buffer *buffer;
5130 	uint32_t reorder_data = le32toh(desc->reorder_data);
5131 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5132 	int last_subframe =
5133 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5134 	uint8_t tid;
5135 	uint8_t subframe_idx = (desc->amsdu_info &
5136 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5137 	struct iwm_reorder_buf_entry *entries;
5138 	int index;
5139 	uint16_t nssn, sn;
5140 	uint8_t baid, type, subtype;
5141 	int hasqos;
5142 
5143 	wh = mtod(m, struct ieee80211_frame *);
5144 	hasqos = ieee80211_has_qos(wh);
5145 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5146 
5147 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5148 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5149 
5150 	/*
5151 	 * We are only interested in Block Ack requests and unicast QoS data.
5152 	 */
5153 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5154 		return 0;
5155 	if (hasqos) {
5156 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5157 			return 0;
5158 	} else {
5159 		if (type != IEEE80211_FC0_TYPE_CTL ||
5160 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
5161 			return 0;
5162 	}
5163 
5164 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5165 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
5166 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5167 	    baid >= nitems(sc->sc_rxba_data))
5168 		return 0;
5169 
5170 	rxba = &sc->sc_rxba_data[baid];
5171 	if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5172 	    tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5173 		return 0;
5174 
5175 	if (rxba->timeout != 0)
5176 		getmicrouptime(&rxba->last_rx);
5177 
5178 	/* Bypass A-MPDU re-ordering in net80211. */
5179 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5180 
5181 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5182 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5183 		IWM_RX_MPDU_REORDER_SN_SHIFT;
5184 
5185 	buffer = &rxba->reorder_buf;
5186 	entries = &rxba->entries[0];
5187 
5188 	if (!buffer->valid) {
5189 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5190 			return 0;
5191 		buffer->valid = 1;
5192 	}
5193 
5194 	ni = ieee80211_find_rxnode(ic, wh);
5195 	if (type == IEEE80211_FC0_TYPE_CTL &&
5196 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5197 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5198 		goto drop;
5199 	}
5200 
5201 	/*
5202 	 * If there was a significant jump in the nssn - adjust.
5203 	 * If the SN is smaller than the NSSN it might need to first go into
5204 	 * the reorder buffer, in which case we just release up to it and the
5205 	 * rest of the function will take care of storing it and releasing up to
5206 	 * the nssn.
5207 	 */
5208 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5209 	    buffer->buf_size) ||
5210 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5211 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5212 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5213 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5214 	}
5215 
5216 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5217 	    device_timestamp)) {
		/* BA session will be torn down. */
		ic->ic_stats.is_ht_rx_ba_window_jump++;
		goto drop;
	}
5223 
5224 	/* drop any outdated packets */
5225 	if (SEQ_LT(sn, buffer->head_sn)) {
5226 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5227 		goto drop;
5228 	}
5229 
5230 	/* release immediately if allowed by nssn and no stored frames */
5231 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5232 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5233 		   (!is_amsdu || last_subframe))
5234 			buffer->head_sn = nssn;
5235 		ieee80211_release_node(ic, ni);
5236 		return 0;
5237 	}
5238 
5239 	/*
5240 	 * Release immediately if there are no stored frames and the sn is
5241 	 * equal to the head. This can happen due to the reorder timer, which
5242 	 * leaves the NSSN behind head_sn: we released everything and then
5243 	 * received the next frame in the sequence. According to the NSSN we
5244 	 * can't release it immediately, while technically there is no hole
5245 	 * and we can move forward.
5246 	 */
5247 	if (!buffer->num_stored && sn == buffer->head_sn) {
5248 		if (!is_amsdu || last_subframe)
5249 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5250 		ieee80211_release_node(ic, ni);
5251 		return 0;
5252 	}
5253 
5254 	index = sn % buffer->buf_size;
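	/*
	 * A worked example of the circular index above: with buf_size 64
	 * and the 12-bit SN space, sn 4095 maps to slot 63 and sn 0 (after
	 * wraparound) maps to slot 0, so consecutive sequence numbers
	 * occupy consecutive buffer slots.
	 */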
5255 
5256 	/*
5257 	 * Check if we have already stored this frame.
5258 	 * As an A-MSDU is either received in full or not at all, the logic is
5259 	 * simple: if we have a frame in this buffer slot and the last frame
5260 	 * originating from an A-MSDU had a different SN, it is a retransmission.
5261 	 * If the SN is the same, it is the same A-MSDU only if the subframe
5262 	 * index is incrementing; otherwise it is a retransmission.
5263 	 */
5264 	if (!ml_empty(&entries[index].frames)) {
5265 		if (!is_amsdu) {
5266 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5267 			goto drop;
5268 		} else if (sn != buffer->last_amsdu ||
5269 		    buffer->last_sub_index >= subframe_idx) {
5270 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5271 			goto drop;
5272 		}
5273 	} else {
5274 		/* This data is the same for all A-MSDU subframes. */
5275 		entries[index].chanidx = chanidx;
5276 		entries[index].is_shortpre = is_shortpre;
5277 		entries[index].rate_n_flags = rate_n_flags;
5278 		entries[index].device_timestamp = device_timestamp;
5279 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5280 	}
5281 
5282 	/* put in reorder buffer */
5283 	ml_enqueue(&entries[index].frames, m);
5284 	buffer->num_stored++;
5285 	getmicrouptime(&entries[index].reorder_time);
5286 
5287 	if (is_amsdu) {
5288 		buffer->last_amsdu = sn;
5289 		buffer->last_sub_index = subframe_idx;
5290 	}
5291 
5292 	/*
5293 	 * We cannot trust the NSSN for A-MSDU subframes that are not the last.
5294 	 * The reason is that the NSSN advances on the first subframe, and may
5295 	 * cause the reorder buffer to advance before all subframes arrive.
5296 	 * Example: the reorder buffer contains SN 0 and 2, and we receive an
5297 	 * A-MSDU with SN 1. The NSSN for the first subframe will be 3, with the
5298 	 * result of the driver releasing SN 0, 1, 2. When subframe 1 arrives,
5299 	 * the reorder buffer is already ahead of it and it will be dropped.
5300 	 * If the last subframe is not on this queue, we will get a frame
5301 	 * release notification with an up-to-date NSSN.
5302 	 */
5303 	if (!is_amsdu || last_subframe)
5304 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5305 
5306 	ieee80211_release_node(ic, ni);
5307 	return 1;
5308 
5309 drop:
5310 	m_freem(m);
5311 	ieee80211_release_node(ic, ni);
5312 	return 1;
5313 }
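
/*
 * Illustration only (not compiled): the 12-bit modular sequence number
 * comparison the reordering logic above relies on, assuming SEQ_LT()
 * follows the usual net80211 half-space definition.
 */
#if 0
static int
example_seq_lt(uint16_t a, uint16_t b)
{
	/* 'a' precedes 'b' if their modular distance exceeds half the space. */
	return (((a - b) & 0xfff) > 2048);
}
#endif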
5314 
5315 void
5316 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5317     size_t maxlen, struct mbuf_list *ml)
5318 {
5319 	struct ieee80211com *ic = &sc->sc_ic;
5320 	struct ieee80211_rxinfo rxi;
5321 	struct iwm_rx_mpdu_desc *desc;
5322 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5323 	int rssi;
5324 	uint8_t chanidx;
5325 	uint16_t phy_info;
5326 
5327 	memset(&rxi, 0, sizeof(rxi));
5328 
5329 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
5330 
5331 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5332 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5333 		m_freem(m);
5334 		return; /* drop */
5335 	}
5336 
5337 	len = le16toh(desc->mpdu_len);
5338 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5339 		/* Allow control frames in monitor mode. */
5340 		if (len < sizeof(struct ieee80211_frame_cts)) {
5341 			ic->ic_stats.is_rx_tooshort++;
5342 			IC2IFP(ic)->if_ierrors++;
5343 			m_freem(m);
5344 			return;
5345 		}
5346 	} else if (len < sizeof(struct ieee80211_frame)) {
5347 		ic->ic_stats.is_rx_tooshort++;
5348 		IC2IFP(ic)->if_ierrors++;
5349 		m_freem(m);
5350 		return;
5351 	}
5352 	if (len > maxlen - sizeof(*desc)) {
5353 		IC2IFP(ic)->if_ierrors++;
5354 		m_freem(m);
5355 		return;
5356 	}
5357 
5358 	m->m_data = pktdata + sizeof(*desc);
5359 	m->m_pkthdr.len = m->m_len = len;
5360 
5361 	/* Account for padding following the frame header. */
5362 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5363 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5364 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5365 		if (type == IEEE80211_FC0_TYPE_CTL) {
5366 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5367 			case IEEE80211_FC0_SUBTYPE_CTS:
5368 				hdrlen = sizeof(struct ieee80211_frame_cts);
5369 				break;
5370 			case IEEE80211_FC0_SUBTYPE_ACK:
5371 				hdrlen = sizeof(struct ieee80211_frame_ack);
5372 				break;
5373 			default:
5374 				hdrlen = sizeof(struct ieee80211_frame_min);
5375 				break;
5376 			}
5377 		} else
5378 			hdrlen = ieee80211_get_hdrlen(wh);
5379 
5380 		if ((le16toh(desc->status) &
5381 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5382 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5383 			/* Padding is inserted after the IV. */
5384 			hdrlen += IEEE80211_CCMP_HDRLEN;
5385 		}
5386 
5387 		memmove(m->m_data + 2, m->m_data, hdrlen);
5388 		m_adj(m, 2);
5389 	}
5390 
5391 	/*
5392 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5393 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5394 	 * bit set in the frame header. We need to clear this bit ourselves.
5395 	 *
5396 	 * And we must allow the same CCMP PN for subframes following the
5397 	 * first subframe. Otherwise they would be discarded as replays.
5398 	 */
5399 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5400 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5401 		uint8_t subframe_idx = (desc->amsdu_info &
5402 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5403 		if (subframe_idx > 0)
5404 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5405 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5406 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5407 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5408 			    struct ieee80211_qosframe_addr4 *);
5409 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5410 
5411 			/* HW reverses addr3 and addr4. */
5412 			iwm_flip_address(qwh4->i_addr3);
5413 			iwm_flip_address(qwh4->i_addr4);
5414 		} else if (ieee80211_has_qos(wh) &&
5415 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5416 			struct ieee80211_qosframe *qwh = mtod(m,
5417 			    struct ieee80211_qosframe *);
5418 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5419 
5420 			/* HW reverses addr3. */
5421 			iwm_flip_address(qwh->i_addr3);
5422 		}
5423 	}
5424 
5425 	/*
5426 	 * Verify decryption before duplicate detection. The latter uses
5427 	 * the TID supplied in QoS frame headers and this TID is implicitly
5428 	 * verified as part of the CCMP nonce.
5429 	 */
5430 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5431 		m_freem(m);
5432 		return;
5433 	}
5434 
5435 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5436 		m_freem(m);
5437 		return;
5438 	}
5439 
5440 	phy_info = le16toh(desc->phy_info);
5441 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
5442 	chanidx = desc->v1.channel;
5443 	device_timestamp = desc->v1.gp2_on_air_rise;
5444 
5445 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
5446 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
5447 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
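	/*
	 * For example, assuming IWM_MIN_DBM is -100: a measured signal of
	 * -60 dBm normalizes to 0 - (-100) + (-60) = 40, which is then
	 * clipped against ic_max_rssi.
	 */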
5448 
5449 	rxi.rxi_rssi = rssi;
5450 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5451 	rxi.rxi_chan = chanidx;
5452 
5453 	if (iwm_rx_reorder(sc, m, chanidx, desc,
5454 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5455 	    rate_n_flags, device_timestamp, &rxi, ml))
5456 		return;
5457 
5458 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5459 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5460 	    rate_n_flags, device_timestamp, &rxi, ml);
5461 }
5462 
5463 void
5464 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5465 {
5466 	struct ieee80211com *ic = &sc->sc_ic;
5467 	struct iwm_node *in = (void *)ni;
5468 	int old_txmcs = ni->ni_txmcs;
5469 	int old_nss = ni->ni_vht_ss;
5470 
5471 	if (ni->ni_flags & IEEE80211_NODE_VHT)
5472 		ieee80211_ra_vht_choose(&in->in_rn_vht, ic, ni);
5473 	else
5474 		ieee80211_ra_choose(&in->in_rn, ic, ni);
5475 
5476 	/*
5477 	 * If RA has chosen a new TX rate we must update
5478 	 * the firmware's LQ rate table.
5479 	 */
5480 	if (ni->ni_txmcs != old_txmcs || ni->ni_vht_ss != old_nss)
5481 		iwm_setrates(in, 1);
5482 }
5483 
5484 void
5485 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5486     int txmcs, uint8_t failure_frame, int txfail)
5487 {
5488 	struct ieee80211com *ic = &sc->sc_ic;
5489 	struct iwm_node *in = (void *)ni;
5490 
5491 	/* Ignore Tx reports which don't match our last LQ command. */
5492 	if (txmcs != ni->ni_txmcs) {
5493 		if (++in->lq_rate_mismatch > 15) {
5494 			/* Try to sync firmware with the driver... */
5495 			iwm_setrates(in, 1);
5496 			in->lq_rate_mismatch = 0;
5497 		}
5498 	} else {
5499 		int mcs = txmcs;
5500 		const struct ieee80211_ht_rateset *rs =
5501 		    ieee80211_ra_get_ht_rateset(txmcs,
5502 		        ieee80211_node_supports_ht_chan40(ni),
5503 			ieee80211_ra_use_ht_sgi(ni));
5504 		unsigned int retries = 0, i;
5505 
5506 		in->lq_rate_mismatch = 0;
5507 
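		/*
		 * Reconstruct the retry chain from failure_frame. The
		 * accounting below assumes the firmware's LQ retry table
		 * steps down one MCS per retry until the rateset's minimum;
		 * e.g. with txmcs 7, min_mcs 0 and failure_frame 9, MCS 7
		 * through 1 each record one failed attempt and the final
		 * two retries are charged to MCS 0.
		 */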
5508 		for (i = 0; i < failure_frame; i++) {
5509 			if (mcs > rs->min_mcs) {
5510 				ieee80211_ra_add_stats_ht(&in->in_rn,
5511 				    ic, ni, mcs, 1, 1);
5512 				mcs--;
5513 			} else
5514 				retries++;
5515 		}
5516 
5517 		if (txfail && failure_frame == 0) {
5518 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5519 			    txmcs, 1, 1);
5520 		} else {
5521 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5522 			    mcs, retries + 1, retries);
5523 		}
5524 
5525 		iwm_ra_choose(sc, ni);
5526 	}
5527 }
5528 
5529 void
5530 iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5531     int txmcs, int nss, uint8_t failure_frame, int txfail)
5532 {
5533 	struct ieee80211com *ic = &sc->sc_ic;
5534 	struct iwm_node *in = (void *)ni;
5535 	uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5536 	uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5537 
5538 	/* Ignore Tx reports which don't match our last LQ command. */
5539 	if (txmcs != ni->ni_txmcs || nss != ni->ni_vht_ss) {
5540 		if (++in->lq_rate_mismatch > 15) {
5541 			/* Try to sync firmware with the driver... */
5542 			iwm_setrates(in, 1);
5543 			in->lq_rate_mismatch = 0;
5544 		}
5545 	} else {
5546 		int mcs = txmcs;
5547 		unsigned int retries = 0, i;
5548 
5549 		if (in->in_phyctxt) {
5550 			vht_chan_width = in->in_phyctxt->vht_chan_width;
5551 			sco = in->in_phyctxt->sco;
5552 		}
5553 		in->lq_rate_mismatch = 0;
5554 
5555 		for (i = 0; i < failure_frame; i++) {
5556 			if (mcs > 0) {
5557 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5558 				    ic, ni, mcs, nss, 1, 1);
5559 				if (vht_chan_width >=
5560 				    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5561 					/*
5562 					 * First 4 Tx attempts used same MCS,
5563 					 * twice at 80MHz and twice at 40MHz.
5564 					 */
5565 					if (i >= 4)
5566 						mcs--;
5567 				} else if (sco == IEEE80211_HTOP0_SCO_SCA ||
5568 				    sco == IEEE80211_HTOP0_SCO_SCB) {
5569 					/*
5570 					 * First 4 Tx attempts used same MCS,
5571 					 * four times at 40MHz.
5572 					 */
5573 					if (i >= 4)
5574 						mcs--;
5575 				} else
5576 					mcs--;
5577 			} else
5578 				retries++;
5579 		}
5580 
5581 		if (txfail && failure_frame == 0) {
5582 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5583 			    txmcs, nss, 1, 1);
5584 		} else {
5585 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5586 			    mcs, nss, retries + 1, retries);
5587 		}
5588 
5589 		iwm_ra_choose(sc, ni);
5590 	}
5591 }
5592 
5593 void
5594 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5595     struct iwm_node *in, int txmcs, int txrate)
5596 {
5597 	struct ieee80211com *ic = &sc->sc_ic;
5598 	struct ieee80211_node *ni = &in->in_ni;
5599 	struct ifnet *ifp = IC2IFP(ic);
5600 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5601 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5602 	uint32_t initial_rate = le32toh(tx_resp->initial_rate);
5603 	int txfail;
5604 
5605 	KASSERT(tx_resp->frame_count == 1);
5606 
5607 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
5608 	    status != IWM_TX_STATUS_DIRECT_DONE);
5609 
5610 	/*
5611 	 * Update rate control statistics.
5612 	 * Only report frames which were actually queued with the currently
5613 	 * selected Tx rate. Because Tx queues are relatively long we may
5614 	 * encounter previously selected rates here during Tx bursts.
5615 	 * Providing feedback based on such frames can lead to suboptimal
5616 	 * Tx rate control decisions.
5617 	 */
5618 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5619 		if (txrate != ni->ni_txrate) {
5620 			if (++in->lq_rate_mismatch > 15) {
5621 				/* Try to sync firmware with the driver... */
5622 				iwm_setrates(in, 1);
5623 				in->lq_rate_mismatch = 0;
5624 			}
5625 		} else {
5626 			in->lq_rate_mismatch = 0;
5627 
5628 			in->in_amn.amn_txcnt++;
5629 			if (txfail)
5630 				in->in_amn.amn_retrycnt++;
5631 			if (tx_resp->failure_frame > 0)
5632 				in->in_amn.amn_retrycnt++;
5633 		}
5634 	} else if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5635 	    ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5636 	    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5637 		int txmcs = initial_rate & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5638 		int nss = ((initial_rate & IWM_RATE_VHT_MCS_NSS_MSK) >>
5639 		    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5640 		iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5641 		    tx_resp->failure_frame, txfail);
5642 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5643 	    (initial_rate & IWM_RATE_MCS_HT_MSK)) {
5644 		int txmcs = initial_rate &
5645 		    (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5646 		iwm_ht_single_rate_control(sc, ni, txmcs,
5647 		    tx_resp->failure_frame, txfail);
5648 	}
5649 
5650 	if (txfail)
5651 		ifp->if_oerrors++;
5652 }
5653 
5654 void
5655 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5656 {
5657 	struct ieee80211com *ic = &sc->sc_ic;
5658 
5659 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5660 	    BUS_DMASYNC_POSTWRITE);
5661 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5662 	m_freem(txd->m);
5663 	txd->m = NULL;
5664 
5665 	KASSERT(txd->in);
5666 	ieee80211_release_node(ic, &txd->in->in_ni);
5667 	txd->in = NULL;
5668 	txd->ampdu_nframes = 0;
5669 	txd->ampdu_txmcs = 0;
5670 	txd->ampdu_txnss = 0;
5671 }
5672 
5673 void
5674 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5675 {
5676 	struct iwm_tx_data *txd;
5677 
5678 	while (ring->tail != idx) {
5679 		txd = &ring->data[ring->tail];
5680 		if (txd->m != NULL) {
5681 			if (ring->qid < IWM_FIRST_AGG_TX_QUEUE)
5682 				DPRINTF(("%s: missed Tx completion: tail=%d "
5683 				    "idx=%d\n", __func__, ring->tail, idx));
5684 			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5685 			iwm_txd_done(sc, txd);
5686 			ring->queued--;
5687 		}
5688 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5689 	}
5690 
5691 	wakeup(ring);
5692 }
5693 
5694 void
5695 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5696     struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5697     uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5698     struct iwm_agg_tx_status *agg_status)
5699 {
5700 	struct ieee80211com *ic = &sc->sc_ic;
5701 	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5702 	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5703 	struct ieee80211_node *ni = &in->in_ni;
5704 	struct ieee80211_tx_ba *ba;
5705 	int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5706 	    status != IWM_TX_STATUS_DIRECT_DONE);
5707 	uint16_t seq;
5708 
5709 	if (ic->ic_state != IEEE80211_S_RUN)
5710 		return;
5711 
5712 	if (nframes > 1) {
5713 		int i;
5714 		/*
5715 		 * Collect information about this A-MPDU.
5716 		 */
5717 
5718 		for (i = 0; i < nframes; i++) {
5719 			uint8_t qid = agg_status[i].qid;
5720 			uint8_t idx = agg_status[i].idx;
5721 			uint16_t txstatus = (le16toh(agg_status[i].status) &
5722 			    IWM_AGG_TX_STATE_STATUS_MSK);
5723 
5724 			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5725 				continue;
5726 
5727 			if (qid != cmd_hdr->qid)
5728 				continue;
5729 
5730 			txdata = &txq->data[idx];
5731 			if (txdata->m == NULL)
5732 				continue;
5733 
5734 			/* The Tx rate was the same for all subframes. */
5735 			if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5736 			    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5737 				txdata->ampdu_txmcs = initial_rate &
5738 				    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5739 				txdata->ampdu_txnss = ((initial_rate &
5740 				    IWM_RATE_VHT_MCS_NSS_MSK) >>
5741 				    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5742 				txdata->ampdu_nframes = nframes;
5743 			} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5744 				txdata->ampdu_txmcs = initial_rate &
5745 				    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5746 				    IWM_RATE_HT_MCS_NSS_MSK);
5747 				txdata->ampdu_nframes = nframes;
5748 			}
5749 		}
5750 		return;
5751 	}
5752 
5753 	ba = &ni->ni_tx_ba[tid];
5754 	if (ba->ba_state != IEEE80211_BA_AGREED)
5755 		return;
5756 	if (SEQ_LT(ssn, ba->ba_winstart))
5757 		return;
5758 
5759 	/* This was a final single-frame Tx attempt for frame SSN-1. */
5760 	seq = (ssn - 1) & 0xfff;
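	/* Note the 12-bit wraparound: an ssn of 0 means frame 0xfff. */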
5761 
5762 	/*
5763 	 * Skip rate control if our Tx rate is fixed.
5764 	 * Don't report frames to the rate adaptation code if they were
5765 	 * sent at a Tx rate other than ni->ni_txmcs.
5766 	 */
5767 	if (ic->ic_fixed_mcs == -1) {
5768 		if (txdata->ampdu_nframes > 1) {
5769 			/*
5770 			 * This frame was once part of an A-MPDU.
5771 			 * Report one failed A-MPDU Tx attempt.
5772 			 * The firmware might have made several such
5773 			 * attempts but we don't keep track of this.
5774 			 */
5775 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5776 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5777 				    ic, ni, txdata->ampdu_txmcs,
5778 				    txdata->ampdu_txnss, 1, 1);
5779 			} else {
5780 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5781 				    txdata->ampdu_txmcs, 1, 1);
5782 			}
5783 		}
5784 
5785 		/* Report the final single-frame Tx attempt. */
5786 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5787 		    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5788 			int txmcs = initial_rate &
5789 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5790 			int nss = ((initial_rate &
5791 			    IWM_RATE_VHT_MCS_NSS_MSK) >>
5792 			    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5793 			iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5794 			    failure_frame, txfail);
5795 		} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5796 			int txmcs = initial_rate &
5797 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5798 			   IWM_RATE_HT_MCS_NSS_MSK);
5799 			iwm_ht_single_rate_control(sc, ni, txmcs,
5800 			    failure_frame, txfail);
5801 		}
5802 	}
5803 
5804 	if (txfail)
5805 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5806 
5807 	/*
5808 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5809 	 * in firmware's BA window. Firmware is not going to retransmit any
5810 	 * frames before its BA window so mark them all as done.
5811 	 */
5812 	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5813 	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5814 	iwm_clear_oactive(sc, txq);
5815 }
5816 
5817 void
5818 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5819     struct iwm_rx_data *data)
5820 {
5821 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5822 	int idx = cmd_hdr->idx;
5823 	int qid = cmd_hdr->qid;
5824 	struct iwm_tx_ring *ring = &sc->txq[qid];
5825 	struct iwm_tx_data *txd;
5826 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5827 	uint32_t ssn;
5828 	uint32_t len = iwm_rx_packet_len(pkt);
5829 
5830 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5831 	    BUS_DMASYNC_POSTREAD);
5832 
5833 	/* Sanity checks. */
5834 	if (sizeof(*tx_resp) > len)
5835 		return;
5836 	if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5837 		return;
5838 	if (qid > IWM_LAST_AGG_TX_QUEUE)
5839 		return;
5840 	if (sizeof(*tx_resp) + sizeof(ssn) +
5841 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5842 		return;
5843 
5844 	sc->sc_tx_timer[qid] = 0;
5845 
5846 	txd = &ring->data[idx];
5847 	if (txd->m == NULL)
5848 		return;
5849 
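	/*
	 * The SSN follows the variable-length status array in the Tx
	 * response. The layout assumed by the memcpy below is: struct
	 * iwm_tx_resp, then frame_count copies of tx_resp->status, then
	 * a 32-bit SSN of which only the low 12 bits are meaningful.
	 */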
5850 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5851 	ssn = le32toh(ssn) & 0xfff;
5852 	if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5853 		int status;
5854 		status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5855 		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5856 		    le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5857 		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
5858 	} else {
5859 		/*
5860 		 * Even though this is not an agg queue, we must only free
5861 		 * frames before the firmware's starting sequence number.
5862 		 */
5863 		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5864 		iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5865 		iwm_clear_oactive(sc, ring);
5866 	}
5867 }
5868 
5869 void
5870 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5871 {
5872 	struct ieee80211com *ic = &sc->sc_ic;
5873 	struct ifnet *ifp = IC2IFP(ic);
5874 
5875 	if (ring->queued < IWM_TX_RING_LOMARK) {
5876 		sc->qfullmsk &= ~(1 << ring->qid);
5877 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5878 			ifq_clr_oactive(&ifp->if_snd);
5879 			/*
5880 			 * Well, we're in interrupt context, but then again
5881 			 * I guess net80211 does all sorts of stunts in
5882 			 * interrupt context, so maybe this is no biggie.
5883 			 */
5884 			(*ifp->if_start)(ifp);
5885 		}
5886 	}
5887 }
5888 
5889 void
5890 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5891     struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5892 {
5893 	struct ieee80211com *ic = &sc->sc_ic;
5894 	struct iwm_node *in = (void *)ni;
5895 	int idx, end_idx;
5896 
5897 	/*
5898 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5899 	 */
5900 	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5901 	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5902 	while (idx != end_idx) {
5903 		struct iwm_tx_data *txdata = &txq->data[idx];
5904 		if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5905 			/*
5906 			 * We can assume that this subframe has been ACKed
5907 			 * because ACK failures come as single frames and
5908 			 * before failing an A-MPDU subframe the firmware
5909 			 * sends it as a single frame at least once.
5910 			 */
5911 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5912 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5913 				    ic, ni, txdata->ampdu_txmcs,
5914 				    txdata->ampdu_txnss, 1, 0);
5915 			} else {
5916 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5917 				    txdata->ampdu_txmcs, 1, 0);
5918 			}
5919 			/* Report this frame only once. */
5920 			txdata->ampdu_nframes = 0;
5921 		}
5922 
5923 		idx = (idx + 1) % IWM_TX_RING_COUNT;
5924 	}
5925 
5926 	iwm_ra_choose(sc, ni);
5927 }
5928 
5929 void
5930 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5931 {
5932 	struct iwm_ba_notif *ban = (void *)pkt->data;
5933 	struct ieee80211com *ic = &sc->sc_ic;
5934 	struct ieee80211_node *ni = ic->ic_bss;
5935 	struct iwm_node *in = (void *)ni;
5936 	struct ieee80211_tx_ba *ba;
5937 	struct iwm_tx_ring *ring;
5938 	uint16_t seq, ssn;
5939 	int qid;
5940 
5941 	if (ic->ic_state != IEEE80211_S_RUN)
5942 		return;
5943 
5944 	if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5945 		return;
5946 
5947 	if (ban->sta_id != IWM_STATION_ID ||
5948 	    !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5949 		return;
5950 
5951 	qid = le16toh(ban->scd_flow);
5952 	if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5953 		return;
5954 
5955 	/* Protect against a firmware bug where the queue/TID are off. */
5956 	if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5957 		return;
5958 
5959 	sc->sc_tx_timer[qid] = 0;
5960 
5961 	ba = &ni->ni_tx_ba[ban->tid];
5962 	if (ba->ba_state != IEEE80211_BA_AGREED)
5963 		return;
5964 
5965 	ring = &sc->txq[qid];
5966 
5967 	/*
5968 	 * The first bit in ban->bitmap corresponds to the sequence number
5969 	 * stored in the sequence control field ban->seq_ctl.
5970 	 * Multiple BA notifications in a row may reuse this number, with
5971 	 * additional bits being set in ban->bitmap. It is unclear how the
5972 	 * firmware decides to shift this window forward.
5973 	 * We rely on ba->ba_winstart instead.
5974 	 */
5975 	seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
5976 
5977 	/*
5978 	 * The firmware's new BA window starting sequence number is
5979 	 * stored in ban->scd_ssn and corresponds to the first hole in
5980 	 * ban->bitmap, implying that all frames between 'seq' and 'ssn'
5981 	 * (non-inclusive) have been acked.
5982 	 */
5983 	ssn = le16toh(ban->scd_ssn);
5984 
5985 	if (SEQ_LT(ssn, ba->ba_winstart))
5986 		return;
5987 
5988 	/* Skip rate control if our Tx rate is fixed. */
5989 	if (ic->ic_fixed_mcs == -1)
5990 		iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
5991 		    ba->ba_winstart, ssn);
5992 
5993 	/*
5994 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5995 	 * in firmware's BA window. Firmware is not going to retransmit any
5996 	 * frames before its BA window so mark them all as done.
5997 	 */
5998 	ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
5999 	iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
6000 	iwm_clear_oactive(sc, ring);
6001 }
6002 
6003 void
6004 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
6005     struct iwm_rx_data *data)
6006 {
6007 	struct ieee80211com *ic = &sc->sc_ic;
6008 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
6009 	uint32_t missed;
6010 
6011 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
6012 	    (ic->ic_state != IEEE80211_S_RUN))
6013 		return;
6014 
6015 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
6016 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
6017 
6018 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
6019 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
6020 		if (ic->ic_if.if_flags & IFF_DEBUG)
6021 			printf("%s: receiving no beacons from %s; checking if "
6022 			    "this AP is still responding to probe requests\n",
6023 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
6024 		/*
6025 		 * Rather than go directly to scan state, try to send a
6026 		 * directed probe request first. If that fails then the
6027 		 * state machine will drop us into scanning after timing
6028 		 * out waiting for a probe response.
6029 		 */
6030 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
6031 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
6032 	}
6034 }
6035 
6036 int
6037 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
6038 {
6039 	struct iwm_binding_cmd cmd;
6040 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
6041 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
6042 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
6043 	uint32_t status;
6044 	size_t len;
6045 
6046 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
6047 		panic("binding already added");
6048 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6049 		panic("binding already removed");
6050 
6051 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
6052 		return EINVAL;
6053 
6054 	memset(&cmd, 0, sizeof(cmd));
6055 
6056 	cmd.id_and_color
6057 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6058 	cmd.action = htole32(action);
6059 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6060 
6061 	cmd.macs[0] = htole32(mac_id);
6062 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
6063 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
6064 
6065 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
6066 	    !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
6067 		cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
6068 	else
6069 		cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
6070 
6071 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
6072 		len = sizeof(cmd);
6073 	else
6074 		len = sizeof(struct iwm_binding_cmd_v1);
6075 	status = 0;
6076 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
6077 	    &status);
6078 	if (err == 0 && status != 0)
6079 		err = EIO;
6080 
6081 	return err;
6082 }
6083 
6084 void
6085 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6086     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
6087 {
6088 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
6089 
6090 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6091 	    ctxt->color));
6092 	cmd->action = htole32(action);
6093 	cmd->apply_time = htole32(apply_time);
6094 }
6095 
6096 void
6097 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
6098     struct ieee80211_channel *chan, uint8_t chains_static,
6099     uint8_t chains_dynamic, uint8_t sco, uint8_t vht_chan_width)
6100 {
6101 	struct ieee80211com *ic = &sc->sc_ic;
6102 	uint8_t active_cnt, idle_cnt;
6103 
6104 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6105 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6106 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
6107 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6108 		cmd->ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6109 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6110 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6111 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6112 			/* secondary chan above -> control chan below */
6113 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6114 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6115 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6116 			/* secondary chan below -> control chan above */
6117 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6118 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6119 		} else {
6120 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6121 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6122 		}
6123 	} else {
6124 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6125 		cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6126 	}
6127 
6128 	/* Set the Rx chains. */
6129 	idle_cnt = chains_static;
6130 	active_cnt = chains_dynamic;
6131 
6132 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6133 					IWM_PHY_RX_CHAIN_VALID_POS);
6134 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6135 	cmd->rxchain_info |= htole32(active_cnt <<
6136 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6137 
6138 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6139 }
6140 
6141 uint8_t
6142 iwm_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
6143 {
6144 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
6145 	int primary_idx = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
6146 	/*
6147 	 * The FW is expected to check the control channel position only
6148 	 * when in HT/VHT and the channel width is not 20MHz. Return
6149 	 * this value as the default one:
6150 	 */
6151 	uint8_t pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6152 
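	/*
	 * These offsets follow from 80MHz channelization: the four 20MHz
	 * subchannels sit at -6, -2, +2 and +6 channel numbers relative
	 * to the center. For example, with center index 42, primary
	 * channels 36/40/44/48 yield offsets -6/-2/+2/+6 respectively.
	 */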
6153 	switch (primary_idx - center_idx) {
6154 	case -6:
6155 		pos = IWM_PHY_VHT_CTRL_POS_2_BELOW;
6156 		break;
6157 	case -2:
6158 		pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6159 		break;
6160 	case 2:
6161 		pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6162 		break;
6163 	case 6:
6164 		pos = IWM_PHY_VHT_CTRL_POS_2_ABOVE;
6165 		break;
6166 	default:
6167 		break;
6168 	}
6169 
6170 	return pos;
6171 }
6172 
6173 int
6174 iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6175     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6176     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6177 {
6178 	struct ieee80211com *ic = &sc->sc_ic;
6179 	struct iwm_phy_context_cmd_uhb cmd;
6180 	uint8_t active_cnt, idle_cnt;
6181 	struct ieee80211_channel *chan = ctxt->channel;
6182 
6183 	memset(&cmd, 0, sizeof(cmd));
6184 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6185 	    ctxt->color));
6186 	cmd.action = htole32(action);
6187 	cmd.apply_time = htole32(apply_time);
6188 
6189 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6190 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6191 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
6192 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6193 		cmd.ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6194 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6195 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6196 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6197 			/* secondary chan above -> control chan below */
6198 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6199 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6200 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6201 			/* secondary chan below -> control chan above */
6202 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6203 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6204 		} else {
6205 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6206 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6207 		}
6208 	} else {
6209 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6210 		cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6211 	}
6212 
6213 	idle_cnt = chains_static;
6214 	active_cnt = chains_dynamic;
6215 	cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6216 					IWM_PHY_RX_CHAIN_VALID_POS);
6217 	cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6218 	cmd.rxchain_info |= htole32(active_cnt <<
6219 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6220 	cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6221 
6222 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6223 }
6224 
6225 int
6226 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6227     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6228     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6229 {
6230 	struct iwm_phy_context_cmd cmd;
6231 
6232 	/*
6233 	 * Intel increased the size of the fw_channel_info struct and neglected
6234 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6235 	 * member in the middle.
6236 	 * To keep things simple we use a separate function to handle the larger
6237 	 * variant of the phy context command.
6238 	 */
6239 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6240 		return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6241 		    chains_dynamic, action, apply_time, sco, vht_chan_width);
6242 
6243 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6244 
6245 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6246 	    chains_static, chains_dynamic, sco, vht_chan_width);
6247 
6248 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6249 	    sizeof(struct iwm_phy_context_cmd), &cmd);
6250 }
6251 
6252 int
6253 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6254 {
6255 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6256 	struct iwm_tfd *desc;
6257 	struct iwm_tx_data *txdata;
6258 	struct iwm_device_cmd *cmd;
6259 	struct mbuf *m;
6260 	bus_addr_t paddr;
6261 	uint32_t addr_lo;
6262 	int err = 0, i, paylen, off, s;
6263 	int idx, code, async, group_id;
6264 	size_t hdrlen, datasz;
6265 	uint8_t *data;
6266 	int generation = sc->sc_generation;
6267 
6268 	code = hcmd->id;
6269 	async = hcmd->flags & IWM_CMD_ASYNC;
6270 	idx = ring->cur;
6271 
6272 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6273 		paylen += hcmd->len[i];
6274 	}
6275 
6276 	/* If this command waits for a response, allocate response buffer. */
6277 	hcmd->resp_pkt = NULL;
6278 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
6279 		uint8_t *resp_buf;
6280 		KASSERT(!async);
6281 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6282 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6283 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
6284 			return ENOSPC;
6285 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6286 		    M_NOWAIT | M_ZERO);
6287 		if (resp_buf == NULL)
6288 			return ENOMEM;
6289 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
6290 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6291 	} else {
6292 		sc->sc_cmd_resp_pkt[idx] = NULL;
6293 	}
6294 
6295 	s = splnet();
6296 
6297 	desc = &ring->desc[idx];
6298 	txdata = &ring->data[idx];
6299 
6300 	group_id = iwm_cmd_groupid(code);
6301 	if (group_id != 0) {
6302 		hdrlen = sizeof(cmd->hdr_wide);
6303 		datasz = sizeof(cmd->data_wide);
6304 	} else {
6305 		hdrlen = sizeof(cmd->hdr);
6306 		datasz = sizeof(cmd->data);
6307 	}
6308 
6309 	if (paylen > datasz) {
6310 		/* Command is too large to fit in pre-allocated space. */
6311 		size_t totlen = hdrlen + paylen;
6312 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6313 			printf("%s: firmware command too long (%zd bytes)\n",
6314 			    DEVNAME(sc), totlen);
6315 			err = EINVAL;
6316 			goto out;
6317 		}
6318 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
6319 		if (m == NULL) {
6320 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6321 			    DEVNAME(sc), totlen);
6322 			err = ENOMEM;
6323 			goto out;
6324 		}
6325 		cmd = mtod(m, struct iwm_device_cmd *);
6326 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6327 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6328 		if (err) {
6329 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6330 			    DEVNAME(sc), totlen);
6331 			m_freem(m);
6332 			goto out;
6333 		}
6334 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6335 		paddr = txdata->map->dm_segs[0].ds_addr;
6336 	} else {
6337 		cmd = &ring->cmd[idx];
6338 		paddr = txdata->cmd_paddr;
6339 	}
6340 
6341 	if (group_id != 0) {
6342 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6343 		cmd->hdr_wide.group_id = group_id;
6344 		cmd->hdr_wide.qid = ring->qid;
6345 		cmd->hdr_wide.idx = idx;
6346 		cmd->hdr_wide.length = htole16(paylen);
6347 		cmd->hdr_wide.version = iwm_cmd_version(code);
6348 		data = cmd->data_wide;
6349 	} else {
6350 		cmd->hdr.code = code;
6351 		cmd->hdr.flags = 0;
6352 		cmd->hdr.qid = ring->qid;
6353 		cmd->hdr.idx = idx;
6354 		data = cmd->data;
6355 	}
6356 
6357 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6358 		if (hcmd->len[i] == 0)
6359 			continue;
6360 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6361 		off += hcmd->len[i];
6362 	}
6363 	KASSERT(off == paylen);
6364 
6365 	/* lo field is not aligned */
6366 	addr_lo = htole32((uint32_t)paddr);
6367 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6368 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
6369 	    | ((hdrlen + paylen) << 4));
6370 	desc->num_tbs = 1;
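	/*
	 * hi_n_len packs the upper bits of the DMA address into its low
	 * nibble and the byte count into bits 4-15. For example, assuming
	 * iwm_get_dma_hi_addr() returns address bits 32-35, a 64-byte
	 * command at physical address 0x1f0001000 yields hi_n_len 0x0401.
	 */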
6371 
6372 	if (paylen > datasz) {
6373 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6374 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6375 	} else {
6376 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6377 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6378 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6379 	}
6380 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6381 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6382 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6383 
6384 	/*
6385 	 * Wake up the NIC to make sure that the firmware will see the host
6386 	 * command; we will let the NIC sleep once all the host commands
6387 	 * have returned. This needs to be done only on 7000 family NICs.
6388 	 */
6389 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6390 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6391 			err = EBUSY;
6392 			goto out;
6393 		}
6394 	}
6395 
6396 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6397 
6398 	/* Kick command ring. */
6399 	ring->queued++;
6400 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6401 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6402 
6403 	if (!async) {
6404 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6405 		if (err == 0) {
6406 			/* if hardware is no longer up, return error */
6407 			if (generation != sc->sc_generation) {
6408 				err = ENXIO;
6409 				goto out;
6410 			}
6411 
6412 			/* Response buffer will be freed in iwm_free_resp(). */
6413 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6414 			sc->sc_cmd_resp_pkt[idx] = NULL;
6415 		} else if (generation == sc->sc_generation) {
6416 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6417 			    sc->sc_cmd_resp_len[idx]);
6418 			sc->sc_cmd_resp_pkt[idx] = NULL;
6419 		}
6420 	}
6421  out:
6422 	splx(s);
6423 
6424 	return err;
6425 }
6426 
6427 int
6428 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6429     uint16_t len, const void *data)
6430 {
6431 	struct iwm_host_cmd cmd = {
6432 		.id = id,
6433 		.len = { len, },
6434 		.data = { data, },
6435 		.flags = flags,
6436 	};
6437 
6438 	return iwm_send_cmd(sc, &cmd);
6439 }
6440 
6441 int
6442 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6443     uint32_t *status)
6444 {
6445 	struct iwm_rx_packet *pkt;
6446 	struct iwm_cmd_response *resp;
6447 	int err, resp_len;
6448 
6449 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6450 	cmd->flags |= IWM_CMD_WANT_RESP;
6451 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6452 
6453 	err = iwm_send_cmd(sc, cmd);
6454 	if (err)
6455 		return err;
6456 
6457 	pkt = cmd->resp_pkt;
6458 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6459 		return EIO;
6460 
6461 	resp_len = iwm_rx_packet_payload_len(pkt);
6462 	if (resp_len != sizeof(*resp)) {
6463 		iwm_free_resp(sc, cmd);
6464 		return EIO;
6465 	}
6466 
6467 	resp = (void *)pkt->data;
6468 	*status = le32toh(resp->status);
6469 	iwm_free_resp(sc, cmd);
6470 	return err;
6471 }
6472 
6473 int
6474 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6475     const void *data, uint32_t *status)
6476 {
6477 	struct iwm_host_cmd cmd = {
6478 		.id = id,
6479 		.len = { len, },
6480 		.data = { data, },
6481 	};
6482 
6483 	return iwm_send_cmd_status(sc, &cmd, status);
6484 }
6485 
6486 void
6487 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6488 {
6489 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6490 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6491 	hcmd->resp_pkt = NULL;
6492 }
6493 
6494 void
6495 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6496 {
6497 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6498 	struct iwm_tx_data *data;
6499 
6500 	if (qid != sc->cmdqid) {
6501 		return;	/* Not a command ack. */
6502 	}
6503 
6504 	data = &ring->data[idx];
6505 
6506 	if (data->m != NULL) {
6507 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6508 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6509 		bus_dmamap_unload(sc->sc_dmat, data->map);
6510 		m_freem(data->m);
6511 		data->m = NULL;
6512 	}
6513 	wakeup(&ring->desc[idx]);
6514 
6515 	if (ring->queued == 0) {
6516 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6517 		    DEVNAME(sc), code));
6518 	} else if (--ring->queued == 0) {
6519 		/*
6520 		 * 7000 family NICs are locked while commands are in progress.
6521 		 * All commands are now done so we may unlock the NIC again.
6522 		 */
6523 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6524 			iwm_nic_unlock(sc);
6525 	}
6526 }
6527 
6528 void
6529 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6530     uint16_t len)
6531 {
6532 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6533 	uint16_t val;
6534 
6535 	scd_bc_tbl = sc->sched_dma.vaddr;
6536 
6537 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6538 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6539 		len = roundup(len, 4) / 4;
6540 
6541 	val = htole16(sta_id << 12 | len);
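	/*
	 * The byte-count entry packs the station id into bits 12-15 and
	 * the length into bits 0-11. For example, assuming IWM_TX_CRC_SIZE
	 * and IWM_TX_DELIMITER_SIZE are 4 bytes each, a 200-byte frame is
	 * recorded as 208 bytes, or as 52 dwords when the DW_BC_TABLE
	 * flag scales the length.
	 */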
6542 
6543 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6544 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6545 
6546 	/* Update TX scheduler. */
6547 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6548 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6549 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6550 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6551 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6552 }
6553 
6554 void
6555 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6556 {
6557 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6558 	uint16_t val;
6559 
6560 	scd_bc_tbl = sc->sched_dma.vaddr;
6561 
6562 	val = htole16(1 | (sta_id << 12));
6563 
6564 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6565 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6566 
6567 	/* Update TX scheduler. */
6568 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6569 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6570 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6571 
6572 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6573 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6574 }
6575 
6576 /*
6577  * Fill in various bits for management frames, and leave them
6578  * unfilled for data frames (firmware takes care of that).
6579  * Return the selected legacy TX rate, or zero if HT/VHT is used.
6580  */
6581 uint8_t
6582 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6583     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6584 {
6585 	struct ieee80211com *ic = &sc->sc_ic;
6586 	struct ieee80211_node *ni = &in->in_ni;
6587 	const struct iwm_rate *rinfo;
6588 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6589 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6590 	int ridx, rate_flags;
6591 	uint8_t rate = 0;
6592 
6593 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6594 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6595 
6596 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6597 	    type != IEEE80211_FC0_TYPE_DATA) {
6598 		/* for non-data, use the lowest supported rate */
6599 		ridx = min_ridx;
6600 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6601 	} else if (ic->ic_fixed_mcs != -1) {
6602 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6603 			ridx = IWM_FIRST_OFDM_RATE;
6604 		else
6605 			ridx = sc->sc_fixed_ridx;
6606 	} else if (ic->ic_fixed_rate != -1) {
6607 		ridx = sc->sc_fixed_ridx;
6608 	} else {
6609 		int i;
6610 		/* Use firmware rateset retry table. */
6611 		tx->initial_rate_index = 0;
6612 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6613 		if (ni->ni_flags & IEEE80211_NODE_HT) /* VHT implies HT */
6614 			return 0;
6615 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6616 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6617 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6618 			if (iwm_rates[i].rate == (ni->ni_txrate &
6619 			    IEEE80211_RATE_VAL)) {
6620 				ridx = i;
6621 				break;
6622 			}
6623 		}
6624 		return iwm_rates[ridx].rate & 0xff;
6625 	}
6626 
6627 	rinfo = &iwm_rates[ridx];
6628 	if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 &&
6629 	    iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6630 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6631 	else if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
6632 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
6633 	else
6634 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
6635 	if (IWM_RIDX_IS_CCK(ridx))
6636 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
6637 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6638 	    type == IEEE80211_FC0_TYPE_DATA &&
6639 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6640 		uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
6641 		uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
6642 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
6643 		    IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
6644 		    ieee80211_node_supports_vht_chan80(ni))
6645 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
6646 		else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
6647 		    ieee80211_node_supports_ht_chan40(ni))
6648 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6649 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6650 			rate_flags |= IWM_RATE_MCS_VHT_MSK;
6651 		else
6652 			rate_flags |= IWM_RATE_MCS_HT_MSK;
6653 		if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
6654 		    in->in_phyctxt != NULL &&
6655 		    in->in_phyctxt->vht_chan_width == vht_chan_width) {
6656 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_80;
6657 			if (ieee80211_node_supports_vht_sgi80(ni))
6658 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6659 		} else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6660 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
6661 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6662 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6663 			if (ieee80211_node_supports_ht_sgi40(ni))
6664 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6665 		} else if (ieee80211_node_supports_ht_sgi20(ni))
6666 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
6667 		if (ni->ni_flags & IEEE80211_NODE_VHT) {
6668 			/*
6669 			 * ifmedia only provides an MCS index, no NSS.
6670 			 * Use a fixed SISO rate.
6671 			 */
6672 			tx->rate_n_flags = htole32(rate_flags |
6673 			    (ic->ic_fixed_mcs &
6674 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK));
6675 		} else
6676 			tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6677 	} else
6678 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6679 
6680 	return rate;
6681 }
6682 
6683 #define TB0_SIZE 16
6684 int
6685 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6686 {
6687 	struct ieee80211com *ic = &sc->sc_ic;
6688 	struct iwm_node *in = (void *)ni;
6689 	struct iwm_tx_ring *ring;
6690 	struct iwm_tx_data *data;
6691 	struct iwm_tfd *desc;
6692 	struct iwm_device_cmd *cmd;
6693 	struct iwm_tx_cmd *tx;
6694 	struct ieee80211_frame *wh;
6695 	struct ieee80211_key *k = NULL;
6696 	uint8_t rate;
6697 	uint8_t *ivp;
6698 	uint32_t flags;
6699 	u_int hdrlen;
6700 	bus_dma_segment_t *seg;
6701 	uint8_t tid, type, subtype;
6702 	int i, totlen, err, pad;
6703 	int qid, hasqos;
6704 
6705 	wh = mtod(m, struct ieee80211_frame *);
6706 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6707 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6708 	if (type == IEEE80211_FC0_TYPE_CTL)
6709 		hdrlen = sizeof(struct ieee80211_frame_min);
6710 	else
6711 		hdrlen = ieee80211_get_hdrlen(wh);
6712 
6713 	hasqos = ieee80211_has_qos(wh);
6714 	if (type == IEEE80211_FC0_TYPE_DATA)
6715 		tid = IWM_TID_NON_QOS;
6716 	else
6717 		tid = IWM_MAX_TID_COUNT;
6718 
6719 	/*
6720 	 * Map EDCA categories to Tx data queues.
6721 	 *
6722 	 * We use static data queue assignments even in DQA mode. We do not
6723 	 * need to share Tx queues between stations because we only implement
6724 	 * client mode; the firmware's station table contains only one entry
6725 	 * which represents our access point.
6726 	 */
6727 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6728 		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6729 	else
6730 		qid = ac;
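	/*
	 * For example, with DQA-capable firmware the four EDCA categories
	 * land on queues IWM_DQA_MIN_MGMT_QUEUE + 0 through + 3, while
	 * older firmware uses queues 0-3 directly; the aggregation check
	 * below may still override this choice.
	 */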
6731 
6732 	/* If possible, put this frame on an aggregation queue. */
6733 	if (hasqos) {
6734 		struct ieee80211_tx_ba *ba;
6735 		uint16_t qos = ieee80211_get_qos(wh);
6736 		int qostid = qos & IEEE80211_QOS_TID;
6737 		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6738 
6739 		ba = &ni->ni_tx_ba[qostid];
6740 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6741 		    type == IEEE80211_FC0_TYPE_DATA &&
6742 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6743 		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6744 		    ba->ba_state == IEEE80211_BA_AGREED) {
6745 			qid = agg_qid;
6746 			tid = qostid;
6747 			ac = ieee80211_up_to_ac(ic, qostid);
6748 		}
6749 	}
6750 
6751 	ring = &sc->txq[qid];
6752 	desc = &ring->desc[ring->cur];
6753 	memset(desc, 0, sizeof(*desc));
6754 	data = &ring->data[ring->cur];
6755 
6756 	cmd = &ring->cmd[ring->cur];
6757 	cmd->hdr.code = IWM_TX_CMD;
6758 	cmd->hdr.flags = 0;
6759 	cmd->hdr.qid = ring->qid;
6760 	cmd->hdr.idx = ring->cur;
6761 
6762 	tx = (void *)cmd->data;
6763 	memset(tx, 0, sizeof(*tx));
6764 
6765 	rate = iwm_tx_fill_cmd(sc, in, wh, tx);
6766 
6767 #if NBPFILTER > 0
6768 	if (sc->sc_drvbpf != NULL) {
6769 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6770 		uint16_t chan_flags;
6771 
6772 		tap->wt_flags = 0;
6773 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6774 		chan_flags = ni->ni_chan->ic_flags;
6775 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6776 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6777 			chan_flags &= ~IEEE80211_CHAN_HT;
6778 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6779 		}
6780 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6781 			chan_flags &= ~IEEE80211_CHAN_VHT;
6782 		tap->wt_chan_flags = htole16(chan_flags);
6783 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6784 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6785 		    type == IEEE80211_FC0_TYPE_DATA) {
6786 			tap->wt_rate = (0x80 | ni->ni_txmcs);
6787 		} else
6788 			tap->wt_rate = rate;
6789 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6790 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6791 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6792 
6793 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6794 		    m, BPF_DIRECTION_OUT);
6795 	}
6796 #endif
6797 	totlen = m->m_pkthdr.len;
6798 
6799 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6800 		k = ieee80211_get_txkey(ic, wh, ni);
6801 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6802 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6803 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6804 				return ENOBUFS;
6805 			/* 802.11 header may have moved. */
6806 			wh = mtod(m, struct ieee80211_frame *);
6807 			totlen = m->m_pkthdr.len;
6808 			k = NULL; /* skip hardware crypto below */
6809 		} else {
6810 			/* HW appends CCMP MIC */
6811 			totlen += IEEE80211_CCMP_HDRLEN;
6812 		}
6813 	}
6814 
6815 	flags = 0;
6816 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6817 		flags |= IWM_TX_CMD_FLG_ACK;
6818 	}
6819 
6820 	if (type == IEEE80211_FC0_TYPE_DATA &&
6821 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6822 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6823 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
6824 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6825 
6826 	tx->sta_id = IWM_STATION_ID;
6827 
6828 	if (type == IEEE80211_FC0_TYPE_MGT) {
6829 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6830 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6831 			tx->pm_frame_timeout = htole16(3);
6832 		else
6833 			tx->pm_frame_timeout = htole16(2);
6834 	} else {
6835 		if (type == IEEE80211_FC0_TYPE_CTL &&
6836 		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6837 			struct ieee80211_frame_min *mwh;
6838 			uint8_t *barfrm;
6839 			uint16_t ctl;
6840 			mwh = mtod(m, struct ieee80211_frame_min *);
6841 			barfrm = (uint8_t *)&mwh[1];
6842 			ctl = LE_READ_2(barfrm);
6843 			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6844 			    IEEE80211_BA_TID_INFO_SHIFT;
6845 			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6846 			tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6847 		}
6848 
6849 		tx->pm_frame_timeout = htole16(0);
6850 	}
6851 
6852 	if (hdrlen & 3) {
6853 		/* First segment length must be a multiple of 4. */
6854 		flags |= IWM_TX_CMD_FLG_MH_PAD;
6855 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6856 		pad = 4 - (hdrlen & 3);
6857 	} else
6858 		pad = 0;
6859 
6860 	tx->len = htole16(totlen);
6861 	tx->tid_tspec = tid;
6862 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6863 
6864 	/* Set physical address of "scratch area". */
6865 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6866 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6867 
6868 	/* Copy 802.11 header in TX command. */
6869 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6870 
6871 	if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6872 		/* Trim 802.11 header and prepend CCMP IV. */
6873 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6874 		ivp = mtod(m, u_int8_t *);
6875 		k->k_tsc++;	/* increment the 48-bit PN */
6876 		ivp[0] = k->k_tsc; /* PN0 */
6877 		ivp[1] = k->k_tsc >> 8; /* PN1 */
6878 		ivp[2] = 0;        /* Rsvd */
6879 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6880 		ivp[4] = k->k_tsc >> 16; /* PN2 */
6881 		ivp[5] = k->k_tsc >> 24; /* PN3 */
6882 		ivp[6] = k->k_tsc >> 32; /* PN4 */
6883 		ivp[7] = k->k_tsc >> 40; /* PN5 */
6884 
6885 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6886 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6887 		/* TX scheduler includes CCMP MIC length. */
6888 		totlen += IEEE80211_CCMP_MICLEN;
6889 	} else {
6890 		/* Trim 802.11 header. */
6891 		m_adj(m, hdrlen);
6892 		tx->sec_ctl = 0;
6893 	}
6894 
6895 	flags |= IWM_TX_CMD_FLG_BT_DIS;
6896 	if (!hasqos)
6897 		flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6898 
6899 	tx->tx_flags |= htole32(flags);
6900 
6901 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6902 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6903 	if (err && err != EFBIG) {
6904 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6905 		m_freem(m);
6906 		return err;
6907 	}
6908 	if (err) {
6909 		/* Too many DMA segments, linearize mbuf. */
6910 		if (m_defrag(m, M_DONTWAIT)) {
6911 			m_freem(m);
6912 			return ENOBUFS;
6913 		}
6914 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6915 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6916 		if (err) {
6917 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6918 			    err);
6919 			m_freem(m);
6920 			return err;
6921 		}
6922 	}
6923 	data->m = m;
6924 	data->in = in;
6925 	data->txmcs = ni->ni_txmcs;
6926 	data->txrate = ni->ni_txrate;
6927 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6928 	data->ampdu_txnss = ni->ni_vht_ss; /* updated upon Tx interrupt */
6929 
6930 	/* Fill TX descriptor. */
6931 	desc->num_tbs = 2 + data->map->dm_nsegs;
6932 
6933 	desc->tbs[0].lo = htole32(data->cmd_paddr);
6934 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6935 	    (TB0_SIZE << 4));
6936 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6937 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6938 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6939 	      + hdrlen + pad - TB0_SIZE) << 4));
6940 
6941 	/* Other DMA segments are for data payload. */
6942 	seg = data->map->dm_segs;
6943 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6944 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
6945 		desc->tbs[i+2].hi_n_len =
6946 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
6947 		    ((seg->ds_len) << 4));
6948 	}
6949 
6950 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6951 	    BUS_DMASYNC_PREWRITE);
6952 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6953 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6954 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6955 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6956 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6957 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6958 
6959 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6960 
6961 	/* Kick TX ring. */
6962 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6963 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6964 
6965 	/* Mark TX ring as full if we reach a certain threshold. */
6966 	if (++ring->queued > IWM_TX_RING_HIMARK) {
6967 		sc->qfullmsk |= 1 << ring->qid;
6968 	}
6969 
6970 	if (ic->ic_if.if_flags & IFF_UP)
6971 		sc->sc_tx_timer[ring->qid] = 15;
6972 
6973 	return 0;
6974 }
6975 
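/*
 * Ask the firmware to flush any frames still pending for our station
 * on all TIDs. Tx completions for flushed frames arrive through the
 * usual notification path.
 */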
6976 int
6977 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
6978 {
6979 	struct iwm_tx_path_flush_cmd flush_cmd = {
6980 		.sta_id = htole32(IWM_STATION_ID),
6981 		.tid_mask = htole16(0xffff),
6982 	};
6983 	int err;
6984 
6985 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
6986 	    sizeof(flush_cmd), &flush_cmd);
6987 	if (err)
6988 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
6989 	return err;
6990 }
6991 
6992 #define IWM_FLUSH_WAIT_MS	2000
6993 
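/*
 * Wait until all Tx rings except the command ring have drained.
 * The Tx completion path wakes us up as rings empty out; otherwise
 * tsleep_nsec() times out after IWM_FLUSH_WAIT_MS per attempt.
 */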
6994 int
6995 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
6996 {
6997 	int i, err;
6998 
6999 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
7000 		struct iwm_tx_ring *ring = &sc->txq[i];
7001 
7002 		if (i == sc->cmdqid)
7003 			continue;
7004 
7005 		while (ring->queued > 0) {
7006 			err = tsleep_nsec(ring, 0, "iwmflush",
7007 			    MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
7008 			if (err)
7009 				return err;
7010 		}
7011 	}
7012 
7013 	return 0;
7014 }
7015 
7016 void
7017 iwm_led_enable(struct iwm_softc *sc)
7018 {
7019 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
7020 }
7021 
7022 void
7023 iwm_led_disable(struct iwm_softc *sc)
7024 {
7025 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
7026 }
7027 
7028 int
7029 iwm_led_is_enabled(struct iwm_softc *sc)
7030 {
7031 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
7032 }
7033 
7034 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
7035 
7036 void
7037 iwm_led_blink_timeout(void *arg)
7038 {
7039 	struct iwm_softc *sc = arg;
7040 
7041 	if (iwm_led_is_enabled(sc))
7042 		iwm_led_disable(sc);
7043 	else
7044 		iwm_led_enable(sc);
7045 
7046 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7047 }
7048 
7049 void
7050 iwm_led_blink_start(struct iwm_softc *sc)
7051 {
7052 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7053 	iwm_led_enable(sc);
7054 }
7055 
7056 void
7057 iwm_led_blink_stop(struct iwm_softc *sc)
7058 {
7059 	timeout_del(&sc->sc_led_blink_to);
7060 	iwm_led_disable(sc);
7061 }
7062 
7063 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
7064 
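/*
 * With beacon filtering enabled the firmware processes beacons from
 * our AP itself and interrupts the host only when something relevant
 * changes, which saves power while associated.
 */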
7065 int
7066 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
7067     struct iwm_beacon_filter_cmd *cmd)
7068 {
7069 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
7070 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
7071 }
7072 
7073 void
7074 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
7075     struct iwm_beacon_filter_cmd *cmd)
7076 {
7077 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
7078 }
7079 
7080 int
7081 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
7082 {
7083 	struct iwm_beacon_filter_cmd cmd = {
7084 		IWM_BF_CMD_CONFIG_DEFAULTS,
7085 		.bf_enable_beacon_filter = htole32(1),
7086 		.ba_enable_beacon_abort = htole32(enable),
7087 	};
7088 
7089 	if (!sc->sc_bf.bf_enabled)
7090 		return 0;
7091 
7092 	sc->sc_bf.ba_enabled = enable;
7093 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7094 	return iwm_beacon_filter_send_cmd(sc, &cmd);
7095 }
7096 
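/*
 * Build the per-MAC power table command: pick a keep-alive period of
 * at least 3 DTIM intervals (and no less than
 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC), and enable power save unless we
 * are in monitor mode.
 */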
7097 void
7098 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
7099     struct iwm_mac_power_cmd *cmd)
7100 {
7101 	struct ieee80211com *ic = &sc->sc_ic;
7102 	struct ieee80211_node *ni = &in->in_ni;
7103 	int dtim_period, dtim_msec, keep_alive;
7104 
7105 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7106 	    in->in_color));
7107 	if (ni->ni_dtimperiod)
7108 		dtim_period = ni->ni_dtimperiod;
7109 	else
7110 		dtim_period = 1;
7111 
7112 	/*
7113 	 * Regardless of power management state, the driver must set the
7114 	 * keep-alive period. The firmware uses it to send keep-alive NDPs
7115 	 * immediately after association. Ensure that the keep-alive period
7116 	 * is at least 3 * DTIM.
7117 	 */
7118 	dtim_msec = dtim_period * ni->ni_intval;
7119 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
7120 	keep_alive = roundup(keep_alive, 1000) / 1000;
7121 	cmd->keep_alive_seconds = htole16(keep_alive);
7122 
7123 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7124 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7125 }
7126 
7127 int
7128 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
7129 {
7130 	int err;
7131 	int ba_enable;
7132 	struct iwm_mac_power_cmd cmd;
7133 
7134 	memset(&cmd, 0, sizeof(cmd));
7135 
7136 	iwm_power_build_cmd(sc, in, &cmd);
7137 
7138 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
7139 	    sizeof(cmd), &cmd);
7140 	if (err != 0)
7141 		return err;
7142 
7143 	ba_enable = !!(cmd.flags &
7144 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
7145 	return iwm_update_beacon_abort(sc, in, ba_enable);
7146 }
7147 
7148 int
7149 iwm_power_update_device(struct iwm_softc *sc)
7150 {
7151 	struct iwm_device_power_cmd cmd = { };
7152 	struct ieee80211com *ic = &sc->sc_ic;
7153 
7154 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7155 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7156 
7157 	return iwm_send_cmd_pdu(sc,
7158 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
7159 }
7160 
7161 int
7162 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
7163 {
7164 	struct iwm_beacon_filter_cmd cmd = {
7165 		IWM_BF_CMD_CONFIG_DEFAULTS,
7166 		.bf_enable_beacon_filter = htole32(1),
7167 	};
7168 	int err;
7169 
7170 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7171 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7172 
7173 	if (err == 0)
7174 		sc->sc_bf.bf_enabled = 1;
7175 
7176 	return err;
7177 }
7178 
7179 int
7180 iwm_disable_beacon_filter(struct iwm_softc *sc)
7181 {
7182 	struct iwm_beacon_filter_cmd cmd;
7183 	int err;
7184 
7185 	memset(&cmd, 0, sizeof(cmd));
7186 
7187 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7188 	if (err == 0)
7189 		sc->sc_bf.bf_enabled = 0;
7190 
7191 	return err;
7192 }
7193 
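/*
 * Add our station to the firmware's station table, or update the
 * existing entry if 'update' is set. This assigns the station's Tx
 * queues and derives MIMO, 40/80 MHz, and A-MPDU size and density
 * settings from the negotiated HT/VHT parameters.
 */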
7194 int
7195 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
7196 {
7197 	struct iwm_add_sta_cmd add_sta_cmd;
7198 	int err;
7199 	uint32_t status, aggsize;
7200 	const uint32_t max_aggsize = (IWM_STA_FLG_MAX_AGG_SIZE_64K >>
7201 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT);
7202 	size_t cmdsize;
7203 	struct ieee80211com *ic = &sc->sc_ic;
7204 
7205 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
7206 		panic("STA already added");
7207 
7208 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
7209 
7210 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7211 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7212 	else
7213 		add_sta_cmd.sta_id = IWM_STATION_ID;
7214 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
7215 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7216 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
7217 		else
7218 			add_sta_cmd.station_type = IWM_STA_LINK;
7219 	}
7220 	add_sta_cmd.mac_id_n_color
7221 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
7222 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7223 		int qid;
7224 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
7225 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7226 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7227 		else
7228 			qid = IWM_AUX_QUEUE;
7229 		in->tfd_queue_msk |= (1 << qid);
7230 	} else {
7231 		int ac;
7232 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7233 			int qid = ac;
7234 			if (isset(sc->sc_enabled_capa,
7235 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7236 				qid += IWM_DQA_MIN_MGMT_QUEUE;
7237 			in->tfd_queue_msk |= (1 << qid);
7238 		}
7239 	}
7240 	if (!update) {
7241 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7242 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7243 			    etherbroadcastaddr);
7244 		else
7245 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7246 			    in->in_macaddr);
7247 	}
7248 	add_sta_cmd.add_modify = update ? 1 : 0;
7249 	add_sta_cmd.station_flags_msk
7250 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
7251 	if (update) {
7252 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
7253 		    IWM_STA_MODIFY_TID_DISABLE_TX);
7254 	}
7255 	add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
7256 	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);
7257 
7258 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7259 		add_sta_cmd.station_flags_msk
7260 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
7261 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
7262 
7263 		if (iwm_mimo_enabled(sc)) {
7264 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7265 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
7266 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
7267 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
7268 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
7269 					add_sta_cmd.station_flags |=
7270 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7271 				}
7272 			} else {
7273 				if (in->in_ni.ni_rxmcs[1] != 0) {
7274 					add_sta_cmd.station_flags |=
7275 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7276 				}
7277 				if (in->in_ni.ni_rxmcs[2] != 0) {
7278 					add_sta_cmd.station_flags |=
7279 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
7280 				}
7281 			}
7282 		}
7283 
7284 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
7285 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
7286 			add_sta_cmd.station_flags |= htole32(
7287 			    IWM_STA_FLG_FAT_EN_40MHZ);
7288 		}
7289 
7290 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7291 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
7292 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
7293 				add_sta_cmd.station_flags |= htole32(
7294 				    IWM_STA_FLG_FAT_EN_80MHZ);
7295 			}
7296 			aggsize = (in->in_ni.ni_vhtcaps &
7297 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
7298 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
7299 		} else {
7300 			aggsize = (in->in_ni.ni_ampdu_param &
7301 			    IEEE80211_AMPDU_PARAM_LE);
7302 		}
7303 		if (aggsize > max_aggsize)
7304 			aggsize = max_aggsize;
7305 		add_sta_cmd.station_flags |= htole32((aggsize <<
7306 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT) &
7307 		    IWM_STA_FLG_MAX_AGG_SIZE_MSK);
7308 
7309 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
7310 		case IEEE80211_AMPDU_PARAM_SS_2:
7311 			add_sta_cmd.station_flags
7312 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
7313 			break;
7314 		case IEEE80211_AMPDU_PARAM_SS_4:
7315 			add_sta_cmd.station_flags
7316 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
7317 			break;
7318 		case IEEE80211_AMPDU_PARAM_SS_8:
7319 			add_sta_cmd.station_flags
7320 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
7321 			break;
7322 		case IEEE80211_AMPDU_PARAM_SS_16:
7323 			add_sta_cmd.station_flags
7324 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
7325 			break;
7326 		default:
7327 			break;
7328 		}
7329 	}
7330 
7331 	status = IWM_ADD_STA_SUCCESS;
7332 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7333 		cmdsize = sizeof(add_sta_cmd);
7334 	else
7335 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7336 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
7337 	    &add_sta_cmd, &status);
7338 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7339 		err = EIO;
7340 
7341 	return err;
7342 }
7343 
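/*
 * Add the auxiliary station which the firmware uses for frames that
 * do not belong to a BSS, such as probe requests sent while scanning.
 */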
7344 int
7345 iwm_add_aux_sta(struct iwm_softc *sc)
7346 {
7347 	struct iwm_add_sta_cmd cmd;
7348 	int err, qid;
7349 	uint32_t status;
7350 	size_t cmdsize;
7351 
7352 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7353 		qid = IWM_DQA_AUX_QUEUE;
7354 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
7355 		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
7356 	} else {
7357 		qid = IWM_AUX_QUEUE;
7358 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
7359 	}
7360 	if (err)
7361 		return err;
7362 
7363 	memset(&cmd, 0, sizeof(cmd));
7364 	cmd.sta_id = IWM_AUX_STA_ID;
7365 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7366 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
7367 	cmd.mac_id_n_color =
7368 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
7369 	cmd.tfd_queue_msk = htole32(1 << qid);
7370 	cmd.tid_disable_tx = htole16(0xffff);
7371 
7372 	status = IWM_ADD_STA_SUCCESS;
7373 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7374 		cmdsize = sizeof(cmd);
7375 	else
7376 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7377 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
7378 	    &status);
7379 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7380 		err = EIO;
7381 
7382 	return err;
7383 }
7384 
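/*
 * Set or clear the firmware's "drain" flag for our station. Draining
 * stops the firmware from accepting new frames for the station so
 * that its Tx queues can be flushed and emptied.
 */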
7385 int
7386 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *in, int drain)
7387 {
7388 	struct iwm_add_sta_cmd cmd;
7389 	int err;
7390 	uint32_t status;
7391 	size_t cmdsize;
7392 
7393 	memset(&cmd, 0, sizeof(cmd));
7394 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7395 	    in->in_color));
7396 	cmd.sta_id = IWM_STATION_ID;
7397 	cmd.add_modify = IWM_STA_MODE_MODIFY;
7398 	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
7399 	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);
7400 
7401 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7402 		cmdsize = sizeof(cmd);
7403 	else
7404 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7405 
7406 	status = IWM_ADD_STA_SUCCESS;
7407 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
7408 	    cmdsize, &cmd, &status);
7409 	if (err) {
7410 		printf("%s: could not update sta (error %d)\n",
7411 		    DEVNAME(sc), err);
7412 		return err;
7413 	}
7414 
7415 	switch (status & IWM_ADD_STA_STATUS_MASK) {
7416 	case IWM_ADD_STA_SUCCESS:
7417 		break;
7418 	default:
7419 		err = EIO;
7420 		printf("%s: Couldn't %s draining for station\n",
7421 		    DEVNAME(sc), drain ? "enable" : "disable");
7422 		break;
7423 	}
7424 
7425 	return err;
7426 }
7427 
7428 int
7429 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
7430 {
7431 	int err;
7432 
7433 	sc->sc_flags |= IWM_FLAG_TXFLUSH;
7434 
7435 	err = iwm_drain_sta(sc, in, 1);
7436 	if (err)
7437 		goto done;
7438 
7439 	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
7440 	if (err) {
7441 		printf("%s: could not flush Tx path (error %d)\n",
7442 		    DEVNAME(sc), err);
7443 		goto done;
7444 	}
7445 
7446 	/*
7447 	 * Flushing Tx rings may fail if the AP has disappeared.
7448 	 * We can rely on iwm_newstate_task() to reset everything and begin
7449 	 * scanning again if we are left with outstanding frames on queues.
7450 	 */
7451 	err = iwm_wait_tx_queues_empty(sc);
7452 	if (err)
7453 		goto done;
7454 
7455 	err = iwm_drain_sta(sc, in, 0);
7456 done:
7457 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
7458 	return err;
7459 }
7460 
7461 int
7462 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
7463 {
7464 	struct ieee80211com *ic = &sc->sc_ic;
7465 	struct iwm_rm_sta_cmd rm_sta_cmd;
7466 	int err;
7467 
7468 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
7469 		panic("sta already removed");
7470 
7471 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
7472 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7473 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7474 	else
7475 		rm_sta_cmd.sta_id = IWM_STATION_ID;
7476 
7477 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7478 	    &rm_sta_cmd);
7479 
7480 	return err;
7481 }
7482 
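/*
 * Build the Rx chain selection for scanning: enable all valid Rx
 * antennas and force the selection from the driver side.
 */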
7483 uint16_t
7484 iwm_scan_rx_chain(struct iwm_softc *sc)
7485 {
7486 	uint16_t rx_chain;
7487 	uint8_t rx_ant;
7488 
7489 	rx_ant = iwm_fw_valid_rx_ant(sc);
7490 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
7491 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
7492 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
7493 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
7494 	return htole16(rx_chain);
7495 }
7496 
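/*
 * Pick the rate and antenna for probe requests: rotate through the
 * valid Tx antennas and use 1 Mbps CCK on 2 GHz (unless CCK is
 * disallowed) or 6 Mbps OFDM otherwise.
 */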
7497 uint32_t
7498 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
7499 {
7500 	uint32_t tx_ant;
7501 	int i, ind;
7502 
7503 	for (i = 0, ind = sc->sc_scan_last_antenna;
7504 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
7505 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
7506 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
7507 			sc->sc_scan_last_antenna = ind;
7508 			break;
7509 		}
7510 	}
7511 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
7512 
7513 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
7514 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
7515 				   tx_ant);
7516 	else
7517 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
7518 }
7519 
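/*
 * Fill the LMAC scan request's channel array with every channel known
 * to net80211, limited by the number of scan channels the firmware
 * supports.
 */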
7520 uint8_t
7521 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
7522     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
7523 {
7524 	struct ieee80211com *ic = &sc->sc_ic;
7525 	struct ieee80211_channel *c;
7526 	uint8_t nchan;
7527 
7528 	for (nchan = 0, c = &ic->ic_channels[1];
7529 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7530 	    nchan < sc->sc_capa_n_scan_channels;
7531 	    c++) {
7532 		if (c->ic_flags == 0)
7533 			continue;
7534 
7535 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
7536 		chan->iter_count = htole16(1);
7537 		chan->iter_interval = 0;
7538 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
7539 		if (n_ssids != 0 && !bgscan)
7540 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
7541 		chan++;
7542 		nchan++;
7543 	}
7544 
7545 	return nchan;
7546 }
7547 
7548 uint8_t
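/* As above, but for the UMAC scan request's channel layout. */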
7549 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
7550     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
7551 {
7552 	struct ieee80211com *ic = &sc->sc_ic;
7553 	struct ieee80211_channel *c;
7554 	uint8_t nchan;
7555 
7556 	for (nchan = 0, c = &ic->ic_channels[1];
7557 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7558 	    nchan < sc->sc_capa_n_scan_channels;
7559 	    c++) {
7560 		if (c->ic_flags == 0)
7561 			continue;
7562 
7563 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
7564 		chan->iter_count = 1;
7565 		chan->iter_interval = htole16(0);
7566 		if (n_ssids != 0 && !bgscan)
7567 			chan->flags = htole32(1 << 0); /* select SSID 0 */
7568 		chan++;
7569 		nchan++;
7570 	}
7571 
7572 	return nchan;
7573 }
7574 
7575 int
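/*
 * Older firmware expects the probe request template in the v1 layout.
 * Build the template in the current layout and copy the members over.
 */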
7576 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
7577 {
7578 	struct iwm_scan_probe_req preq2;
7579 	int err, i;
7580 
7581 	err = iwm_fill_probe_req(sc, &preq2);
7582 	if (err)
7583 		return err;
7584 
7585 	preq1->mac_header = preq2.mac_header;
7586 	for (i = 0; i < nitems(preq1->band_data); i++)
7587 		preq1->band_data[i] = preq2.band_data[i];
7588 	preq1->common_data = preq2.common_data;
7589 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
7590 	return 0;
7591 }
7592 
7593 int
7594 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7595 {
7596 	struct ieee80211com *ic = &sc->sc_ic;
7597 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7598 	struct ieee80211_rateset *rs;
7599 	size_t remain = sizeof(preq->buf);
7600 	uint8_t *frm, *pos;
7601 
7602 	memset(preq, 0, sizeof(*preq));
7603 
7604 	if (remain < sizeof(*wh) + 2)
7605 		return ENOBUFS;
7606 
7607 	/*
7608 	 * Build a probe request frame.  Most of the following code is a
7609 	 * copy & paste of what is done in net80211.
7610 	 */
7611 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7612 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7613 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7614 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7615 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7616 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7617 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7618 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7619 
7620 	frm = (uint8_t *)(wh + 1);
7621 
7622 	*frm++ = IEEE80211_ELEMID_SSID;
7623 	*frm++ = 0;
7624 	/* hardware inserts SSID */
7625 
7626 	/* Tell firmware where the MAC header and SSID IE are. */
7627 	preq->mac_header.offset = 0;
7628 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7629 	remain -= frm - (uint8_t *)wh;
7630 
7631 	/* Fill in 2GHz IEs and tell firmware where they are. */
7632 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7633 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7634 		if (remain < 4 + rs->rs_nrates)
7635 			return ENOBUFS;
7636 	} else if (remain < 2 + rs->rs_nrates)
7637 		return ENOBUFS;
7638 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7639 	pos = frm;
7640 	frm = ieee80211_add_rates(frm, rs);
7641 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7642 		frm = ieee80211_add_xrates(frm, rs);
7643 	remain -= frm - pos;
7644 
7645 	if (isset(sc->sc_enabled_capa,
7646 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7647 		if (remain < 3)
7648 			return ENOBUFS;
7649 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7650 		*frm++ = 1;
7651 		*frm++ = 0;
7652 		remain -= 3;
7653 	}
7654 	preq->band_data[0].len = htole16(frm - pos);
7655 
7656 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7657 		/* Fill in 5GHz IEs. */
7658 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7659 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7660 			if (remain < 4 + rs->rs_nrates)
7661 				return ENOBUFS;
7662 		} else if (remain < 2 + rs->rs_nrates)
7663 			return ENOBUFS;
7664 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7665 		pos = frm;
7666 		frm = ieee80211_add_rates(frm, rs);
7667 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7668 			frm = ieee80211_add_xrates(frm, rs);
7669 		preq->band_data[1].len = htole16(frm - pos);
7670 		remain -= frm - pos;
7671 		if (ic->ic_flags & IEEE80211_F_VHTON) {
7672 			if (remain < 14)
7673 				return ENOBUFS;
7674 			frm = ieee80211_add_vhtcaps(frm, ic);
7675 			remain -= frm - pos;
7676 		}
7677 	}
7678 
7679 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7680 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7681 	pos = frm;
7682 	if (ic->ic_flags & IEEE80211_F_HTON) {
7683 		if (remain < 28)
7684 			return ENOBUFS;
7685 		frm = ieee80211_add_htcaps(frm, ic);
7686 		/* XXX add WME info? */
7687 		remain -= frm - pos;
7688 	}
7689 
7690 	preq->common_data.len = htole16(frm - pos);
7691 
7692 	return 0;
7693 }
7694 
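/*
 * Start a scan using the older LMAC scan API. A background scan runs
 * while we remain associated with our AP, and its firmware command is
 * sent asynchronously.
 */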
7695 int
7696 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7697 {
7698 	struct ieee80211com *ic = &sc->sc_ic;
7699 	struct iwm_host_cmd hcmd = {
7700 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
7701 		.len = { 0, },
7702 		.data = { NULL, },
7703 		.flags = 0,
7704 	};
7705 	struct iwm_scan_req_lmac *req;
7706 	struct iwm_scan_probe_req_v1 *preq;
7707 	size_t req_len;
7708 	int err, async = bgscan;
7709 
7710 	req_len = sizeof(struct iwm_scan_req_lmac) +
7711 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7712 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7713 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7714 		return ENOMEM;
7715 	req = malloc(req_len, M_DEVBUF,
7716 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7717 	if (req == NULL)
7718 		return ENOMEM;
7719 
7720 	hcmd.len[0] = (uint16_t)req_len;
7721 	hcmd.data[0] = (void *)req;
7722 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7723 
7724 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7725 	req->active_dwell = 10;
7726 	req->passive_dwell = 110;
7727 	req->fragmented_dwell = 44;
7728 	req->extended_dwell = 90;
7729 	if (bgscan) {
7730 		req->max_out_time = htole32(120);
7731 		req->suspend_time = htole32(120);
7732 	} else {
7733 		req->max_out_time = htole32(0);
7734 		req->suspend_time = htole32(0);
7735 	}
7736 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
7737 	req->rx_chain_select = iwm_scan_rx_chain(sc);
7738 	req->iter_num = htole32(1);
7739 	req->delay = 0;
7740 
7741 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
7742 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
7743 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
7744 	if (ic->ic_des_esslen == 0)
7745 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
7746 	else
7747 		req->scan_flags |=
7748 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
7749 	if (isset(sc->sc_enabled_capa,
7750 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
7751 	    isset(sc->sc_enabled_capa,
7752 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
7753 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
7754 
7755 	req->flags = htole32(IWM_PHY_BAND_24);
7756 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7757 		req->flags |= htole32(IWM_PHY_BAND_5);
7758 	req->filter_flags =
7759 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
7760 
7761 	/* Tx flags 2 GHz. */
7762 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7763 	    IWM_TX_CMD_FLG_BT_DIS);
7764 	req->tx_cmd[0].rate_n_flags =
7765 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7766 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
7767 
7768 	/* Tx flags 5 GHz. */
7769 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7770 	    IWM_TX_CMD_FLG_BT_DIS);
7771 	req->tx_cmd[1].rate_n_flags =
7772 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7773 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
7774 
7775 	/* Check if we're doing an active directed scan. */
7776 	if (ic->ic_des_esslen != 0) {
7777 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7778 		req->direct_scan[0].len = ic->ic_des_esslen;
7779 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
7780 		    ic->ic_des_esslen);
7781 	}
7782 
7783 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
7784 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
7785 	    ic->ic_des_esslen != 0, bgscan);
7786 
7787 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
7788 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7789 	    sc->sc_capa_n_scan_channels));
7790 	err = iwm_fill_probe_req_v1(sc, preq);
7791 	if (err) {
7792 		free(req, M_DEVBUF, req_len);
7793 		return err;
7794 	}
7795 
7796 	/* Specify the scan plan: We'll do one iteration. */
7797 	req->schedule[0].iterations = 1;
7798 	req->schedule[0].full_scan_mul = 1;
7799 
7800 	/* Disable EBS. */
7801 	req->channel_opt[0].non_ebs_ratio = 1;
7802 	req->channel_opt[1].non_ebs_ratio = 1;
7803 
7804 	err = iwm_send_cmd(sc, &hcmd);
7805 	free(req, M_DEVBUF, req_len);
7806 	return err;
7807 }
7808 
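/*
 * Send the one-time UMAC scan configuration: Tx/Rx antennas, legacy
 * rates, dwell times, our MAC address, and the channel list.
 */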
7809 int
7810 iwm_config_umac_scan(struct iwm_softc *sc)
7811 {
7812 	struct ieee80211com *ic = &sc->sc_ic;
7813 	struct iwm_scan_config *scan_config;
7814 	int err, nchan;
7815 	size_t cmd_size;
7816 	struct ieee80211_channel *c;
7817 	struct iwm_host_cmd hcmd = {
7818 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
7819 		.flags = 0,
7820 	};
7821 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
7822 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
7823 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
7824 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
7825 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
7826 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
7827 	    IWM_SCAN_CONFIG_RATE_54M);
7828 
7829 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7830 
7831 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
7832 	if (scan_config == NULL)
7833 		return ENOMEM;
7834 
7835 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7836 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7837 	scan_config->legacy_rates = htole32(rates |
7838 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
7839 
7840 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7841 	scan_config->dwell_active = 10;
7842 	scan_config->dwell_passive = 110;
7843 	scan_config->dwell_fragmented = 44;
7844 	scan_config->dwell_extended = 90;
7845 	scan_config->out_of_channel_time = htole32(0);
7846 	scan_config->suspend_time = htole32(0);
7847 
7848 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7849 
7850 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
7851 	scan_config->channel_flags = 0;
7852 
7853 	for (c = &ic->ic_channels[1], nchan = 0;
7854 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7855 	    nchan < sc->sc_capa_n_scan_channels; c++) {
7856 		if (c->ic_flags == 0)
7857 			continue;
7858 		scan_config->channel_array[nchan++] =
7859 		    ieee80211_mhz2ieee(c->ic_freq, 0);
7860 	}
7861 
7862 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
7863 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
7864 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
7865 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
7866 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
7867 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
7868 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
7869 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
7870 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
7871 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
7872 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
7873 
7874 	hcmd.data[0] = scan_config;
7875 	hcmd.len[0] = cmd_size;
7876 
7877 	err = iwm_send_cmd(sc, &hcmd);
7878 	free(scan_config, M_DEVBUF, cmd_size);
7879 	return err;
7880 }
7881 
7882 int
7883 iwm_umac_scan_size(struct iwm_softc *sc)
7884 {
7885 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
7886 	int tail_size;
7887 
7888 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7889 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
7890 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7891 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
7892 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7893 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
7894 	else
7895 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
7896 
7897 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
7898 	    sc->sc_capa_n_scan_channels + tail_size;
7899 }
7900 
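/*
 * The UMAC scan request layout differs between firmware API versions
 * (v1, v7, v8). The helpers below return pointers into the
 * version-specific parts of the request.
 */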
7901 struct iwm_scan_umac_chan_param *
7902 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7903     struct iwm_scan_req_umac *req)
7904 {
7905 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7906 		return &req->v8.channel;
7907 
7908 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7909 		return &req->v7.channel;
7910 
7911 	return &req->v1.channel;
7912 }
7913 
7914 void *
7915 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7916 {
7917 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7918 		return (void *)&req->v8.data;
7919 
7920 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7921 		return (void *)&req->v7.data;
7922 
7923 	return (void *)&req->v1.data;
7925 }
7926 
7927 /* adaptive dwell max budget time [TU] for full scan */
7928 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7929 /* adaptive dwell max budget time [TU] for directed scan */
7930 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7931 /* adaptive dwell default high band APs number */
7932 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7933 /* adaptive dwell default low band APs number */
7934 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7935 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7936 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7937 
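/*
 * Start a scan using the UMAC scan API. On firmware with adaptive
 * dwell support the firmware itself decides how long to linger on
 * each channel, within the budgets configured here.
 */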
7938 int
7939 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7940 {
7941 	struct ieee80211com *ic = &sc->sc_ic;
7942 	struct iwm_host_cmd hcmd = {
7943 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
7944 		.len = { 0, },
7945 		.data = { NULL, },
7946 		.flags = 0,
7947 	};
7948 	struct iwm_scan_req_umac *req;
7949 	void *cmd_data, *tail_data;
7950 	struct iwm_scan_req_umac_tail_v2 *tail;
7951 	struct iwm_scan_req_umac_tail_v1 *tailv1;
7952 	struct iwm_scan_umac_chan_param *chanparam;
7953 	size_t req_len;
7954 	int err, async = bgscan;
7955 
7956 	req_len = iwm_umac_scan_size(sc);
7957 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
7958 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
7959 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7960 		return ERANGE;
7961 	req = malloc(req_len, M_DEVBUF,
7962 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7963 	if (req == NULL)
7964 		return ENOMEM;
7965 
7966 	hcmd.len[0] = (uint16_t)req_len;
7967 	hcmd.data[0] = (void *)req;
7968 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7969 
7970 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7971 		req->v7.adwell_default_n_aps_social =
7972 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7973 		req->v7.adwell_default_n_aps =
7974 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
7975 
7976 		if (ic->ic_des_esslen != 0)
7977 			req->v7.adwell_max_budget =
7978 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7979 		else
7980 			req->v7.adwell_max_budget =
7981 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7982 
7983 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7984 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
7985 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
7986 
7987 		if (isset(sc->sc_ucode_api,
7988 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
7989 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
7990 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
7991 		} else {
7992 			req->v7.active_dwell = 10;
7993 			req->v7.passive_dwell = 110;
7994 			req->v7.fragmented_dwell = 44;
7995 		}
7996 	} else {
7997 		/* These timings correspond to iwlwifi's UNASSOC scan. */
7998 		req->v1.active_dwell = 10;
7999 		req->v1.passive_dwell = 110;
8000 		req->v1.fragmented_dwell = 44;
8001 		req->v1.extended_dwell = 90;
8002 
8003 		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8004 	}
8005 
8006 	if (bgscan) {
8007 		const uint32_t timeout = htole32(120);
8008 		if (isset(sc->sc_ucode_api,
8009 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8010 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8011 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8012 		} else if (isset(sc->sc_ucode_api,
8013 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8014 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8015 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8016 		} else {
8017 			req->v1.max_out_time = timeout;
8018 			req->v1.suspend_time = timeout;
8019 		}
8020 	}
8021 
8022 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8023 
8024 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
8025 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
8026 	chanparam->count = iwm_umac_scan_fill_channels(sc,
8027 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
8028 	    ic->ic_des_esslen != 0, bgscan);
8029 	chanparam->flags = 0;
8030 
8031 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
8032 	    sc->sc_capa_n_scan_channels;
8033 	tail = tail_data;
8034 	/* tail v1 layout differs in preq and direct_scan member fields. */
8035 	tailv1 = tail_data;
8036 
8037 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
8038 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
8039 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8040 		req->v8.general_flags2 =
8041 			IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
8042 	}
8043 
8044 	if (ic->ic_des_esslen != 0) {
8045 		if (isset(sc->sc_ucode_api,
8046 		    IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
8047 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8048 			tail->direct_scan[0].len = ic->ic_des_esslen;
8049 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
8050 			    ic->ic_des_esslen);
8051 		} else {
8052 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8053 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
8054 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
8055 			    ic->ic_des_esslen);
8056 		}
8057 		req->general_flags |=
8058 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
8059 	} else
8060 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
8061 
8062 	if (isset(sc->sc_enabled_capa,
8063 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
8064 	    isset(sc->sc_enabled_capa,
8065 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
8066 		req->general_flags |=
8067 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
8068 
8069 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8070 		req->general_flags |=
8071 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
8072 	} else {
8073 		req->general_flags |=
8074 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
8075 	}
8076 
8077 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
8078 		err = iwm_fill_probe_req(sc, &tail->preq);
8079 	else
8080 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
8081 	if (err) {
8082 		free(req, M_DEVBUF, req_len);
8083 		return err;
8084 	}
8085 
8086 	/* Specify the scan plan: We'll do one iteration. */
8087 	tail->schedule[0].interval = 0;
8088 	tail->schedule[0].iter_count = 1;
8089 
8090 	err = iwm_send_cmd(sc, &hcmd);
8091 	free(req, M_DEVBUF, req_len);
8092 	return err;
8093 }
8094 
8095 void
8096 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
8097 {
8098 	struct ieee80211com *ic = &sc->sc_ic;
8099 	struct ifnet *ifp = IC2IFP(ic);
8100 	char alpha2[3];
8101 
8102 	snprintf(alpha2, sizeof(alpha2), "%c%c",
8103 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
8104 
8105 	if (ifp->if_flags & IFF_DEBUG) {
8106 		printf("%s: firmware has detected regulatory domain '%s' "
8107 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
8108 	}
8109 
8110 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
8111 }
8112 
8113 uint8_t
8114 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
8115 {
8116 	int i;
8117 	uint8_t rval;
8118 
8119 	for (i = 0; i < rs->rs_nrates; i++) {
8120 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
8121 		if (rval == iwm_rates[ridx].rate)
8122 			return rs->rs_rates[i];
8123 	}
8124 
8125 	return 0;
8126 }
8127 
8128 int
8129 iwm_rval2ridx(int rval)
8130 {
8131 	int ridx;
8132 
8133 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
8134 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
8135 			continue;
8136 		if (rval == iwm_rates[ridx].rate)
8137 			break;
8138 	}
8139 
8140 	return ridx;
8141 }
8142 
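/*
 * Compute the CCK and OFDM basic-rate bitmaps which the firmware uses
 * to select rates for control response frames such as ACKs.
 */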
8143 void
8144 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
8145     int *ofdm_rates)
8146 {
8147 	struct ieee80211_node *ni = &in->in_ni;
8148 	struct ieee80211_rateset *rs = &ni->ni_rates;
8149 	int lowest_present_ofdm = -1;
8150 	int lowest_present_cck = -1;
8151 	uint8_t cck = 0;
8152 	uint8_t ofdm = 0;
8153 	int i;
8154 
8155 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
8156 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
8157 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
8158 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8159 				continue;
8160 			cck |= (1 << i);
8161 			if (lowest_present_cck == -1 || lowest_present_cck > i)
8162 				lowest_present_cck = i;
8163 		}
8164 	}
8165 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
8166 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8167 			continue;
8168 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
8169 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
8170 			lowest_present_ofdm = i;
8171 	}
8172 
8173 	/*
8174 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
8175 	 * variables. This isn't sufficient though, as there might not
8176 	 * be all the right rates in the bitmap. E.g. if the only basic
8177 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
8178 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
8179 	 *
8180 	 *    [...] a STA responding to a received frame shall transmit
8181 	 *    its Control Response frame [...] at the highest rate in the
8182 	 *    BSSBasicRateSet parameter that is less than or equal to the
8183 	 *    rate of the immediately previous frame in the frame exchange
8184 	 *    sequence ([...]) and that is of the same modulation class
8185 	 *    ([...]) as the received frame. If no rate contained in the
8186 	 *    BSSBasicRateSet parameter meets these conditions, then the
8187 	 *    control frame sent in response to a received frame shall be
8188 	 *    transmitted at the highest mandatory rate of the PHY that is
8189 	 *    less than or equal to the rate of the received frame, and
8190 	 *    that is of the same modulation class as the received frame.
8191 	 *
8192 	 * As a consequence, we need to add all mandatory rates that are
8193 	 * lower than all of the basic rates to these bitmaps.
8194 	 */
8195 
8196 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
8197 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
8198 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
8199 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
8200 	/* 6M already there or needed so always add */
8201 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
8202 
8203 	/*
8204 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
8205 	 * Note, however:
8206 	 *  - if no CCK rates are basic, it must be ERP since there must
8207 	 *    be some basic rates at all, so they're OFDM => ERP PHY
8208 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
8209 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
8210 	 *  - if 5.5M is basic, 1M and 2M are mandatory
8211 	 *  - if 2M is basic, 1M is mandatory
8212 	 *  - if 1M is basic, that's the only valid ACK rate.
8213 	 * As a consequence, it's not as complicated as it sounds, just add
8214 	 * any lower rates to the ACK rate bitmap.
8215 	 */
8216 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
8217 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
8218 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
8219 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
8220 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
8221 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
8222 	/* 1M already there or needed so always add */
8223 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
8224 
8225 	*cck_rates = cck;
8226 	*ofdm_rates = ofdm;
8227 }
8228 
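/*
 * Fill the fields of the MAC context command which are common to all
 * interface types: addresses, basic rates, preamble and slot time,
 * EDCA parameters, and HT protection flags.
 */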
8229 void
8230 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
8231     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
8232 {
8233 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
8234 	struct ieee80211com *ic = &sc->sc_ic;
8235 	struct ieee80211_node *ni = ic->ic_bss;
8236 	int cck_ack_rates, ofdm_ack_rates;
8237 	int i;
8238 
8239 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
8240 	    in->in_color));
8241 	cmd->action = htole32(action);
8242 
8243 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8244 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
8245 	else if (ic->ic_opmode == IEEE80211_M_STA)
8246 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
8247 	else
8248 		panic("unsupported operating mode %d", ic->ic_opmode);
8249 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
8250 
8251 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
8252 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8253 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
8254 		return;
8255 	}
8256 
8257 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
8258 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
8259 	cmd->cck_rates = htole32(cck_ack_rates);
8260 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
8261 
8262 	cmd->cck_short_preamble
8263 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
8264 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
8265 	cmd->short_slot
8266 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
8267 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
8268 
8269 	for (i = 0; i < EDCA_NUM_AC; i++) {
8270 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
8271 		int txf = iwm_ac_to_tx_fifo[i];
8272 
8273 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
8274 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
8275 		cmd->ac[txf].aifsn = ac->ac_aifsn;
8276 		cmd->ac[txf].fifos_mask = (1 << txf);
8277 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
8278 	}
8279 	if (ni->ni_flags & IEEE80211_NODE_QOS)
8280 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
8281 
8282 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8283 		enum ieee80211_htprot htprot =
8284 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
8285 		switch (htprot) {
8286 		case IEEE80211_HTPROT_NONE:
8287 			break;
8288 		case IEEE80211_HTPROT_NONMEMBER:
8289 		case IEEE80211_HTPROT_NONHT_MIXED:
8290 			cmd->protection_flags |=
8291 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8292 			    IWM_MAC_PROT_FLG_FAT_PROT);
8293 			break;
8294 		case IEEE80211_HTPROT_20MHZ:
8295 			if (in->in_phyctxt &&
8296 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8297 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
8298 				cmd->protection_flags |=
8299 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8300 				    IWM_MAC_PROT_FLG_FAT_PROT);
8301 			}
8302 			break;
8303 		default:
8304 			break;
8305 		}
8306 
8307 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
8308 	}
8309 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8310 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
8311 
8312 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
8313 #undef IWM_EXP2
8314 }
8315 
8316 void
8317 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8318     struct iwm_mac_data_sta *sta, int assoc)
8319 {
8320 	struct ieee80211_node *ni = &in->in_ni;
8321 	uint32_t dtim_off;
8322 	uint64_t tsf;
8323 
8324 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
8325 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
8326 	tsf = letoh64(tsf);
8327 
8328 	sta->is_assoc = htole32(assoc);
8329 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
8330 	sta->dtim_tsf = htole64(tsf + dtim_off);
8331 	sta->bi = htole32(ni->ni_intval);
8332 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
8333 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
8334 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
8335 	sta->listen_interval = htole32(10);
8336 	sta->assoc_id = htole32(ni->ni_associd);
8337 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
8338 }
8339 
8340 int
8341 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8342     int assoc)
8343 {
8344 	struct ieee80211com *ic = &sc->sc_ic;
8345 	struct ieee80211_node *ni = &in->in_ni;
8346 	struct iwm_mac_ctx_cmd cmd;
8347 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8348 
8349 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
8350 		panic("MAC already added");
8351 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
8352 		panic("MAC already removed");
8353 
8354 	memset(&cmd, 0, sizeof(cmd));
8355 
8356 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8357 
8358 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8359 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
8360 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
8361 		    IWM_MAC_FILTER_ACCEPT_GRP |
8362 		    IWM_MAC_FILTER_IN_BEACON |
8363 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
8364 		    IWM_MAC_FILTER_IN_CRC32);
8365 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
8366 		/*
8367 		 * Allow beacons to pass through as long as we are not
8368 		 * associated or we do not have dtim period information.
8369 		 */
8370 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
8371 	else
8372 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8373 
8374 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8375 }
8376 
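/*
 * Tell the firmware how much air time each binding may use. We only
 * ever have a single active binding, so it is granted the entire
 * quota while 'running' is set.
 */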
8377 int
8378 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8379 {
8380 	struct iwm_time_quota_cmd_v1 cmd;
8381 	int i, idx, num_active_macs, quota, quota_rem;
8382 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
8383 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
8384 	uint16_t id;
8385 
8386 	memset(&cmd, 0, sizeof(cmd));
8387 
8388 	/* currently, PHY ID == binding ID */
8389 	if (in && in->in_phyctxt) {
8390 		id = in->in_phyctxt->id;
8391 		KASSERT(id < IWM_MAX_BINDINGS);
8392 		colors[id] = in->in_phyctxt->color;
8393 		if (running)
8394 			n_ifs[id] = 1;
8395 	}
8396 
8397 	/*
8398 	 * The FW's scheduling session consists of
8399 	 * IWM_MAX_QUOTA fragments. Divide these fragments
8400 	 * equally between all the bindings that require quota.
8401 	 */
8402 	num_active_macs = 0;
8403 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8404 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
8405 		num_active_macs += n_ifs[i];
8406 	}
8407 
8408 	quota = 0;
8409 	quota_rem = 0;
8410 	if (num_active_macs) {
8411 		quota = IWM_MAX_QUOTA / num_active_macs;
8412 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
8413 	}
8414 
8415 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8416 		if (colors[i] < 0)
8417 			continue;
8418 
8419 		cmd.quotas[idx].id_and_color =
8420 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
8421 
8422 		if (n_ifs[i] <= 0) {
8423 			cmd.quotas[idx].quota = htole32(0);
8424 			cmd.quotas[idx].max_duration = htole32(0);
8425 		} else {
8426 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8427 			cmd.quotas[idx].max_duration = htole32(0);
8428 		}
8429 		idx++;
8430 	}
8431 
8432 	/* Give the remainder of the session to the first binding */
8433 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
8434 
8435 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8436 		struct iwm_time_quota_cmd cmd_v2;
8437 
8438 		memset(&cmd_v2, 0, sizeof(cmd_v2));
8439 		for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8440 			cmd_v2.quotas[i].id_and_color =
8441 			    cmd.quotas[i].id_and_color;
8442 			cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
8443 			cmd_v2.quotas[i].max_duration =
8444 			    cmd.quotas[i].max_duration;
8445 		}
8446 		return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8447 		    sizeof(cmd_v2), &cmd_v2);
8448 	}
8449 
8450 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8451 }
8452 
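/*
 * Tasks hold a reference on the softc while queued, allowing shutdown
 * to wait until all pending tasks have completed.
 */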
8453 void
8454 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8455 {
8456 	int s = splnet();
8457 
8458 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8459 		splx(s);
8460 		return;
8461 	}
8462 
8463 	refcnt_take(&sc->task_refs);
8464 	if (!task_add(taskq, task))
8465 		refcnt_rele_wake(&sc->task_refs);
8466 	splx(s);
8467 }
8468 
8469 void
8470 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8471 {
8472 	if (task_del(taskq, task))
8473 		refcnt_rele(&sc->task_refs);
8474 }
8475 
8476 int
8477 iwm_scan(struct iwm_softc *sc)
8478 {
8479 	struct ieee80211com *ic = &sc->sc_ic;
8480 	struct ifnet *ifp = IC2IFP(ic);
8481 	int err;
8482 
8483 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8484 		err = iwm_scan_abort(sc);
8485 		if (err) {
8486 			printf("%s: could not abort background scan\n",
8487 			    DEVNAME(sc));
8488 			return err;
8489 		}
8490 	}
8491 
8492 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8493 		err = iwm_umac_scan(sc, 0);
8494 	else
8495 		err = iwm_lmac_scan(sc, 0);
8496 	if (err) {
8497 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8498 		return err;
8499 	}
8500 
8501 	/*
8502 	 * The current mode might have been fixed during association.
8503 	 * Ensure all channels get scanned.
8504 	 */
8505 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
8506 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8507 
8508 	sc->sc_flags |= IWM_FLAG_SCANNING;
8509 	if (ifp->if_flags & IFF_DEBUG)
8510 		printf("%s: %s -> %s\n", ifp->if_xname,
8511 		    ieee80211_state_name[ic->ic_state],
8512 		    ieee80211_state_name[IEEE80211_S_SCAN]);
8513 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8514 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
8515 		ieee80211_node_cleanup(ic, ic->ic_bss);
8516 	}
8517 	ic->ic_state = IEEE80211_S_SCAN;
8518 	iwm_led_blink_start(sc);
8519 	wakeup(&ic->ic_state); /* wake iwm_init() */
8520 
8521 	return 0;
8522 }
8523 
8524 int
8525 iwm_bgscan(struct ieee80211com *ic)
8526 {
8527 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8528 	int err;
8529 
8530 	if (sc->sc_flags & IWM_FLAG_SCANNING)
8531 		return 0;
8532 
8533 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8534 		err = iwm_umac_scan(sc, 1);
8535 	else
8536 		err = iwm_lmac_scan(sc, 1);
8537 	if (err) {
8538 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8539 		return err;
8540 	}
8541 
8542 	sc->sc_flags |= IWM_FLAG_BGSCAN;
8543 	return 0;
8544 }
8545 
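/*
 * Called by net80211 when a background scan has found a better AP.
 * Roaming must wait until our Tx queues have been flushed, so stash
 * the bss-switch argument and finish the work in a task.
 */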
8546 void
8547 iwm_bgscan_done(struct ieee80211com *ic,
8548     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
8549 {
8550 	struct iwm_softc *sc = ic->ic_softc;
8551 
8552 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8553 	sc->bgscan_unref_arg = arg;
8554 	sc->bgscan_unref_arg_size = arg_size;
8555 	iwm_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
8556 }
8557 
8558 void
8559 iwm_bgscan_done_task(void *arg)
8560 {
8561 	struct iwm_softc *sc = arg;
8562 	struct ieee80211com *ic = &sc->sc_ic;
8563 	struct iwm_node *in = (void *)ic->ic_bss;
8564 	struct ieee80211_node *ni = &in->in_ni;
8565 	int tid, err = 0, s = splnet();
8566 
8567 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
8568 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
8569 	    ic->ic_state != IEEE80211_S_RUN) {
8570 		err = ENXIO;
8571 		goto done;
8572 	}
8573 
8574 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
8575 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
8576 
8577 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8578 			continue;
8579 
8580 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8581 		if (err)
8582 			goto done;
8583 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8584 		if (err)
8585 			goto done;
8586 		in->tfd_queue_msk &= ~(1 << qid);
8587 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
8588 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
8589 		    IEEE80211_ACTION_DELBA,
8590 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
8591 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
8592 #endif
8593 		ieee80211_node_tx_ba_clear(ni, tid);
8594 	}
8595 
8596 	err = iwm_flush_sta(sc, in);
8597 	if (err)
8598 		goto done;
8599 
8600 	/*
8601 	 * Tx queues have been flushed and Tx agg has been stopped.
8602 	 * Allow roaming to proceed.
8603 	 */
8604 	ni->ni_unref_arg = sc->bgscan_unref_arg;
8605 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8606 	sc->bgscan_unref_arg = NULL;
8607 	sc->bgscan_unref_arg_size = 0;
8608 	ieee80211_node_tx_stopped(ic, &in->in_ni);
8609 done:
8610 	if (err) {
8611 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8612 		sc->bgscan_unref_arg = NULL;
8613 		sc->bgscan_unref_arg_size = 0;
8614 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8615 			task_add(systq, &sc->init_task);
8616 	}
8617 	refcnt_rele_wake(&sc->task_refs);
8618 	splx(s);
8619 }
8620 
8621 int
8622 iwm_umac_scan_abort(struct iwm_softc *sc)
8623 {
8624 	struct iwm_umac_scan_abort cmd = { 0 };
8625 
8626 	return iwm_send_cmd_pdu(sc,
8627 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
8628 	    0, sizeof(cmd), &cmd);
8629 }
8630 
8631 int
8632 iwm_lmac_scan_abort(struct iwm_softc *sc)
8633 {
8634 	struct iwm_host_cmd cmd = {
8635 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
8636 	};
8637 	int err, status;
8638 
8639 	err = iwm_send_cmd_status(sc, &cmd, &status);
8640 	if (err)
8641 		return err;
8642 
8643 	if (status != IWM_CAN_ABORT_STATUS) {
8644 		/*
8645 		 * The scan abort will return 1 for success or
8646 		 * 2 for "failure".  A failure condition can be
8647 		 * due to simply not being in an active scan which
8648 		 * can occur if we send the scan abort before the
8649 		 * microcode has notified us that a scan is completed.
8650 		 */
8651 		return EBUSY;
8652 	}
8653 
8654 	return 0;
8655 }
8656 
8657 int
8658 iwm_scan_abort(struct iwm_softc *sc)
8659 {
8660 	int err;
8661 
8662 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8663 		err = iwm_umac_scan_abort(sc);
8664 	else
8665 		err = iwm_lmac_scan_abort(sc);
8666 
8667 	if (err == 0)
8668 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8669 	return err;
8670 }
8671 
8672 int
8673 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8674     struct ieee80211_channel *chan, uint8_t chains_static,
8675     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8676     uint8_t vht_chan_width)
8677 {
8678 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8679 	int err;
8680 
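	/*
	 * Firmware with the CDB capability apparently cannot move a PHY
	 * context to a different band with a MODIFY command; such a
	 * change requires removing and re-adding the context instead.
	 */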
8681 	if (isset(sc->sc_enabled_capa,
8682 	    IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8683 	    (phyctxt->channel->ic_flags & band_flags) !=
8684 	    (chan->ic_flags & band_flags)) {
8685 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8686 		    chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8687 		    vht_chan_width);
8688 		if (err) {
8689 			printf("%s: could not remove PHY context "
8690 			    "(error %d)\n", DEVNAME(sc), err);
8691 			return err;
8692 		}
8693 		phyctxt->channel = chan;
8694 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8695 		    chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time, sco,
8696 		    vht_chan_width);
8697 		if (err) {
8698 			printf("%s: could not add PHY context "
8699 			    "(error %d)\n", DEVNAME(sc), err);
8700 			return err;
8701 		}
8702 	} else {
8703 		phyctxt->channel = chan;
8704 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8705 		    chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8706 		    vht_chan_width);
8707 		if (err) {
8708 			printf("%s: could not update PHY context (error %d)\n",
8709 			    DEVNAME(sc), err);
8710 			return err;
8711 		}
8712 	}
8713 
8714 	phyctxt->sco = sco;
8715 	phyctxt->vht_chan_width = vht_chan_width;
8716 	return 0;
8717 }
8718 
8719 int
8720 iwm_auth(struct iwm_softc *sc)
8721 {
8722 	struct ieee80211com *ic = &sc->sc_ic;
8723 	struct iwm_node *in = (void *)ic->ic_bss;
8724 	uint32_t duration;
8725 	int generation = sc->sc_generation, err;
8726 
8727 	splassert(IPL_NET);
8728 
8729 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8730 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8731 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8732 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8733 		if (err)
8734 			return err;
8735 	} else {
8736 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8737 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8738 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8739 		if (err)
8740 			return err;
8742 	}
8743 	in->in_phyctxt = &sc->sc_phyctxt[0];
8744 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8745 	iwm_setrates(in, 0);
8746 
8747 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8748 	if (err) {
8749 		printf("%s: could not add MAC context (error %d)\n",
8750 		    DEVNAME(sc), err);
8751 		return err;
8752 	}
8753 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8754 
8755 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8756 	if (err) {
8757 		printf("%s: could not add binding (error %d)\n",
8758 		    DEVNAME(sc), err);
8759 		goto rm_mac_ctxt;
8760 	}
8761 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8762 
8763 	in->tid_disable_ampdu = 0xffff;
8764 	err = iwm_add_sta_cmd(sc, in, 0);
8765 	if (err) {
8766 		printf("%s: could not add sta (error %d)\n",
8767 		    DEVNAME(sc), err);
8768 		goto rm_binding;
8769 	}
8770 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8771 
8772 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8773 		return 0;
8774 
8775 	/*
8776 	 * Prevent the FW from wandering off channel during association
8777 	 * by "protecting" the session with a time event.
8778 	 */
8779 	if (in->in_ni.ni_intval)
8780 		duration = in->in_ni.ni_intval * 2;
8781 	else
8782 		duration = IEEE80211_DUR_TU;
8783 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8784 
8785 	return 0;
8786 
8787 rm_binding:
8788 	if (generation == sc->sc_generation) {
8789 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8790 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8791 	}
8792 rm_mac_ctxt:
8793 	if (generation == sc->sc_generation) {
8794 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8795 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8796 	}
8797 	return err;
8798 }
8799 
8800 int
8801 iwm_deauth(struct iwm_softc *sc)
8802 {
8803 	struct ieee80211com *ic = &sc->sc_ic;
8804 	struct iwm_node *in = (void *)ic->ic_bss;
8805 	int err;
8806 
8807 	splassert(IPL_NET);
8808 
8809 	iwm_unprotect_session(sc, in);
8810 
8811 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8812 		err = iwm_flush_sta(sc, in);
8813 		if (err)
8814 			return err;
8815 		err = iwm_rm_sta_cmd(sc, in);
8816 		if (err) {
8817 			printf("%s: could not remove STA (error %d)\n",
8818 			    DEVNAME(sc), err);
8819 			return err;
8820 		}
8821 		in->tid_disable_ampdu = 0xffff;
8822 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8823 		sc->sc_rx_ba_sessions = 0;
8824 		sc->ba_rx.start_tidmask = 0;
8825 		sc->ba_rx.stop_tidmask = 0;
8826 		sc->tx_ba_queue_mask = 0;
8827 		sc->ba_tx.start_tidmask = 0;
8828 		sc->ba_tx.stop_tidmask = 0;
8829 	}
8830 
8831 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8832 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8833 		if (err) {
8834 			printf("%s: could not remove binding (error %d)\n",
8835 			    DEVNAME(sc), err);
8836 			return err;
8837 		}
8838 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8839 	}
8840 
8841 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8842 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8843 		if (err) {
8844 			printf("%s: could not remove MAC context (error %d)\n",
8845 			    DEVNAME(sc), err);
8846 			return err;
8847 		}
8848 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8849 	}
8850 
8851 	/* Move unused PHY context to a default channel. */
8852 	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8853 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8854 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8855 	if (err)
8856 		return err;
8857 
8858 	return 0;
8859 }
8860 
8861 int
8862 iwm_run(struct iwm_softc *sc)
8863 {
8864 	struct ieee80211com *ic = &sc->sc_ic;
8865 	struct iwm_node *in = (void *)ic->ic_bss;
8866 	struct ieee80211_node *ni = &in->in_ni;
8867 	int err;
8868 
8869 	splassert(IPL_NET);
8870 
8871 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8872 		/* Add a MAC context and a sniffing STA. */
8873 		err = iwm_auth(sc);
8874 		if (err)
8875 			return err;
8876 	}
8877 
8878 	/* Configure Rx chains for MIMO and wider (40/80 MHz) channels. */
8879 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8880 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8881 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8882 		    in->in_phyctxt->channel, chains, chains,
8883 		    0, IEEE80211_HTOP0_SCO_SCN,
8884 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8885 		if (err) {
8886 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8887 			return err;
8888 		}
8889 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8890 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8891 		uint8_t sco, vht_chan_width;
8892 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8893 		    ieee80211_node_supports_ht_chan40(ni))
8894 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8895 		else
8896 			sco = IEEE80211_HTOP0_SCO_SCN;
8897 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8898 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8899 		    ieee80211_node_supports_vht_chan80(ni))
8900 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8901 		else
8902 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8903 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8904 		    in->in_phyctxt->channel, chains, chains,
8905 		    0, sco, vht_chan_width);
8906 		if (err) {
8907 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8908 			return err;
8909 		}
8910 	}
8911 
8912 	/* Update STA again to apply HT and VHT settings. */
8913 	err = iwm_add_sta_cmd(sc, in, 1);
8914 	if (err) {
8915 		printf("%s: could not update STA (error %d)\n",
8916 		    DEVNAME(sc), err);
8917 		return err;
8918 	}
8919 
8920 	/* We have now been assigned an associd by the AP. */
8921 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8922 	if (err) {
8923 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8924 		return err;
8925 	}
8926 
8927 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8928 	if (err) {
8929 		printf("%s: could not set sf full on (error %d)\n",
8930 		    DEVNAME(sc), err);
8931 		return err;
8932 	}
8933 
8934 	err = iwm_allow_mcast(sc);
8935 	if (err) {
8936 		printf("%s: could not allow mcast (error %d)\n",
8937 		    DEVNAME(sc), err);
8938 		return err;
8939 	}
8940 
8941 	err = iwm_power_update_device(sc);
8942 	if (err) {
8943 		printf("%s: could not send power command (error %d)\n",
8944 		    DEVNAME(sc), err);
8945 		return err;
8946 	}
8947 #ifdef notyet
8948 	/*
8949 	 * Disabled for now. Default beacon filter settings
8950 	 * prevent net80211 from getting ERP and HT protection
8951 	 * updates from beacons.
8952 	 */
8953 	err = iwm_enable_beacon_filter(sc, in);
8954 	if (err) {
8955 		printf("%s: could not enable beacon filter\n",
8956 		    DEVNAME(sc));
8957 		return err;
8958 	}
8959 #endif
8960 	err = iwm_power_mac_update_mode(sc, in);
8961 	if (err) {
8962 		printf("%s: could not update MAC power (error %d)\n",
8963 		    DEVNAME(sc), err);
8964 		return err;
8965 	}
8966 
8967 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
8968 		err = iwm_update_quotas(sc, in, 1);
8969 		if (err) {
8970 			printf("%s: could not update quotas (error %d)\n",
8971 			    DEVNAME(sc), err);
8972 			return err;
8973 		}
8974 	}
8975 
8976 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
8977 	ieee80211_ra_node_init(&in->in_rn);
8978 	ieee80211_ra_vht_node_init(&in->in_rn_vht);
8979 
8980 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8981 		iwm_led_blink_start(sc);
8982 		return 0;
8983 	}
8984 
8985 	/* Start at lowest available bit-rate, AMRR will raise. */
8986 	in->in_ni.ni_txrate = 0;
8987 	in->in_ni.ni_txmcs = 0;
8988 	in->in_ni.ni_vht_ss = 1;
8989 	iwm_setrates(in, 0);
8990 
8991 	timeout_add_msec(&sc->sc_calib_to, 500);
8992 	iwm_led_enable(sc);
8993 
8994 	return 0;
8995 }
8996 
8997 int
8998 iwm_run_stop(struct iwm_softc *sc)
8999 {
9000 	struct ieee80211com *ic = &sc->sc_ic;
9001 	struct iwm_node *in = (void *)ic->ic_bss;
9002 	struct ieee80211_node *ni = &in->in_ni;
9003 	int err, i, tid;
9004 
9005 	splassert(IPL_NET);
9006 
9007 	/*
9008 	 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
9009 	 * for this when moving out of RUN state since it runs in a
9010 	 * separate thread.
9011 	 * Note that in->in_ni (struct ieee80211_node) already represents
9012 	 * our new access point in case we are roaming between APs.
9013 	 * This means we cannot rely on struct ieee802111_node to tell
9014 	 * This means we cannot rely on struct ieee80211_node to tell
9015 	 */
9016 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9017 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9018 		if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
9019 			continue;
9020 		err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
9021 		if (err)
9022 			return err;
9023 		iwm_clear_reorder_buffer(sc, rxba);
9024 		if (sc->sc_rx_ba_sessions > 0)
9025 			sc->sc_rx_ba_sessions--;
9026 	}
9027 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
9028 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
9029 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
9030 			continue;
9031 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
9032 		if (err)
9033 			return err;
9034 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
9035 		if (err)
9036 			return err;
9037 		in->tfd_queue_msk &= ~(1 << qid);
9038 	}
9039 	ieee80211_ba_del(ni);
9040 
9041 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
9042 		iwm_led_blink_stop(sc);
9043 
9044 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
9045 	if (err)
9046 		return err;
9047 
9048 	iwm_disable_beacon_filter(sc);
9049 
9050 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9051 		err = iwm_update_quotas(sc, in, 0);
9052 		if (err) {
9053 			printf("%s: could not update quotas (error %d)\n",
9054 			    DEVNAME(sc), err);
9055 			return err;
9056 		}
9057 	}
9058 
9059 	/* Mark station as disassociated. */
9060 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
9061 	if (err) {
9062 		printf("%s: failed to update MAC\n", DEVNAME(sc));
9063 		return err;
9064 	}
9065 
9066 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
9067 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
9068 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
9069 		    in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
9070 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9071 		if (err) {
9072 			printf("%s: failed to update PHY\n", DEVNAME(sc));
9073 			return err;
9074 		}
9075 	}
9076 
9077 	return 0;
9078 }
9079 
9080 struct ieee80211_node *
9081 iwm_node_alloc(struct ieee80211com *ic)
9082 {
9083 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
9084 }
9085 
9086 int
9087 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9088     struct ieee80211_key *k)
9089 {
9090 	struct iwm_softc *sc = ic->ic_softc;
9091 	struct iwm_add_sta_key_cmd_v1 cmd;
9092 
9093 	memset(&cmd, 0, sizeof(cmd));
9094 
9095 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9096 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9097 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9098 	    IWM_STA_KEY_FLG_KEYID_MSK));
9099 	if (k->k_flags & IEEE80211_KEY_GROUP)
9100 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9101 
9102 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9103 	cmd.common.key_offset = 0;
9104 	cmd.common.sta_id = IWM_STATION_ID;
9105 
9106 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9107 	    sizeof(cmd), &cmd);
9108 }
9109 
9110 int
9111 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9112     struct ieee80211_key *k)
9113 {
9114 	struct iwm_softc *sc = ic->ic_softc;
9115 	struct iwm_add_sta_key_cmd cmd;
9116 
9117 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9118 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
9119 		/* Fall back to software crypto for group keys and non-CCMP ciphers. */
9120 		return (ieee80211_set_key(ic, ni, k));
9121 	}
9122 
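	/*
	 * Firmware without the TKIP_MIC_KEYS API flag expects the older
	 * v1 layout of the ADD_STA_KEY command.
	 */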
9123 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9124 		return iwm_set_key_v1(ic, ni, k);
9125 
9126 	memset(&cmd, 0, sizeof(cmd));
9127 
9128 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9129 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9130 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9131 	    IWM_STA_KEY_FLG_KEYID_MSK));
9132 	if (k->k_flags & IEEE80211_KEY_GROUP)
9133 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9134 
9135 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9136 	cmd.common.key_offset = 0;
9137 	cmd.common.sta_id = IWM_STATION_ID;
9138 
9139 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
9140 
9141 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9142 	    sizeof(cmd), &cmd);
9143 }
9144 
9145 void
9146 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9147     struct ieee80211_key *k)
9148 {
9149 	struct iwm_softc *sc = ic->ic_softc;
9150 	struct iwm_add_sta_key_cmd_v1 cmd;
9151 
9152 	memset(&cmd, 0, sizeof(cmd));
9153 
9154 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9155 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9156 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9157 	    IWM_STA_KEY_FLG_KEYID_MSK));
9158 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9159 	cmd.common.key_offset = 0;
9160 	cmd.common.sta_id = IWM_STATION_ID;
9161 
9162 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9163 }
9164 
9165 void
9166 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9167     struct ieee80211_key *k)
9168 {
9169 	struct iwm_softc *sc = ic->ic_softc;
9170 	struct iwm_add_sta_key_cmd cmd;
9171 
9172 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9173 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
9174 		/* Fall back to software crypto for group keys and non-CCMP ciphers. */
9175 		ieee80211_delete_key(ic, ni, k);
9176 		return;
9177 	}
9178 
9179 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS)) {
9180 		iwm_delete_key_v1(ic, ni, k);
		return;
	}
9181 
9182 	memset(&cmd, 0, sizeof(cmd));
9183 
9184 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9185 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9186 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9187 	    IWM_STA_KEY_FLG_KEYID_MSK));
9188 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9189 	cmd.common.key_offset = 0;
9190 	cmd.common.sta_id = IWM_STATION_ID;
9191 
9192 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9193 }
9194 
9195 void
9196 iwm_calib_timeout(void *arg)
9197 {
9198 	struct iwm_softc *sc = arg;
9199 	struct ieee80211com *ic = &sc->sc_ic;
9200 	struct iwm_node *in = (void *)ic->ic_bss;
9201 	struct ieee80211_node *ni = &in->in_ni;
9202 	int s;
9203 
9204 	s = splnet();
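	/*
	 * AMRR only drives legacy (non-HT) rate selection; HT and VHT
	 * rates are chosen by the RA modules based on Tx status feedback.
	 */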
9205 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
9206 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
9207 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
9208 		int old_txrate = ni->ni_txrate;
9209 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
9210 		/*
9211 		 * If AMRR has chosen a new TX rate we must update
9212 	 * the firmware's LQ rate table.
9213 		 * ni_txrate may change again before the task runs so
9214 		 * cache the chosen rate in the iwm_node structure.
9215 		 */
9216 		if (ni->ni_txrate != old_txrate)
9217 			iwm_setrates(in, 1);
9218 	}
9219 
9220 	splx(s);
9221 
9222 	timeout_add_msec(&sc->sc_calib_to, 500);
9223 }
9224 
9225 void
9226 iwm_set_rate_table_vht(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9227 {
9228 	struct ieee80211_node *ni = &in->in_ni;
9229 	struct ieee80211com *ic = ni->ni_ic;
9230 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9231 	int ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9232 	int i, tab, txmcs;
9233 
9234 	/*
9235 	 * Fill the LQ rate selection table with VHT rates in descending
9236 	 * order, i.e. with the node's current TX rate first. Keep reducing
9237 	 * channel width during later Tx attempts, and eventually fall back
9238 	 * to legacy OFDM. Do not mix SISO and MIMO rates.
9239 	 */
9240 	lqcmd->mimo_delim = 0;
9241 	txmcs = ni->ni_txmcs;
9242 	for (i = 0; i < nitems(lqcmd->rs_table); i++) {
9243 		if (txmcs >= 0) {
9244 			tab = IWM_RATE_MCS_VHT_MSK;
9245 			tab |= txmcs & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9246 			tab |= ((ni->ni_vht_ss - 1) <<
9247 			    IWM_RATE_VHT_MCS_NSS_POS) &
9248 			    IWM_RATE_VHT_MCS_NSS_MSK;
9249 			if (ni->ni_vht_ss > 1)
9250 				tab |= IWM_RATE_MCS_ANT_AB_MSK;
9251 			else {
9252 				if (sc->sc_device_family ==
9253 				    IWM_DEVICE_FAMILY_9000)
9254 					tab |= IWM_RATE_MCS_ANT_B_MSK;
9255 				else
9256 					tab |= IWM_RATE_MCS_ANT_A_MSK;
9257 			}
9258 
9259 			/*
9260 			 * The first two Tx attempts may use 80 MHz/40 MHz/SGI.
9261 			 * The next two Tx attempts may use 40 MHz/SGI.
9262 			 * Beyond that, use 20 MHz and decrease the rate.
9263 			 * As a special case, MCS 9 is invalid at 20 MHz.
9264 			 */
9265 			if (txmcs == 9) {
9266 				if (i < 2 && in->in_phyctxt->vht_chan_width >=
9267 				    IEEE80211_VHTOP0_CHAN_WIDTH_80)
9268 					tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9269 				else
9270 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9271 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9272 				if (i < 4) {
9273 					if (ieee80211_ra_vht_use_sgi(ni))
9274 						tab |= IWM_RATE_MCS_SGI_MSK;
9275 				} else
9276 					txmcs--;
9277 			} else if (i < 2 && in->in_phyctxt->vht_chan_width >=
9278 			    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
9279 				tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9280 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9281 				if (ieee80211_ra_vht_use_sgi(ni))
9282 					tab |= IWM_RATE_MCS_SGI_MSK;
9283 			} else if (i < 4 &&
9284 			    in->in_phyctxt->vht_chan_width >=
9285 			    IEEE80211_VHTOP0_CHAN_WIDTH_HT &&
9286 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
9287 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
9288 				tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9289 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9290 				if (ieee80211_ra_vht_use_sgi(ni))
9291 					tab |= IWM_RATE_MCS_SGI_MSK;
9292 			} else if (txmcs >= 0)
9293 				txmcs--;
9294 		} else {
9295 			/* Fill the rest with the lowest possible rate. */
9296 			tab = iwm_rates[ridx_min].plcp;
9297 			if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
9298 				tab |= IWM_RATE_MCS_ANT_B_MSK;
9299 			else
9300 				tab |= IWM_RATE_MCS_ANT_A_MSK;
9301 			if (ni->ni_vht_ss > 1 && lqcmd->mimo_delim == 0)
9302 				lqcmd->mimo_delim = i;
9303 		}
9304 
9305 		lqcmd->rs_table[i] = htole32(tab);
9306 	}
9307 }
9308 
9309 void
9310 iwm_set_rate_table(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9311 {
9312 	struct ieee80211_node *ni = &in->in_ni;
9313 	struct ieee80211com *ic = ni->ni_ic;
9314 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9315 	struct ieee80211_rateset *rs = &ni->ni_rates;
9316 	int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
9317 
9318 	/*
9319 	 * Fill the LQ rate selection table with legacy and/or HT rates
9320 	 * in descending order, i.e. with the node's current TX rate first.
9321 	 * In cases where throughput of an HT rate corresponds to a legacy
9322 	 * rate it makes no sense to add both. We rely on the fact that
9323 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
9324 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
9325 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
9326 	 */
9327 	j = 0;
9328 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9329 	mimo = iwm_is_mimo_ht_mcs(ni->ni_txmcs);
9330 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
9331 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
9332 		uint8_t plcp = iwm_rates[ridx].plcp;
9333 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
9334 
9335 		if (j >= nitems(lqcmd->rs_table))
9336 			break;
9337 		tab = 0;
9338 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9339 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
9340 				continue;
9341 			/* Do not mix SISO and MIMO HT rates. */
9342 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
9343 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
9344 				continue;
9345 			for (i = ni->ni_txmcs; i >= 0; i--) {
9346 				if (isclr(ni->ni_rxmcs, i))
9347 					continue;
9348 				if (ridx != iwm_ht_mcs2ridx[i])
9349 					continue;
9350 				tab = ht_plcp;
9351 				tab |= IWM_RATE_MCS_HT_MSK;
9352 				/* First two Tx attempts may use 40 MHz/SGI. */
9353 				if (j > 1)
9354 					break;
9355 				if (in->in_phyctxt->sco ==
9356 				    IEEE80211_HTOP0_SCO_SCA ||
9357 				    in->in_phyctxt->sco ==
9358 				    IEEE80211_HTOP0_SCO_SCB) {
9359 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9360 					tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9361 				}
9362 				if (ieee80211_ra_use_ht_sgi(ni))
9363 					tab |= IWM_RATE_MCS_SGI_MSK;
9364 				break;
9365 			}
9366 		} else if (plcp != IWM_RATE_INVM_PLCP) {
9367 			for (i = ni->ni_txrate; i >= 0; i--) {
9368 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
9369 				    IEEE80211_RATE_VAL)) {
9370 					tab = plcp;
9371 					break;
9372 				}
9373 			}
9374 		}
9375 
9376 		if (tab == 0)
9377 			continue;
9378 
9379 		if (iwm_is_mimo_ht_plcp(ht_plcp))
9380 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
9381 		else if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
9382 			tab |= IWM_RATE_MCS_ANT_B_MSK;
9383 		else
9384 			tab |= IWM_RATE_MCS_ANT_A_MSK;
9385 
9386 		if (IWM_RIDX_IS_CCK(ridx))
9387 			tab |= IWM_RATE_MCS_CCK_MSK;
9388 		lqcmd->rs_table[j++] = htole32(tab);
9389 	}
9390 
9391 	lqcmd->mimo_delim = (mimo ? j : 0);
9392 
9393 	/* Fill the rest with the lowest possible rate */
9394 	while (j < nitems(lqcmd->rs_table)) {
9395 		tab = iwm_rates[ridx_min].plcp;
9396 		if (IWM_RIDX_IS_CCK(ridx_min))
9397 			tab |= IWM_RATE_MCS_CCK_MSK;
9398 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
9399 			tab |= IWM_RATE_MCS_ANT_B_MSK;
9400 		else
9401 			tab |= IWM_RATE_MCS_ANT_A_MSK;
9402 		lqcmd->rs_table[j++] = htole32(tab);
9403 	}
9404 }
9405 
9406 void
9407 iwm_setrates(struct iwm_node *in, int async)
9408 {
9409 	struct ieee80211_node *ni = &in->in_ni;
9410 	struct ieee80211com *ic = ni->ni_ic;
9411 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9412 	struct iwm_lq_cmd lqcmd;
9413 	struct iwm_host_cmd cmd = {
9414 		.id = IWM_LQ_CMD,
9415 		.len = { sizeof(lqcmd), },
9416 	};
9417 
9418 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
9419 
9420 	memset(&lqcmd, 0, sizeof(lqcmd));
9421 	lqcmd.sta_id = IWM_STATION_ID;
9422 
9423 	if (ic->ic_flags & IEEE80211_F_USEPROT)
9424 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
9425 
9426 	if (ni->ni_flags & IEEE80211_NODE_VHT)
9427 		iwm_set_rate_table_vht(in, &lqcmd);
9428 	else
9429 		iwm_set_rate_table(in, &lqcmd);
9430 
9431 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
9432 		lqcmd.single_stream_ant_msk = IWM_ANT_B;
9433 	else
9434 		lqcmd.single_stream_ant_msk = IWM_ANT_A;
9435 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
9436 
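	/* Limits imposed on the firmware's Tx aggregation behavior. */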
9437 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
9438 	lqcmd.agg_disable_start_th = 3;
9439 	lqcmd.agg_frame_cnt_limit = 0x3f;
9440 
9441 	cmd.data[0] = &lqcmd;
9442 	iwm_send_cmd(sc, &cmd);
9443 }
9444 
9445 int
9446 iwm_media_change(struct ifnet *ifp)
9447 {
9448 	struct iwm_softc *sc = ifp->if_softc;
9449 	struct ieee80211com *ic = &sc->sc_ic;
9450 	uint8_t rate, ridx;
9451 	int err;
9452 
9453 	err = ieee80211_media_change(ifp);
9454 	if (err != ENETRESET)
9455 		return err;
9456 
9457 	if (ic->ic_fixed_mcs != -1)
9458 		sc->sc_fixed_ridx = iwm_ht_mcs2ridx[ic->ic_fixed_mcs];
9459 	else if (ic->ic_fixed_rate != -1) {
9460 		rate = ic->ic_sup_rates[ic->ic_curmode].
9461 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
9462 		/* Map 802.11 rate to HW rate index. */
9463 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
9464 			if (iwm_rates[ridx].rate == rate)
9465 				break;
9466 		sc->sc_fixed_ridx = ridx;
9467 	}
9468 
9469 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9470 	    (IFF_UP | IFF_RUNNING)) {
9471 		iwm_stop(ifp);
9472 		err = iwm_init(ifp);
9473 	}
9474 	return err;
9475 }
9476 
9477 void
9478 iwm_newstate_task(void *psc)
9479 {
9480 	struct iwm_softc *sc = (struct iwm_softc *)psc;
9481 	struct ieee80211com *ic = &sc->sc_ic;
9482 	enum ieee80211_state nstate = sc->ns_nstate;
9483 	enum ieee80211_state ostate = ic->ic_state;
9484 	int arg = sc->ns_arg;
9485 	int err = 0, s = splnet();
9486 
9487 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9488 		/* iwm_stop() is waiting for us. */
9489 		refcnt_rele_wake(&sc->task_refs);
9490 		splx(s);
9491 		return;
9492 	}
9493 
9494 	if (ostate == IEEE80211_S_SCAN) {
9495 		if (nstate == ostate) {
9496 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
9497 				refcnt_rele_wake(&sc->task_refs);
9498 				splx(s);
9499 				return;
9500 			}
9501 			/* Firmware is no longer scanning. Do another scan. */
9502 			goto next_scan;
9503 		} else
9504 			iwm_led_blink_stop(sc);
9505 	}
9506 
9507 	if (nstate <= ostate) {
9508 		switch (ostate) {
9509 		case IEEE80211_S_RUN:
9510 			err = iwm_run_stop(sc);
9511 			if (err)
9512 				goto out;
9513 			/* FALLTHROUGH */
9514 		case IEEE80211_S_ASSOC:
9515 		case IEEE80211_S_AUTH:
9516 			if (nstate <= IEEE80211_S_AUTH) {
9517 				err = iwm_deauth(sc);
9518 				if (err)
9519 					goto out;
9520 			}
9521 			/* FALLTHROUGH */
9522 		case IEEE80211_S_SCAN:
9523 		case IEEE80211_S_INIT:
9524 			break;
9525 		}
9526 
9527 		/* Die now if iwm_stop() was called while we were sleeping. */
9528 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9529 			refcnt_rele_wake(&sc->task_refs);
9530 			splx(s);
9531 			return;
9532 		}
9533 	}
9534 
9535 	switch (nstate) {
9536 	case IEEE80211_S_INIT:
9537 		break;
9538 
9539 	case IEEE80211_S_SCAN:
9540 next_scan:
9541 		err = iwm_scan(sc);
9542 		if (err)
9543 			break;
9544 		refcnt_rele_wake(&sc->task_refs);
9545 		splx(s);
9546 		return;
9547 
9548 	case IEEE80211_S_AUTH:
9549 		err = iwm_auth(sc);
9550 		break;
9551 
9552 	case IEEE80211_S_ASSOC:
9553 		break;
9554 
9555 	case IEEE80211_S_RUN:
9556 		err = iwm_run(sc);
9557 		break;
9558 	}
9559 
9560 out:
9561 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9562 		if (err)
9563 			task_add(systq, &sc->init_task);
9564 		else
9565 			sc->sc_newstate(ic, nstate, arg);
9566 	}
9567 	refcnt_rele_wake(&sc->task_refs);
9568 	splx(s);
9569 }
9570 
9571 int
9572 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9573 {
9574 	struct ifnet *ifp = IC2IFP(ic);
9575 	struct iwm_softc *sc = ifp->if_softc;
9576 
9577 	/*
9578 	 * Prevent attempts to transition towards the same state, unless
9579 	 * we are scanning, in which case a SCAN -> SCAN transition
9580 	 * triggers another scan iteration. And AUTH -> AUTH is needed
9581 	 * to support band-steering.
9582 	 */
9583 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9584 	    nstate != IEEE80211_S_AUTH)
9585 		return 0;
9586 
9587 	if (ic->ic_state == IEEE80211_S_RUN) {
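		/*
		 * Stop periodic calibration and drop tasks which only
		 * make sense while in RUN state.
		 */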
9588 		timeout_del(&sc->sc_calib_to);
9589 		iwm_del_task(sc, systq, &sc->ba_task);
9590 		iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9591 		iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9592 		iwm_del_task(sc, systq, &sc->bgscan_done_task);
9593 	}
9594 
9595 	sc->ns_nstate = nstate;
9596 	sc->ns_arg = arg;
9597 
9598 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9599 
9600 	return 0;
9601 }
9602 
9603 void
9604 iwm_endscan(struct iwm_softc *sc)
9605 {
9606 	struct ieee80211com *ic = &sc->sc_ic;
9607 
9608 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9609 		return;
9610 
9611 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9612 	ieee80211_end_scan(&ic->ic_if);
9613 }
9614 
9615 /*
9616  * Aging and idle timeouts for the different possible scenarios
9617  * in default configuration
9618  * in default configuration.
9619 static const uint32_t
9620 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9621 	{
9622 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9623 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9624 	},
9625 	{
9626 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
9627 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9628 	},
9629 	{
9630 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
9631 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
9632 	},
9633 	{
9634 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
9635 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
9636 	},
9637 	{
9638 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
9639 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
9640 	},
9641 };
9642 
9643 /*
9644  * Aging and idle timeouts for the different possible scenarios
9645  * in single BSS MAC configuration.
9646  */
9647 static const uint32_t
9648 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9649 	{
9650 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
9651 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
9652 	},
9653 	{
9654 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
9655 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
9656 	},
9657 	{
9658 		htole32(IWM_SF_MCAST_AGING_TIMER),
9659 		htole32(IWM_SF_MCAST_IDLE_TIMER)
9660 	},
9661 	{
9662 		htole32(IWM_SF_BA_AGING_TIMER),
9663 		htole32(IWM_SF_BA_IDLE_TIMER)
9664 	},
9665 	{
9666 		htole32(IWM_SF_TX_RE_AGING_TIMER),
9667 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
9668 	},
9669 };
9670 
9671 void
9672 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9673     struct ieee80211_node *ni)
9674 {
9675 	int i, j, watermark;
9676 
9677 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
9678 
9679 	/*
9680 	 * If we are in the association flow, check the antenna configuration
9681 	 * capabilities of the AP station and choose the watermark accordingly.
9682 	 */
9683 	if (ni) {
9684 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9685 			if (ni->ni_rxmcs[1] != 0)
9686 				watermark = IWM_SF_W_MARK_MIMO2;
9687 			else
9688 				watermark = IWM_SF_W_MARK_SISO;
9689 		} else {
9690 			watermark = IWM_SF_W_MARK_LEGACY;
9691 		}
9692 	/* default watermark value for unassociated mode. */
9693 	} else {
9694 		watermark = IWM_SF_W_MARK_MIMO2;
9695 	}
9696 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
9697 
9698 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
9699 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
9700 			sf_cmd->long_delay_timeouts[i][j] =
9701 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
9702 		}
9703 	}
9704 
9705 	if (ni) {
9706 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9707 		       sizeof(iwm_sf_full_timeout));
9708 	} else {
9709 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9710 		       sizeof(iwm_sf_full_timeout_def));
9711 	}
9712 
9714 
9715 int
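/*
 * Set the firmware's smart FIFO (SF) state. The smart FIFO decides how
 * long received frames may be buffered before the host is woken up,
 * according to the watermarks and timeout tables above.
 */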
9716 iwm_sf_config(struct iwm_softc *sc, int new_state)
9717 {
9718 	struct ieee80211com *ic = &sc->sc_ic;
9719 	struct iwm_sf_cfg_cmd sf_cmd = {
9720 		.state = htole32(new_state),
9721 	};
9722 	int err = 0;
9723 
9724 #if 0	/* only used for models with sdio interface, in iwlwifi */
9725 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9726 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
9727 #endif
9728 
9729 	switch (new_state) {
9730 	case IWM_SF_UNINIT:
9731 	case IWM_SF_INIT_OFF:
9732 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
9733 		break;
9734 	case IWM_SF_FULL_ON:
9735 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9736 		break;
9737 	default:
9738 		return EINVAL;
9739 	}
9740 
9741 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9742 				   sizeof(sf_cmd), &sf_cmd);
9743 	return err;
9744 }
9745 
9746 int
9747 iwm_send_bt_init_conf(struct iwm_softc *sc)
9748 {
9749 	struct iwm_bt_coex_cmd bt_cmd;
9750 
9751 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
9752 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
9753 
9754 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9755 	    &bt_cmd);
9756 }
9757 
9758 int
9759 iwm_send_soc_conf(struct iwm_softc *sc)
9760 {
9761 	struct iwm_soc_configuration_cmd cmd;
9762 	int err;
9763 	uint32_t cmd_id, flags = 0;
9764 
9765 	memset(&cmd, 0, sizeof(cmd));
9766 
9767 	/*
9768 	 * In VER_1 of this command, the discrete value is considered
9769 	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
9770 	 * values in VER_1, this is backwards-compatible with VER_2,
9771 	 * as long as we don't set any other flag bits.
9772 	 */
9773 	if (!sc->sc_integrated) { /* VER_1 */
9774 		flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9775 	} else { /* VER_2 */
9776 		uint8_t scan_cmd_ver;
9777 		if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9778 			flags |= (sc->sc_ltr_delay &
9779 			    IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9780 		scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9781 		    IWM_SCAN_REQ_UMAC);
9782 		if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
9783 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9784 			flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9785 	}
9786 	cmd.flags = htole32(flags);
9787 
9788 	cmd.latency = htole32(sc->sc_xtal_latency);
9789 
9790 	cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
9791 	err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9792 	if (err)
9793 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9794 	return err;
9795 }
9796 
9797 int
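/*
 * Tell the firmware which regulatory domain applies by sending a
 * mobile country code (MCC) update. The code "ZZ" selects the
 * world-wide default domain.
 */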
9798 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9799 {
9800 	struct iwm_mcc_update_cmd mcc_cmd;
9801 	struct iwm_host_cmd hcmd = {
9802 		.id = IWM_MCC_UPDATE_CMD,
9803 		.flags = IWM_CMD_WANT_RESP,
9804 		.resp_pkt_len = IWM_CMD_RESP_MAX,
9805 		.data = { &mcc_cmd },
9806 	};
9807 	struct iwm_rx_packet *pkt;
9808 	size_t resp_len;
9809 	int err;
9810 	int resp_v3 = isset(sc->sc_enabled_capa,
9811 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);
9812 
9813 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9814 	    !sc->sc_nvm.lar_enabled) {
9815 		return 0;
9816 	}
9817 
9818 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9819 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
9820 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9821 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9822 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
9823 	else
9824 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
9825 
9826 	if (resp_v3) { /* same size as resp_v2 */
9827 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9828 	} else {
9829 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9830 	}
9831 
9832 	err = iwm_send_cmd(sc, &hcmd);
9833 	if (err)
9834 		return err;
9835 
9836 	pkt = hcmd.resp_pkt;
9837 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
9838 		err = EIO;
9839 		goto out;
9840 	}
9841 
9842 	if (resp_v3) {
9843 		struct iwm_mcc_update_resp_v3 *resp;
9844 		resp_len = iwm_rx_packet_payload_len(pkt);
9845 		if (resp_len < sizeof(*resp)) {
9846 			err = EIO;
9847 			goto out;
9848 		}
9849 
9850 		resp = (void *)pkt->data;
9851 		if (resp_len != sizeof(*resp) +
9852 		    resp->n_channels * sizeof(resp->channels[0])) {
9853 			err = EIO;
9854 			goto out;
9855 		}
9856 	} else {
9857 		struct iwm_mcc_update_resp_v1 *resp_v1;
9858 		resp_len = iwm_rx_packet_payload_len(pkt);
9859 		if (resp_len < sizeof(*resp_v1)) {
9860 			err = EIO;
9861 			goto out;
9862 		}
9863 
9864 		resp_v1 = (void *)pkt->data;
9865 		if (resp_len != sizeof(*resp_v1) +
9866 		    resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9867 			err = EIO;
9868 			goto out;
9869 		}
9870 	}
9871 out:
9872 	iwm_free_resp(sc, &hcmd);
9873 	return err;
9874 }
9875 
9876 int
9877 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9878 {
9879 	struct iwm_temp_report_ths_cmd cmd;
9880 	int err;
9881 
9882 	/*
9883 	 * In order to hand responsibility for critical-temperature-kill
9884 	 * and Tx backoff over to the firmware, we must send an empty
9885 	 * temperature reporting command at init time.
9886 	 */
9887 	memset(&cmd, 0, sizeof(cmd));
9888 
9889 	err = iwm_send_cmd_pdu(sc,
9890 	    IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
9891 	    0, sizeof(cmd), &cmd);
9892 	if (err)
9893 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9894 		    DEVNAME(sc), err);
9895 
9896 	return err;
9897 }
9898 
9899 void
9900 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9901 {
9902 	struct iwm_host_cmd cmd = {
9903 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
9904 		.len = { sizeof(uint32_t), },
9905 		.data = { &backoff, },
9906 	};
9907 
9908 	iwm_send_cmd(sc, &cmd);
9909 }
9910 
9911 void
9912 iwm_free_fw_paging(struct iwm_softc *sc)
9913 {
9914 	int i;
9915 
9916 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9917 		return;
9918 
9919 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
9920 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9921 	}
9922 
9923 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9924 }
9925 
9926 int
9927 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9928 {
9929 	int sec_idx, idx;
9930 	uint32_t offset = 0;
9931 
9932 	/*
9933 	 * Find the start of the paging image. If a CPU2 section exists and
9934 	 * uses the paging format, the image is laid out as follows:
9935 	 * CPU1 sections (2 or more)
9936 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
9937 	 * CPU2 sections (not paged)
9938 	 * PAGING_SEPARATOR_SECTION delimiter - separates non-paged CPU2
9939 	 * sections from the CPU2 paging sections
9940 	 * CPU2 paging CSS
9941 	 * CPU2 paging image (including instructions and data)
9942 	 */
9943 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
9944 		if (image->fw_sect[sec_idx].fws_devoff ==
9945 		    IWM_PAGING_SEPARATOR_SECTION) {
9946 			sec_idx++;
9947 			break;
9948 		}
9949 	}
9950 
9951 	/*
9952 	 * If paging is enabled there should be at least 2 more sections left
9953 	 * (one for CSS and one for Paging data)
9954 	 */
9955 	if (sec_idx >= nitems(image->fw_sect) - 1) {
9956 		printf("%s: Paging: Missing CSS and/or paging sections\n",
9957 		    DEVNAME(sc));
9958 		iwm_free_fw_paging(sc);
9959 		return EINVAL;
9960 	}
9961 
9962 	/* Copy the CSS block to DRAM. */
9963 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
9964 	    DEVNAME(sc), sec_idx));
9965 
9966 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
9967 	    image->fw_sect[sec_idx].fws_data,
9968 	    sc->fw_paging_db[0].fw_paging_size);
9969 
9970 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
9971 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
9972 
9973 	sec_idx++;
9974 
9975 	/*
9976 	 * Copy the paging blocks to DRAM. The loop index starts at 1
9977 	 * because the CSS block at index 0 has already been copied.
9978 	 * The loop stops before num_of_paging_blk because the last
9979 	 * block is not necessarily full.
9980 	 */
9981 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
9982 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9983 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9984 		    sc->fw_paging_db[idx].fw_paging_size);
9985 
9986 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
9987 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
9988 
9989 		offset += sc->fw_paging_db[idx].fw_paging_size;
9990 	}
9991 
9992 	/* copy the last paging block */
9993 	if (sc->num_of_pages_in_last_blk > 0) {
9994 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9995 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9996 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
9997 
9998 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
9999 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
10000 	}
10001 
10002 	return 0;
10003 }
10004 
10005 int
10006 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10007 {
10008 	int blk_idx = 0;
10009 	int error, num_of_pages;
10010 
10011 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
10012 		int i;
10013 		/* The device was reset; set up firmware paging again. */
10014 		bus_dmamap_sync(sc->sc_dmat,
10015 		    sc->fw_paging_db[0].fw_paging_block.map,
10016 		    0, IWM_FW_PAGING_SIZE,
10017 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10018 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
10019 			bus_dmamap_sync(sc->sc_dmat,
10020 			    sc->fw_paging_db[i].fw_paging_block.map,
10021 			    0, IWM_PAGING_BLOCK_SIZE,
10022 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10023 		}
10024 		return 0;
10025 	}
10026 
10027 	/* Ensure that (1 << IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE. */
10028 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
10029 #error IWM_BLOCK_2_EXP_SIZE must be the base-2 logarithm of IWM_PAGING_BLOCK_SIZE
10030 #endif
10031 
10032 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
10033 	sc->num_of_paging_blk =
10034 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
10035 
10036 	sc->num_of_pages_in_last_blk =
10037 		num_of_pages -
10038 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
10039 
10040 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
10041 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
10042 	    sc->num_of_paging_blk,
10043 	    sc->num_of_pages_in_last_blk));
10044 
10045 	/* Allocate a 4 KB block for the paging CSS. */
10046 	error = iwm_dma_contig_alloc(sc->sc_dmat,
10047 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
10048 	    4096);
10049 	if (error) {
10050 		/* free all the previous pages since we failed */
10051 		iwm_free_fw_paging(sc);
10052 		return ENOMEM;
10053 	}
10054 
10055 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
10056 
10057 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
10058 	    DEVNAME(sc)));
10059 
10060 	/*
10061 	 * Allocate paging blocks in DRAM. The loop starts at index 1
10062 	 * because the CSS block was already allocated in fw_paging_db[0].
10063 	 */
10064 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10065 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
10067 		error = iwm_dma_contig_alloc(sc->sc_dmat,
10068 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
10069 		    IWM_PAGING_BLOCK_SIZE, 4096);
10070 		if (error) {
10071 			/* free all the previous pages since we failed */
10072 			iwm_free_fw_paging(sc);
10073 			return ENOMEM;
10074 		}
10075 
10076 		sc->fw_paging_db[blk_idx].fw_paging_size =
10077 		    IWM_PAGING_BLOCK_SIZE;
10078 
10079 		DPRINTF((
10080 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
10081 		    DEVNAME(sc)));
10082 	}
10083 
10084 	return 0;
10085 }
10086 
10087 int
10088 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10089 {
10090 	int ret;
10091 
10092 	ret = iwm_alloc_fw_paging_mem(sc, fw);
10093 	if (ret)
10094 		return ret;
10095 
10096 	return iwm_fill_paging_mem(sc, fw);
10097 }
10098 
10099 /* send paging cmd to FW in case CPU2 has paging image */
10100 int
10101 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10102 {
10103 	int blk_idx;
10104 	uint32_t dev_phy_addr;
10105 	struct iwm_fw_paging_cmd fw_paging_cmd = {
10106 		.flags =
10107 			htole32(IWM_PAGING_CMD_IS_SECURED |
10108 				IWM_PAGING_CMD_IS_ENABLED |
10109 				(sc->num_of_pages_in_last_blk <<
10110 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
10111 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
10112 		.block_num = htole32(sc->num_of_paging_blk),
10113 	};
10114 
10115 	/* Loop over all paging blocks plus the CSS block. */
10116 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
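		/*
		 * The firmware expects block addresses in units of
		 * 4 KB pages, hence the shift by IWM_PAGE_2_EXP_SIZE.
		 */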
10117 		dev_phy_addr = htole32(
10118 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
10119 		    IWM_PAGE_2_EXP_SIZE);
10120 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
10121 		bus_dmamap_sync(sc->sc_dmat,
10122 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
10123 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
10124 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10125 	}
10126 
10127 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
10128 					       IWM_LONG_GROUP, 0),
10129 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
10130 }
10131 
10132 int
10133 iwm_init_hw(struct iwm_softc *sc)
10134 {
10135 	struct ieee80211com *ic = &sc->sc_ic;
10136 	int err, i, ac, qid, s;
10137 
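	/*
	 * Run the INIT firmware image first; it performs NVM access and
	 * PHY calibration before the regular runtime image takes over.
	 */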
10138 	err = iwm_run_init_mvm_ucode(sc, 0);
10139 	if (err)
10140 		return err;
10141 
10142 	/* Stop and restart the HW since the INIT image has just been loaded. */
10143 	iwm_stop_device(sc);
10144 	err = iwm_start_hw(sc);
10145 	if (err) {
10146 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10147 		return err;
10148 	}
10149 
10150 	/* Restart, this time with the regular firmware */
10151 	s = splnet();
10152 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
10153 	if (err) {
10154 		printf("%s: could not load firmware\n", DEVNAME(sc));
10155 		splx(s);
10156 		return err;
10157 	}
10158 
10159 	if (!iwm_nic_lock(sc)) {
10160 		splx(s);
10161 		return EBUSY;
10162 	}
10163 
10164 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
10165 	if (err) {
10166 		printf("%s: could not init tx ant config (error %d)\n",
10167 		    DEVNAME(sc), err);
10168 		goto err;
10169 	}
10170 
10171 	err = iwm_send_phy_db_data(sc);
10172 	if (err) {
10173 		printf("%s: could not init phy db (error %d)\n",
10174 		    DEVNAME(sc), err);
10175 		goto err;
10176 	}
10177 
10178 	err = iwm_send_phy_cfg_cmd(sc);
10179 	if (err) {
10180 		printf("%s: could not send phy config (error %d)\n",
10181 		    DEVNAME(sc), err);
10182 		goto err;
10183 	}
10184 
10185 	err = iwm_send_bt_init_conf(sc);
10186 	if (err) {
10187 		printf("%s: could not init bt coex (error %d)\n",
10188 		    DEVNAME(sc), err);
10189 		goto err;
10190 	}
10191 
10192 	if (isset(sc->sc_enabled_capa,
10193 	    IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
10194 		err = iwm_send_soc_conf(sc);
10195 		if (err)
10196 			goto err;
10197 	}
10198 
10199 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
10200 		err = iwm_send_dqa_cmd(sc);
10201 		if (err)
10202 			goto err;
10203 	}
10204 
10205 	/* Add auxiliary station for scanning */
10206 	err = iwm_add_aux_sta(sc);
10207 	if (err) {
10208 		printf("%s: could not add aux station (error %d)\n",
10209 		    DEVNAME(sc), err);
10210 		goto err;
10211 	}
10212 
10213 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
10214 		/*
10215 		 * The channel used here isn't relevant as it's
10216 		 * going to be overwritten in the other flows.
10217 		 * For now use the first channel we have.
10218 		 */
10219 		sc->sc_phyctxt[i].id = i;
10220 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
10221 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
10222 		    IWM_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
10223 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
10224 		if (err) {
10225 			printf("%s: could not add phy context %d (error %d)\n",
10226 			    DEVNAME(sc), i, err);
10227 			goto err;
10228 		}
10229 	}
10230 
10231 	/* Initialize tx backoffs to the minimum. */
10232 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
10233 		iwm_tt_tx_backoff(sc, 0);
10234 
10236 	err = iwm_config_ltr(sc);
10237 	if (err) {
10238 		printf("%s: PCIe LTR configuration failed (error %d)\n",
10239 		    DEVNAME(sc), err);
10240 	}
10241 
10242 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
10243 		err = iwm_send_temp_report_ths_cmd(sc);
10244 		if (err)
10245 			goto err;
10246 	}
10247 
10248 	err = iwm_power_update_device(sc);
10249 	if (err) {
10250 		printf("%s: could not send power command (error %d)\n",
10251 		    DEVNAME(sc), err);
10252 		goto err;
10253 	}
10254 
10255 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
10256 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
10257 		if (err) {
10258 			printf("%s: could not init LAR (error %d)\n",
10259 			    DEVNAME(sc), err);
10260 			goto err;
10261 		}
10262 	}
10263 
10264 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
10265 		err = iwm_config_umac_scan(sc);
10266 		if (err) {
10267 			printf("%s: could not configure scan (error %d)\n",
10268 			    DEVNAME(sc), err);
10269 			goto err;
10270 		}
10271 	}
10272 
10273 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10274 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10275 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
10276 		else
10277 			qid = IWM_AUX_QUEUE;
10278 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
10279 		    iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
10280 		if (err) {
10281 			printf("%s: could not enable monitor inject Tx queue "
10282 			    "(error %d)\n", DEVNAME(sc), err);
10283 			goto err;
10284 		}
10285 	} else {
10286 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
10287 			if (isset(sc->sc_enabled_capa,
10288 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10289 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
10290 			else
10291 				qid = ac;
10292 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
10293 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
10294 			if (err) {
10295 				printf("%s: could not enable Tx queue %d "
10296 				    "(error %d)\n", DEVNAME(sc), ac, err);
10297 				goto err;
10298 			}
10299 		}
10300 	}
10301 
10302 	err = iwm_disable_beacon_filter(sc);
10303 	if (err) {
10304 		printf("%s: could not disable beacon filter (error %d)\n",
10305 		    DEVNAME(sc), err);
10306 		goto err;
10307 	}
10308 
10309 err:
10310 	iwm_nic_unlock(sc);
10311 	splx(s);
10312 	return err;
10313 }
10314 
10315 /* Allow multicast from our BSSID. */
10316 int
10317 iwm_allow_mcast(struct iwm_softc *sc)
10318 {
10319 	struct ieee80211com *ic = &sc->sc_ic;
10320 	struct iwm_node *in = (void *)ic->ic_bss;
10321 	struct iwm_mcast_filter_cmd *cmd;
10322 	size_t size;
10323 	int err;
10324 
10325 	size = roundup(sizeof(*cmd), 4);
10326 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
10327 	if (cmd == NULL)
10328 		return ENOMEM;
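	/* With pass_all set and a zero count, no multicast filtering is done. */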
10329 	cmd->filter_own = 1;
10330 	cmd->port_id = 0;
10331 	cmd->count = 0;
10332 	cmd->pass_all = 1;
10333 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
10334 
10335 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
10336 	    0, size, cmd);
10337 	free(cmd, M_DEVBUF, size);
10338 	return err;
10339 }
10340 
10341 int
10342 iwm_init(struct ifnet *ifp)
10343 {
10344 	struct iwm_softc *sc = ifp->if_softc;
10345 	struct ieee80211com *ic = &sc->sc_ic;
10346 	int err, generation;
10347 
10348 	rw_assert_wrlock(&sc->ioctl_rwl);
10349 
10350 	generation = ++sc->sc_generation;
10351 
10352 	KASSERT(sc->task_refs.r_refs == 0);
10353 	refcnt_init(&sc->task_refs);
10354 
10355 	err = iwm_preinit(sc);
10356 	if (err)
10357 		return err;
10358 
10359 	err = iwm_start_hw(sc);
10360 	if (err) {
10361 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10362 		return err;
10363 	}
10364 
10365 	err = iwm_init_hw(sc);
10366 	if (err) {
10367 		if (generation == sc->sc_generation)
10368 			iwm_stop(ifp);
10369 		return err;
10370 	}
10371 
10372 	if (sc->sc_nvm.sku_cap_11n_enable)
10373 		iwm_setup_ht_rates(sc);
10374 	if (sc->sc_nvm.sku_cap_11ac_enable)
10375 		iwm_setup_vht_rates(sc);
10376 
10377 	ifq_clr_oactive(&ifp->if_snd);
10378 	ifp->if_flags |= IFF_RUNNING;
10379 
10380 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10381 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
10382 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10383 		return 0;
10384 	}
10385 
10386 	ieee80211_begin_scan(ifp);
10387 
10388 	/*
10389 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
10390 	 * Wait until the transition to SCAN state has completed.
10391 	 */
10392 	do {
10393 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
10394 		    SEC_TO_NSEC(1));
10395 		if (generation != sc->sc_generation)
10396 			return ENXIO;
10397 		if (err) {
10398 			iwm_stop(ifp);
10399 			return err;
10400 		}
10401 	} while (ic->ic_state != IEEE80211_S_SCAN);
10402 
10403 	return 0;
10404 }
10405 
10406 void
10407 iwm_start(struct ifnet *ifp)
10408 {
10409 	struct iwm_softc *sc = ifp->if_softc;
10410 	struct ieee80211com *ic = &sc->sc_ic;
10411 	struct ieee80211_node *ni;
10412 	struct ether_header *eh;
10413 	struct mbuf *m;
10414 	int ac = EDCA_AC_BE; /* XXX */
10415 
10416 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
10417 		return;
10418 
10419 	for (;;) {
10420 		/* why isn't this done per-queue? */
10421 		if (sc->qfullmsk != 0) {
10422 			ifq_set_oactive(&ifp->if_snd);
10423 			break;
10424 		}
10425 
10426 		/* Don't queue additional frames while flushing Tx queues. */
10427 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10428 			break;
10429 
10430 		/* need to send management frames even if we're not RUNning */
10431 		m = mq_dequeue(&ic->ic_mgtq);
10432 		if (m) {
10433 			ni = m->m_pkthdr.ph_cookie;
10434 			goto sendit;
10435 		}
10436 
10437 		if (ic->ic_state != IEEE80211_S_RUN ||
10438 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
10439 			break;
10440 
10441 		m = ifq_dequeue(&ifp->if_snd);
10442 		if (!m)
10443 			break;
10444 		if (m->m_len < sizeof (*eh) &&
10445 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
10446 			ifp->if_oerrors++;
10447 			continue;
10448 		}
10449 #if NBPFILTER > 0
10450 		if (ifp->if_bpf != NULL)
10451 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
10452 #endif
10453 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
10454 			ifp->if_oerrors++;
10455 			continue;
10456 		}
10457 
10458  sendit:
10459 #if NBPFILTER > 0
10460 		if (ic->ic_rawbpf != NULL)
10461 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10462 #endif
10463 		if (iwm_tx(sc, m, ni, ac) != 0) {
10464 			ieee80211_release_node(ic, ni);
10465 			ifp->if_oerrors++;
10466 			continue;
10467 		}
10468 
10469 		if (ifp->if_flags & IFF_UP)
10470 			ifp->if_timer = 1;
10471 	}
10472 
10473 	return;
10474 }
10475 
10476 void
10477 iwm_stop(struct ifnet *ifp)
10478 {
10479 	struct iwm_softc *sc = ifp->if_softc;
10480 	struct ieee80211com *ic = &sc->sc_ic;
10481 	struct iwm_node *in = (void *)ic->ic_bss;
10482 	int i, s = splnet();
10483 
10484 	rw_assert_wrlock(&sc->ioctl_rwl);
10485 
10486 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10487 
10488 	/* Cancel scheduled tasks and let any stale tasks finish up. */
10489 	task_del(systq, &sc->init_task);
10490 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10491 	iwm_del_task(sc, systq, &sc->ba_task);
10492 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10493 	iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10494 	iwm_del_task(sc, systq, &sc->bgscan_done_task);
10495 	KASSERT(sc->task_refs.r_refs >= 1);
10496 	refcnt_finalize(&sc->task_refs, "iwmstop");
10497 
10498 	iwm_stop_device(sc);
10499 
10500 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10501 	sc->bgscan_unref_arg = NULL;
10502 	sc->bgscan_unref_arg_size = 0;
10503 
10504 	/* Reset soft state. */
10505 
10506 	sc->sc_generation++;
10507 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10508 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10509 		sc->sc_cmd_resp_pkt[i] = NULL;
10510 		sc->sc_cmd_resp_len[i] = 0;
10511 	}
10512 	ifp->if_flags &= ~IFF_RUNNING;
10513 	ifq_clr_oactive(&ifp->if_snd);
10514 
10515 	in->in_phyctxt = NULL;
10516 	in->tid_disable_ampdu = 0xffff;
10517 	in->tfd_queue_msk = 0;
10518 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10519 
10520 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10521 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10522 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10523 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10524 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10525 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10526 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10527 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10528 
10529 	sc->sc_rx_ba_sessions = 0;
10530 	sc->ba_rx.start_tidmask = 0;
10531 	sc->ba_rx.stop_tidmask = 0;
10532 	sc->tx_ba_queue_mask = 0;
10533 	sc->ba_tx.start_tidmask = 0;
10534 	sc->ba_tx.stop_tidmask = 0;
10535 
10536 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10537 	sc->ns_nstate = IEEE80211_S_INIT;
10538 
10539 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10540 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10541 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10542 		iwm_clear_reorder_buffer(sc, rxba);
10543 	}
10544 	iwm_led_blink_stop(sc);
10545 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10546 	ifp->if_timer = 0;
10547 
10548 	splx(s);
10549 }
10550 
10551 void
10552 iwm_watchdog(struct ifnet *ifp)
10553 {
10554 	struct iwm_softc *sc = ifp->if_softc;
10555 	int i;
10556 
10557 	ifp->if_timer = 0;
10558 
10559 	/*
10560 	 * We maintain a separate timer for each Tx queue because
10561 	 * Tx aggregation queues can get "stuck" while other queues
10562 	 * keep working. The Linux driver uses a similar workaround.
10563 	 */
10564 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10565 		if (sc->sc_tx_timer[i] > 0) {
10566 			if (--sc->sc_tx_timer[i] == 0) {
10567 				printf("%s: device timeout\n", DEVNAME(sc));
10568 				if (ifp->if_flags & IFF_DEBUG) {
10569 					iwm_nic_error(sc);
10570 					iwm_dump_driver_status(sc);
10571 				}
10572 				if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10573 					task_add(systq, &sc->init_task);
10574 				ifp->if_oerrors++;
10575 				return;
10576 			}
10577 			ifp->if_timer = 1;
10578 		}
10579 	}
10580 
10581 	ieee80211_watchdog(ifp);
10582 }
10583 
10584 int
10585 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10586 {
10587 	struct iwm_softc *sc = ifp->if_softc;
10588 	int s, err = 0, generation = sc->sc_generation;
10589 
10590 	/*
10591 	 * Prevent processes from entering this function while another
10592 	 * process is tsleep'ing in it.
10593 	 */
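	/*
	 * sc_generation is bumped by iwm_init() and iwm_stop(); if it
	 * changed while we slept on the lock, the interface was reset
	 * and this request is stale.
	 */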
10594 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10595 	if (err == 0 && generation != sc->sc_generation) {
10596 		rw_exit(&sc->ioctl_rwl);
10597 		return ENXIO;
10598 	}
10599 	if (err)
10600 		return err;
10601 	s = splnet();
10602 
10603 	switch (cmd) {
10604 	case SIOCSIFADDR:
10605 		ifp->if_flags |= IFF_UP;
10606 		/* FALLTHROUGH */
10607 	case SIOCSIFFLAGS:
10608 		if (ifp->if_flags & IFF_UP) {
10609 			if (!(ifp->if_flags & IFF_RUNNING)) {
10610 				/* Force reload of firmware image from disk. */
10611 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10612 				err = iwm_init(ifp);
10613 			}
10614 		} else {
10615 			if (ifp->if_flags & IFF_RUNNING)
10616 				iwm_stop(ifp);
10617 		}
10618 		break;
10619 
10620 	default:
10621 		err = ieee80211_ioctl(ifp, cmd, data);
10622 	}
10623 
10624 	if (err == ENETRESET) {
10625 		err = 0;
10626 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10627 		    (IFF_UP | IFF_RUNNING)) {
10628 			iwm_stop(ifp);
10629 			err = iwm_init(ifp);
10630 		}
10631 	}
10632 
10633 	splx(s);
10634 	rw_exit(&sc->ioctl_rwl);
10635 
10636 	return err;
10637 }
10638 
10639 /*
10640  * Note: This structure is read from the device with IO accesses,
10641  * and the reading already does the endian conversion. As it is
10642  * read with uint32_t-sized accesses, any members with a different size
10643  * need to be ordered correctly though!
10644  */
10645 struct iwm_error_event_table {
10646 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10647 	uint32_t error_id;		/* type of error */
10648 	uint32_t trm_hw_status0;	/* TRM HW status */
10649 	uint32_t trm_hw_status1;	/* TRM HW status */
10650 	uint32_t blink2;		/* branch link */
10651 	uint32_t ilink1;		/* interrupt link */
10652 	uint32_t ilink2;		/* interrupt link */
10653 	uint32_t data1;		/* error-specific data */
10654 	uint32_t data2;		/* error-specific data */
10655 	uint32_t data3;		/* error-specific data */
10656 	uint32_t bcon_time;		/* beacon timer */
10657 	uint32_t tsf_low;		/* network timestamp function timer */
10658 	uint32_t tsf_hi;		/* network timestamp function timer */
10659 	uint32_t gp1;		/* GP1 timer register */
10660 	uint32_t gp2;		/* GP2 timer register */
10661 	uint32_t fw_rev_type;	/* firmware revision type */
10662 	uint32_t major;		/* uCode version major */
10663 	uint32_t minor;		/* uCode version minor */
10664 	uint32_t hw_ver;		/* HW Silicon version */
10665 	uint32_t brd_ver;		/* HW board version */
10666 	uint32_t log_pc;		/* log program counter */
10667 	uint32_t frame_ptr;		/* frame pointer */
10668 	uint32_t stack_ptr;		/* stack pointer */
10669 	uint32_t hcmd;		/* last host command header */
10670 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
10671 				 * rxtx_flag */
10672 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
10673 				 * host_flag */
10674 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
10675 				 * enc_flag */
10676 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
10677 				 * time_flag */
10678 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
10679 				 * wico interrupt */
10680 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
10681 	uint32_t wait_event;		/* wait event() caller address */
10682 	uint32_t l2p_control;	/* L2pControlField */
10683 	uint32_t l2p_duration;	/* L2pDurationField */
10684 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
10685 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
10686 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
10687 				 * (LMPM_PMG_SEL) */
10688 	uint32_t u_timestamp;	/* date and time of the firmware
10689 				 * compilation */
10690 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
10691 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
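
/*
 * Note: iwm_nic_error() below fetches this table via iwm_read_mem(),
 * whose length argument is a count of 32-bit words, hence the
 * sizeof(table)/sizeof(uint32_t) at its call sites.
 */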
10692 
10693 /*
10694  * UMAC error struct - relevant starting from family 8000 chip.
10695  * Note: This structure is read from the device with IO accesses,
10696  * and the reading already does the endian conversion. As it is
10697  * read with u32-sized accesses, any members with a different size
10698  * need to be ordered correctly though!
10699  */
10700 struct iwm_umac_error_event_table {
10701 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10702 	uint32_t error_id;	/* type of error */
10703 	uint32_t blink1;	/* branch link */
10704 	uint32_t blink2;	/* branch link */
10705 	uint32_t ilink1;	/* interrupt link */
10706 	uint32_t ilink2;	/* interrupt link */
10707 	uint32_t data1;		/* error-specific data */
10708 	uint32_t data2;		/* error-specific data */
10709 	uint32_t data3;		/* error-specific data */
10710 	uint32_t umac_major;
10711 	uint32_t umac_minor;
10712 	uint32_t frame_pointer;	/* core register 27 */
10713 	uint32_t stack_pointer;	/* core register 28 */
10714 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
10715 	uint32_t nic_isr_pref;	/* ISR status register */
10716 } __packed;
10717 
10718 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
10719 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
10720 
10721 void
10722 iwm_nic_umac_error(struct iwm_softc *sc)
10723 {
10724 	struct iwm_umac_error_event_table table;
10725 	uint32_t base;
10726 
10727 	base = sc->sc_uc.uc_umac_error_event_table;
10728 
10729 	if (base < 0x800000) {
10730 		printf("%s: Invalid error log pointer 0x%08x\n",
10731 		    DEVNAME(sc), base);
10732 		return;
10733 	}
10734 
10735 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10736 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10737 		return;
10738 	}
10739 
10740 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10741 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10742 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10743 			sc->sc_flags, table.valid);
10744 	}
10745 
10746 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10747 		iwm_desc_lookup(table.error_id));
10748 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10749 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10750 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10751 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10752 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10753 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10754 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10755 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10756 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10757 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10758 	    table.frame_pointer);
10759 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10760 	    table.stack_pointer);
10761 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10762 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10763 	    table.nic_isr_pref);
10764 }
10765 
10766 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10767 static struct {
10768 	const char *name;
10769 	uint8_t num;
10770 } advanced_lookup[] = {
10771 	{ "NMI_INTERRUPT_WDG", 0x34 },
10772 	{ "SYSASSERT", 0x35 },
10773 	{ "UCODE_VERSION_MISMATCH", 0x37 },
10774 	{ "BAD_COMMAND", 0x38 },
10775 	{ "BAD_COMMAND", 0x39 },
10776 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10777 	{ "FATAL_ERROR", 0x3D },
10778 	{ "NMI_TRM_HW_ERR", 0x46 },
10779 	{ "NMI_INTERRUPT_TRM", 0x4C },
10780 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10781 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10782 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10783 	{ "NMI_INTERRUPT_HOST", 0x66 },
10784 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10785 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10786 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10787 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
10788 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
10789 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10790 	{ "ADVANCED_SYSASSERT", 0 },
10791 };
10792 
10793 const char *
10794 iwm_desc_lookup(uint32_t num)
10795 {
10796 	int i;
10797 
10798 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10799 		if (advanced_lookup[i].num ==
10800 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10801 			return advanced_lookup[i].name;
10802 
10803 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10804 	return advanced_lookup[i].name;
10805 }
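
#if 0
/*
 * Illustrative sketch (not part of the driver): firmware tags assert
 * codes with CPU bits in the top nibble, which the lookup masks off,
 * so e.g. 0x10000038 still resolves to "BAD_COMMAND".
 */
	const char *desc = iwm_desc_lookup(0x10000038);
#endif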
10806 
10807 /*
10808  * Support for dumping the error log seemed like a good idea ...
10809  * but it's mostly hex junk and the only sensible thing is the
10810  * hw/ucode revision (which we know anyway).  Since it's here,
10811  * I'll just leave it in, just in case e.g. the Intel guys want to
10812  * help us decipher some "ADVANCED_SYSASSERT" later.
10813  */
10814 void
10815 iwm_nic_error(struct iwm_softc *sc)
10816 {
10817 	struct iwm_error_event_table table;
10818 	uint32_t base;
10819 
10820 	printf("%s: dumping device error log\n", DEVNAME(sc));
10821 	base = sc->sc_uc.uc_error_event_table;
10822 	if (base < 0x800000) {
10823 		printf("%s: Invalid error log pointer 0x%08x\n",
10824 		    DEVNAME(sc), base);
10825 		return;
10826 	}
10827 
10828 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10829 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10830 		return;
10831 	}
10832 
10833 	if (!table.valid) {
10834 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10835 		return;
10836 	}
10837 
10838 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10839 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10840 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10841 		    sc->sc_flags, table.valid);
10842 	}
10843 
10844 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10845 	    iwm_desc_lookup(table.error_id));
10846 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10847 	    table.trm_hw_status0);
10848 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10849 	    table.trm_hw_status1);
10850 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10851 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10852 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10853 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10854 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10855 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10856 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10857 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10858 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10859 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10860 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10861 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10862 	    table.fw_rev_type);
10863 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10864 	    table.major);
10865 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10866 	    table.minor);
10867 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10868 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10869 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10870 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10871 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10872 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10873 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10874 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10875 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10876 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10877 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10878 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10879 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10880 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10881 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10882 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10883 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10884 
10885 	if (sc->sc_uc.uc_umac_error_event_table)
10886 		iwm_nic_umac_error(sc);
10887 }
10888 
10889 void
10890 iwm_dump_driver_status(struct iwm_softc *sc)
10891 {
10892 	int i;
10893 
10894 	printf("driver status:\n");
10895 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
10896 		struct iwm_tx_ring *ring = &sc->txq[i];
10897 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
10898 		    "queued=%-3d\n",
10899 		    i, ring->qid, ring->cur, ring->queued);
10900 	}
10901 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
10902 	printf("  802.11 state %s\n",
10903 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10904 }
10905 
10906 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10907 do {									\
10908 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10909 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10910 	_var_ = (void *)((_pkt_)+1);					\
10911 } while (/*CONSTCOND*/0)
10912 
10913 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10914 do {									\
10915 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10916 	    (_len_), BUS_DMASYNC_POSTREAD);				\
10917 	_ptr_ = (void *)((_pkt_)+1);					\
10918 } while (/*CONSTCOND*/0)
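
#if 0
/*
 * Usage sketch (illustrative only; assumes the locals 'sc', 'data' and
 * 'pkt' of iwm_rx_pkt() below): the response payload follows the packet
 * header in the DMA buffer, so after syncing the macro yields a typed
 * pointer just past the header, i.e. (void *)(pkt + 1).
 */
	struct iwm_alive_resp_v3 *resp3;
	SYNC_RESP_STRUCT(resp3, pkt);
#endif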
10919 
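/*
 * Note: ADVANCE_RXQ() expects a local variable named 'count' (the RX
 * ring size) to be in scope at the call site; see iwm_notif_intr().
 */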
10920 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count)
10921 
10922 int
10923 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10924 {
10925 	int qid, idx, code;
10926 
10927 	qid = pkt->hdr.qid & ~0x80;
10928 	idx = pkt->hdr.idx;
10929 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10930 
10931 	return (!(qid == 0 && idx == 0 && code == 0) &&
10932 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10933 }
10934 
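/*
 * Process all frames stacked up in one RX buffer. Layout sketch:
 * each frame starts on an IWM_FH_RSCSR_FRAME_ALIGN boundary, so the
 * walk below advances 'offset' by roundup(len, IWM_FH_RSCSR_FRAME_ALIGN).
 *
 *   +-----------------+---------+--pad--+-----------------+---------+--
 *   | iwm_rx_packet 0 | payload |       | iwm_rx_packet 1 | payload |
 *   +-----------------+---------+-------+-----------------+---------+--
 */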
10935 void
10936 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10937 {
10938 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10939 	struct iwm_rx_packet *pkt, *nextpkt;
10940 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10941 	struct mbuf *m0, *m;
10942 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10943 	int qid, idx, code, handled = 1;
10944 
10945 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10946 	    BUS_DMASYNC_POSTREAD);
10947 
10948 	m0 = data->m;
10949 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
10950 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
10951 		qid = pkt->hdr.qid;
10952 		idx = pkt->hdr.idx;
10953 
10954 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10955 
10956 		if (!iwm_rx_pkt_valid(pkt))
10957 			break;
10958 
10959 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
10960 		if (len < minsz || len > (IWM_RBUF_SIZE - offset))
10961 			break;
10962 
10963 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10964 			/* Take mbuf m0 off the RX ring. */
10965 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
10966 				ifp->if_ierrors++;
10967 				break;
10968 			}
10969 			KASSERT(data->m != m0);
10970 		}
10971 
10972 		switch (code) {
10973 		case IWM_REPLY_RX_PHY_CMD:
10974 			iwm_rx_rx_phy_cmd(sc, pkt, data);
10975 			break;
10976 
10977 		case IWM_REPLY_RX_MPDU_CMD: {
10978 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
10979 			nextoff = offset +
10980 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10981 			nextpkt = (struct iwm_rx_packet *)
10982 			    (m0->m_data + nextoff);
10983 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
10984 			    !iwm_rx_pkt_valid(nextpkt)) {
10985 				/* No need to copy last frame in buffer. */
10986 				if (offset > 0)
10987 					m_adj(m0, offset);
10988 				if (sc->sc_mqrx_supported)
10989 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
10990 					    maxlen, ml);
10991 				else
10992 					iwm_rx_mpdu(sc, m0, pkt->data,
10993 					    maxlen, ml);
10994 				m0 = NULL; /* stack owns m0 now; abort loop */
10995 			} else {
10996 				/*
10997 				 * Create an mbuf which points to the current
10998 				 * packet. Always copy from offset zero to
10999 				 * preserve m_pkthdr.
11000 				 */
11001 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
11002 				if (m == NULL) {
11003 					ifp->if_ierrors++;
11004 					m_freem(m0);
11005 					m0 = NULL;
11006 					break;
11007 				}
11008 				m_adj(m, offset);
11009 				if (sc->sc_mqrx_supported)
11010 					iwm_rx_mpdu_mq(sc, m, pkt->data,
11011 					    maxlen, ml);
11012 				else
11013 					iwm_rx_mpdu(sc, m, pkt->data,
11014 					    maxlen, ml);
11015 			}
11016 			break;
11017 		}
11018 
11019 		case IWM_TX_CMD:
11020 			iwm_rx_tx_cmd(sc, pkt, data);
11021 			break;
11022 
11023 		case IWM_BA_NOTIF:
11024 			iwm_rx_compressed_ba(sc, pkt);
11025 			break;
11026 
11027 		case IWM_MISSED_BEACONS_NOTIFICATION:
11028 			iwm_rx_bmiss(sc, pkt, data);
11029 			break;
11030 
11031 		case IWM_MFUART_LOAD_NOTIFICATION:
11032 			break;
11033 
11034 		case IWM_ALIVE: {
11035 			struct iwm_alive_resp_v1 *resp1;
11036 			struct iwm_alive_resp_v2 *resp2;
11037 			struct iwm_alive_resp_v3 *resp3;
11038 
11039 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
11040 				SYNC_RESP_STRUCT(resp1, pkt);
11041 				sc->sc_uc.uc_error_event_table
11042 				    = le32toh(resp1->error_event_table_ptr);
11043 				sc->sc_uc.uc_log_event_table
11044 				    = le32toh(resp1->log_event_table_ptr);
11045 				sc->sched_base = le32toh(resp1->scd_base_ptr);
11046 				if (resp1->status == IWM_ALIVE_STATUS_OK)
11047 					sc->sc_uc.uc_ok = 1;
11048 				else
11049 					sc->sc_uc.uc_ok = 0;
11050 			}
11051 
11052 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
11053 				SYNC_RESP_STRUCT(resp2, pkt);
11054 				sc->sc_uc.uc_error_event_table
11055 				    = le32toh(resp2->error_event_table_ptr);
11056 				sc->sc_uc.uc_log_event_table
11057 				    = le32toh(resp2->log_event_table_ptr);
11058 				sc->sched_base = le32toh(resp2->scd_base_ptr);
11059 				sc->sc_uc.uc_umac_error_event_table
11060 				    = le32toh(resp2->error_info_addr);
11061 				if (resp2->status == IWM_ALIVE_STATUS_OK)
11062 					sc->sc_uc.uc_ok = 1;
11063 				else
11064 					sc->sc_uc.uc_ok = 0;
11065 			}
11066 
11067 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
11068 				SYNC_RESP_STRUCT(resp3, pkt);
11069 				sc->sc_uc.uc_error_event_table
11070 				    = le32toh(resp3->error_event_table_ptr);
11071 				sc->sc_uc.uc_log_event_table
11072 				    = le32toh(resp3->log_event_table_ptr);
11073 				sc->sched_base = le32toh(resp3->scd_base_ptr);
11074 				sc->sc_uc.uc_umac_error_event_table
11075 				    = le32toh(resp3->error_info_addr);
11076 				if (resp3->status == IWM_ALIVE_STATUS_OK)
11077 					sc->sc_uc.uc_ok = 1;
11078 				else
11079 					sc->sc_uc.uc_ok = 0;
11080 			}
11081 
11082 			sc->sc_uc.uc_intr = 1;
11083 			wakeup(&sc->sc_uc);
11084 			break;
11085 		}
11086 
11087 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
11088 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
11089 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
11090 			iwm_phy_db_set_section(sc, phy_db_notif);
11091 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
11092 			wakeup(&sc->sc_init_complete);
11093 			break;
11094 		}
11095 
11096 		case IWM_STATISTICS_NOTIFICATION: {
11097 			struct iwm_notif_statistics *stats;
11098 			SYNC_RESP_STRUCT(stats, pkt);
11099 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
11100 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
11101 			break;
11102 		}
11103 
11104 		case IWM_MCC_CHUB_UPDATE_CMD: {
11105 			struct iwm_mcc_chub_notif *notif;
11106 			SYNC_RESP_STRUCT(notif, pkt);
11107 			iwm_mcc_update(sc, notif);
11108 			break;
11109 		}
11110 
11111 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
11112 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11113 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
11114 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11115 				 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
11116 			break;
11117 
11118 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11119 		    IWM_CT_KILL_NOTIFICATION): {
11120 			struct iwm_ct_kill_notif *notif;
11121 			SYNC_RESP_STRUCT(notif, pkt);
11122 			printf("%s: device at critical temperature (%u degC), "
11123 			    "stopping device\n",
11124 			    DEVNAME(sc), le16toh(notif->temperature));
11125 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11126 			task_add(systq, &sc->init_task);
11127 			break;
11128 		}
11129 
11130 		case IWM_ADD_STA_KEY:
11131 		case IWM_PHY_CONFIGURATION_CMD:
11132 		case IWM_TX_ANT_CONFIGURATION_CMD:
11133 		case IWM_ADD_STA:
11134 		case IWM_MAC_CONTEXT_CMD:
11135 		case IWM_REPLY_SF_CFG_CMD:
11136 		case IWM_POWER_TABLE_CMD:
11137 		case IWM_LTR_CONFIG:
11138 		case IWM_PHY_CONTEXT_CMD:
11139 		case IWM_BINDING_CONTEXT_CMD:
11140 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
11141 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
11142 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
11143 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
11144 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
11145 		case IWM_REPLY_BEACON_FILTERING_CMD:
11146 		case IWM_MAC_PM_POWER_TABLE:
11147 		case IWM_TIME_QUOTA_CMD:
11148 		case IWM_REMOVE_STA:
11149 		case IWM_TXPATH_FLUSH:
11150 		case IWM_LQ_CMD:
11151 		case IWM_WIDE_ID(IWM_LONG_GROUP,
11152 				 IWM_FW_PAGING_BLOCK_CMD):
11153 		case IWM_BT_CONFIG:
11154 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
11155 		case IWM_NVM_ACCESS_CMD:
11156 		case IWM_MCC_UPDATE_CMD:
11157 		case IWM_TIME_EVENT_CMD: {
11158 			size_t pkt_len;
11159 
11160 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
11161 				break;
11162 
11163 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
11164 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11165 
11166 			pkt_len = sizeof(pkt->len_n_flags) +
11167 			    iwm_rx_packet_len(pkt);
11168 
11169 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
11170 			    pkt_len < sizeof(*pkt) ||
11171 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
11172 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11173 				    sc->sc_cmd_resp_len[idx]);
11174 				sc->sc_cmd_resp_pkt[idx] = NULL;
11175 				break;
11176 			}
11177 
11178 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
11179 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11180 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11181 			break;
11182 		}
11183 
11184 		/* ignore */
11185 		case IWM_PHY_DB_CMD:
11186 			break;
11187 
11188 		case IWM_INIT_COMPLETE_NOTIF:
11189 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
11190 			wakeup(&sc->sc_init_complete);
11191 			break;
11192 
11193 		case IWM_SCAN_OFFLOAD_COMPLETE: {
11194 			struct iwm_periodic_scan_complete *notif;
11195 			SYNC_RESP_STRUCT(notif, pkt);
11196 			break;
11197 		}
11198 
11199 		case IWM_SCAN_ITERATION_COMPLETE: {
11200 			struct iwm_lmac_scan_complete_notif *notif;
11201 			SYNC_RESP_STRUCT(notif, pkt);
11202 			iwm_endscan(sc);
11203 			break;
11204 		}
11205 
11206 		case IWM_SCAN_COMPLETE_UMAC: {
11207 			struct iwm_umac_scan_complete *notif;
11208 			SYNC_RESP_STRUCT(notif, pkt);
11209 			iwm_endscan(sc);
11210 			break;
11211 		}
11212 
11213 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
11214 			struct iwm_umac_scan_iter_complete_notif *notif;
11215 			SYNC_RESP_STRUCT(notif, pkt);
11216 			iwm_endscan(sc);
11217 			break;
11218 		}
11219 
11220 		case IWM_REPLY_ERROR: {
11221 			struct iwm_error_resp *resp;
11222 			SYNC_RESP_STRUCT(resp, pkt);
11223 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
11224 				DEVNAME(sc), le32toh(resp->error_type),
11225 				resp->cmd_id);
11226 			break;
11227 		}
11228 
11229 		case IWM_TIME_EVENT_NOTIFICATION: {
11230 			struct iwm_time_event_notif *notif;
11231 			uint32_t action;
11232 			SYNC_RESP_STRUCT(notif, pkt);
11233 
11234 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
11235 				break;
11236 			action = le32toh(notif->action);
11237 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
11238 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
11239 			break;
11240 		}
11241 
11242 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
11243 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
11244 			break;
11245 
11246 		/*
11247 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
11248 		 * messages. Just ignore them for now.
11249 		 */
11250 		case IWM_DEBUG_LOG_MSG:
11251 			break;
11252 
11253 		case IWM_MCAST_FILTER_CMD:
11254 			break;
11255 
11256 		case IWM_SCD_QUEUE_CFG: {
11257 			struct iwm_scd_txq_cfg_rsp *rsp;
11258 			SYNC_RESP_STRUCT(rsp, pkt);
11259 
11260 			break;
11261 		}
11262 
11263 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
11264 			break;
11265 
11266 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
11267 			break;
11268 
11269 		default:
11270 			handled = 0;
11271 			printf("%s: unhandled firmware response 0x%x/0x%x "
11272 			    "rx ring %d[%d]\n",
11273 			    DEVNAME(sc), code, pkt->len_n_flags,
11274 			    (qid & ~0x80), idx);
11275 			break;
11276 		}
11277 
11278 		/*
11279 		 * uCode sets bit 0x80 when it originates the notification,
11280 		 * i.e. when the notification is not a direct response to a
11281 		 * command sent by the driver.
11282 		 * For example, uCode issues IWM_REPLY_RX when it sends a
11283 		 * received frame to the driver.
11284 		 */
11285 		if (handled && !(qid & (1 << 7))) {
11286 			iwm_cmd_done(sc, qid, idx, code);
11287 		}
11288 
11289 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11290 	}
11291 
11292 	if (m0 && m0 != data->m)
11293 		m_freem(m0);
11294 }
11295 
11296 void
11297 iwm_notif_intr(struct iwm_softc *sc)
11298 {
11299 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
11300 	uint32_t wreg;
11301 	uint16_t hw;
11302 	int count;
11303 
11304 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
11305 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
11306 
11307 	if (sc->sc_mqrx_supported) {
11308 		count = IWM_RX_MQ_RING_COUNT;
11309 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
11310 	} else {
11311 		count = IWM_RX_RING_COUNT;
11312 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
11313 	}
11314 
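	/*
	 * Both ring sizes are powers of two, so masking with (count - 1)
	 * reduces the closed receive-buffer index modulo the ring size.
	 */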
11315 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
11316 	hw &= (count - 1);
11317 	while (sc->rxq.cur != hw) {
11318 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
11319 		iwm_rx_pkt(sc, data, &ml);
11320 		ADVANCE_RXQ(sc);
11321 	}
11322 	if_input(&sc->sc_ic.ic_if, &ml);
11323 
11324 	/*
11325 	 * Tell the firmware what we have processed.
11326 	 * Seems like the hardware gets upset unless we align the write by 8??
11327 	 */
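	/*
	 * Worked example: with a 256-entry ring and hw == 0 we write
	 * (255 & ~7) == 248, i.e. the index of the last processed buffer
	 * rounded down to a multiple of 8.
	 */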
11328 	hw = (hw == 0) ? count - 1 : hw - 1;
11329 	IWM_WRITE(sc, wreg, hw & ~7);
11330 }
11331 
11332 int
11333 iwm_intr(void *arg)
11334 {
11335 	struct iwm_softc *sc = arg;
11336 	struct ieee80211com *ic = &sc->sc_ic;
11337 	struct ifnet *ifp = IC2IFP(ic);
11338 	int handled = 0;
11339 	int rv = 0;
11340 	uint32_t r1, r2;
11341 
11342 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
11343 
11344 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
11345 		uint32_t *ict = sc->ict_dma.vaddr;
11346 		int tmp;
11347 
11348 		tmp = le32toh(ict[sc->ict_cur]);
11349 		if (!tmp)
11350 			goto out_ena;
11351 
11352 		/*
11353 		 * ok, there was something.  keep plowing until we have all.
11354 		 */
11355 		r1 = r2 = 0;
11356 		while (tmp) {
11357 			r1 |= tmp;
11358 			ict[sc->ict_cur] = 0;
11359 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
11360 			tmp = le32toh(ict[sc->ict_cur]);
11361 		}
11362 
11363 		/* this is where the fun begins.  don't ask */
11364 		if (r1 == 0xffffffff)
11365 			r1 = 0;
11366 
11367 		/*
11368 		 * Workaround for hardware bug where bits are falsely cleared
11369 		 * when using interrupt coalescing.  Bit 15 should be set if
11370 		 * bits 18 and 19 are set.
11371 		 */
11372 		if (r1 & 0xc0000)
11373 			r1 |= 0x8000;
11374 
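		/*
		 * ICT entries store the CSR_INT bits in a compressed form;
		 * expand them: bits 0-7 stay in place, bits 8-15 map to
		 * bits 24-31.
		 */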
11375 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
11376 	} else {
11377 		r1 = IWM_READ(sc, IWM_CSR_INT);
11378 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11379 	}
11380 	if (r1 == 0 && r2 == 0) {
11381 		goto out_ena;
11382 	}
11383 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
11384 		goto out;
11385 
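	/*
	 * Acknowledge the interrupts we are about to service and also
	 * clear any bits outside our enabled mask (CSR_INT is
	 * write-1-clear).
	 */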
11386 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11387 
11388 	/* ignored */
11389 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
11390 
11391 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
11392 		handled |= IWM_CSR_INT_BIT_RF_KILL;
11393 		iwm_check_rfkill(sc);
11394 		task_add(systq, &sc->init_task);
11395 		rv = 1;
11396 		goto out_ena;
11397 	}
11398 
11399 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
11400 		if (ifp->if_flags & IFF_DEBUG) {
11401 			iwm_nic_error(sc);
11402 			iwm_dump_driver_status(sc);
11403 		}
11404 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11405 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11406 			task_add(systq, &sc->init_task);
11407 		rv = 1;
11408 		goto out;
11410 	}
11411 
11412 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
11413 		handled |= IWM_CSR_INT_BIT_HW_ERR;
11414 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11415 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11416 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11417 			task_add(systq, &sc->init_task);
11418 		}
11419 		rv = 1;
11420 		goto out;
11421 	}
11422 
11423 	/* firmware chunk loaded */
11424 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
11425 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11426 		handled |= IWM_CSR_INT_BIT_FH_TX;
11427 
11428 		sc->sc_fw_chunk_done = 1;
11429 		wakeup(&sc->sc_fw);
11430 	}
11431 
11432 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
11433 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
11434 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
11435 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
11436 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11437 		}
11438 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
11439 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
11440 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11441 		}
11442 
11443 		/* Disable periodic interrupt; we use it as just a one-shot. */
11444 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11445 
11446 		/*
11447 		 * Enable periodic interrupt in 8 msec only if we received
11448 		 * real RX interrupt (instead of just periodic int), to catch
11449 		 * any dangling Rx interrupt.  If it was just the periodic
11450 		 * interrupt, there was no dangling Rx activity, and no need
11451 		 * to extend the periodic interrupt; one-shot is enough.
11452 		 */
11453 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
11454 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11455 			    IWM_CSR_INT_PERIODIC_ENA);
11456 
11457 		iwm_notif_intr(sc);
11458 	}
11459 
11460 	rv = 1;
11461 
11462  out_ena:
11463 	iwm_restore_interrupts(sc);
11464  out:
11465 	return rv;
11466 }
11467 
11468 int
11469 iwm_intr_msix(void *arg)
11470 {
11471 	struct iwm_softc *sc = arg;
11472 	struct ieee80211com *ic = &sc->sc_ic;
11473 	struct ifnet *ifp = IC2IFP(ic);
11474 	uint32_t inta_fh, inta_hw;
11475 	int vector = 0;
11476 
11477 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11478 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11479 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11480 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11481 	inta_fh &= sc->sc_fh_mask;
11482 	inta_hw &= sc->sc_hw_mask;
11483 
11484 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11485 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11486 		iwm_notif_intr(sc);
11487 	}
11488 
11489 	/* firmware chunk loaded */
11490 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11491 		sc->sc_fw_chunk_done = 1;
11492 		wakeup(&sc->sc_fw);
11493 	}
11494 
11495 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11496 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11497 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11498 		if (ifp->if_flags & IFF_DEBUG) {
11499 			iwm_nic_error(sc);
11500 			iwm_dump_driver_status(sc);
11501 		}
11502 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11503 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11504 			task_add(systq, &sc->init_task);
11505 		return 1;
11506 	}
11507 
11508 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11509 		iwm_check_rfkill(sc);
11510 		task_add(systq, &sc->init_task);
11511 	}
11512 
11513 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11514 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11515 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11516 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11517 			task_add(systq, &sc->init_task);
11518 		}
11519 		return 1;
11520 	}
11521 
11522 	/*
11523 	 * Before raising the interrupt, the HW masks it to prevent a
11524 	 * nested interrupt. This is done by setting the corresponding
11525 	 * bit in the mask register. After the interrupt has been handled,
11526 	 * it must be re-enabled by clearing this bit. The register is
11527 	 * defined as a write-1-clear (W1C) register, meaning that a bit
11528 	 * is cleared by writing 1 to it.
11529 	 */
11530 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11531 	return 1;
11532 }
11533 
11534 typedef void *iwm_match_t;
11535 
11536 static const struct pci_matchid iwm_devices[] = {
11537 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11538 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11539 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11540 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11541 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11542 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11543 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11544 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11545 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11546 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11547 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11548 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11549 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11550 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11551 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11552 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_3 },
11553 };
11554 
11555 int
11556 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11557 {
11558 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11559 	    nitems(iwm_devices));
11560 }
11561 
11562 int
11563 iwm_preinit(struct iwm_softc *sc)
11564 {
11565 	struct ieee80211com *ic = &sc->sc_ic;
11566 	struct ifnet *ifp = IC2IFP(ic);
11567 	int err;
11568 
11569 	err = iwm_prepare_card_hw(sc);
11570 	if (err) {
11571 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11572 		return err;
11573 	}
11574 
11575 	if (sc->attached) {
11576 		/* Update MAC in case the upper layers changed it. */
11577 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11578 		    ((struct arpcom *)ifp)->ac_enaddr);
11579 		return 0;
11580 	}
11581 
11582 	err = iwm_start_hw(sc);
11583 	if (err) {
11584 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11585 		return err;
11586 	}
11587 
11588 	err = iwm_run_init_mvm_ucode(sc, 1);
11589 	iwm_stop_device(sc);
11590 	if (err)
11591 		return err;
11592 
11593 	/* Print version info and MAC address on first successful fw load. */
11594 	sc->attached = 1;
11595 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11596 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11597 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11598 
11599 	if (sc->sc_nvm.sku_cap_11n_enable)
11600 		iwm_setup_ht_rates(sc);
11601 
11602 	/* not all hardware can do 5GHz band */
11603 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11604 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11605 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11606 
11607 	/* Configure channel information obtained from firmware. */
11608 	ieee80211_channel_init(ifp);
11609 
11610 	/* Configure MAC address. */
11611 	err = if_setlladdr(ifp, ic->ic_myaddr);
11612 	if (err)
11613 		printf("%s: could not set MAC address (error %d)\n",
11614 		    DEVNAME(sc), err);
11615 
11616 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11617 
11618 	return 0;
11619 }
11620 
11621 void
11622 iwm_attach_hook(struct device *self)
11623 {
11624 	struct iwm_softc *sc = (void *)self;
11625 
11626 	KASSERT(!cold);
11627 
11628 	iwm_preinit(sc);
11629 }
11630 
11631 void
11632 iwm_attach(struct device *parent, struct device *self, void *aux)
11633 {
11634 	struct iwm_softc *sc = (void *)self;
11635 	struct pci_attach_args *pa = aux;
11636 	pci_intr_handle_t ih;
11637 	pcireg_t reg, memtype;
11638 	struct ieee80211com *ic = &sc->sc_ic;
11639 	struct ifnet *ifp = &ic->ic_if;
11640 	const char *intrstr;
11641 	int err;
11642 	int txq_i, i, j;
11643 
11644 	sc->sc_pct = pa->pa_pc;
11645 	sc->sc_pcitag = pa->pa_tag;
11646 	sc->sc_dmat = pa->pa_dmat;
11647 
11648 	rw_init(&sc->ioctl_rwl, "iwmioctl");
11649 
11650 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11651 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11652 	if (err == 0) {
11653 		printf("%s: PCIe capability structure not found!\n",
11654 		    DEVNAME(sc));
11655 		return;
11656 	}
11657 
11658 	/*
11659 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11660 	 * PCI Tx retries from interfering with C3 CPU state.
11661 	 */
11662 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11663 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11664 
11665 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11666 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11667 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11668 	if (err) {
11669 		printf("%s: can't map mem space\n", DEVNAME(sc));
11670 		return;
11671 	}
11672 
11673 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11674 		sc->sc_msix = 1;
11675 	} else if (pci_intr_map_msi(pa, &ih)) {
11676 		if (pci_intr_map(pa, &ih)) {
11677 			printf("%s: can't map interrupt\n", DEVNAME(sc));
11678 			return;
11679 		}
11680 		/* Hardware bug workaround. */
11681 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11682 		    PCI_COMMAND_STATUS_REG);
11683 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11684 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11685 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11686 		    PCI_COMMAND_STATUS_REG, reg);
11687 	}
11688 
11689 	intrstr = pci_intr_string(sc->sc_pct, ih);
11690 	if (sc->sc_msix)
11691 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11692 		    iwm_intr_msix, sc, DEVNAME(sc));
11693 	else
11694 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11695 		    iwm_intr, sc, DEVNAME(sc));
11696 
11697 	if (sc->sc_ih == NULL) {
11698 		printf("\n");
11699 		printf("%s: can't establish interrupt", DEVNAME(sc));
11700 		if (intrstr != NULL)
11701 			printf(" at %s", intrstr);
11702 		printf("\n");
11703 		return;
11704 	}
11705 	printf(", %s\n", intrstr);
11706 
11707 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11708 	switch (PCI_PRODUCT(pa->pa_id)) {
11709 	case PCI_PRODUCT_INTEL_WL_3160_1:
11710 	case PCI_PRODUCT_INTEL_WL_3160_2:
11711 		sc->sc_fwname = "iwm-3160-17";
11712 		sc->host_interrupt_operation_mode = 1;
11713 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11714 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11715 		sc->sc_nvm_max_section_size = 16384;
11716 		sc->nvm_type = IWM_NVM;
11717 		break;
11718 	case PCI_PRODUCT_INTEL_WL_3165_1:
11719 	case PCI_PRODUCT_INTEL_WL_3165_2:
11720 		sc->sc_fwname = "iwm-7265D-29";
11721 		sc->host_interrupt_operation_mode = 0;
11722 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11723 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11724 		sc->sc_nvm_max_section_size = 16384;
11725 		sc->nvm_type = IWM_NVM;
11726 		break;
11727 	case PCI_PRODUCT_INTEL_WL_3168_1:
11728 		sc->sc_fwname = "iwm-3168-29";
11729 		sc->host_interrupt_operation_mode = 0;
11730 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11731 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11732 		sc->sc_nvm_max_section_size = 16384;
11733 		sc->nvm_type = IWM_NVM_SDP;
11734 		break;
11735 	case PCI_PRODUCT_INTEL_WL_7260_1:
11736 	case PCI_PRODUCT_INTEL_WL_7260_2:
11737 		sc->sc_fwname = "iwm-7260-17";
11738 		sc->host_interrupt_operation_mode = 1;
11739 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11740 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11741 		sc->sc_nvm_max_section_size = 16384;
11742 		sc->nvm_type = IWM_NVM;
11743 		break;
11744 	case PCI_PRODUCT_INTEL_WL_7265_1:
11745 	case PCI_PRODUCT_INTEL_WL_7265_2:
11746 		sc->sc_fwname = "iwm-7265-17";
11747 		sc->host_interrupt_operation_mode = 0;
11748 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11749 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11750 		sc->sc_nvm_max_section_size = 16384;
11751 		sc->nvm_type = IWM_NVM;
11752 		break;
11753 	case PCI_PRODUCT_INTEL_WL_8260_1:
11754 	case PCI_PRODUCT_INTEL_WL_8260_2:
11755 		sc->sc_fwname = "iwm-8000C-36";
11756 		sc->host_interrupt_operation_mode = 0;
11757 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11758 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11759 		sc->sc_nvm_max_section_size = 32768;
11760 		sc->nvm_type = IWM_NVM_EXT;
11761 		break;
11762 	case PCI_PRODUCT_INTEL_WL_8265_1:
11763 		sc->sc_fwname = "iwm-8265-36";
11764 		sc->host_interrupt_operation_mode = 0;
11765 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11766 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11767 		sc->sc_nvm_max_section_size = 32768;
11768 		sc->nvm_type = IWM_NVM_EXT;
11769 		break;
11770 	case PCI_PRODUCT_INTEL_WL_9260_1:
11771 		sc->sc_fwname = "iwm-9260-46";
11772 		sc->host_interrupt_operation_mode = 0;
11773 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11774 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11775 		sc->sc_nvm_max_section_size = 32768;
11776 		sc->sc_mqrx_supported = 1;
11777 		break;
11778 	case PCI_PRODUCT_INTEL_WL_9560_1:
11779 	case PCI_PRODUCT_INTEL_WL_9560_2:
11780 	case PCI_PRODUCT_INTEL_WL_9560_3:
11781 		sc->sc_fwname = "iwm-9000-46";
11782 		sc->host_interrupt_operation_mode = 0;
11783 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11784 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11785 		sc->sc_nvm_max_section_size = 32768;
11786 		sc->sc_mqrx_supported = 1;
11787 		sc->sc_integrated = 1;
11788 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
11789 			sc->sc_xtal_latency = 670;
11790 			sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
11791 		} else
11792 			sc->sc_xtal_latency = 650;
11793 		break;
11794 	default:
11795 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11796 		return;
11797 	}
11798 
11799 	/*
11800 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
11801 	 * changed: the revision step now also includes bits 0-1 (there is no
11802 	 * more "dash" value). To keep hw_rev backwards compatible, we store
11803 	 * it in the old format.
11804 	 */
11805 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11806 		uint32_t hw_step;
11807 
11808 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11809 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11810 
11811 		if (iwm_prepare_card_hw(sc) != 0) {
11812 			printf("%s: could not initialize hardware\n",
11813 			    DEVNAME(sc));
11814 			return;
11815 		}
11816 
11817 		/*
11818 		 * In order to recognize the C step, the driver must read the
11819 		 * chip version id located at the AUX bus MISC address.
11820 		 */
11821 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11822 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
11823 		DELAY(2);
11824 
11825 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11826 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11827 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11828 				   25000);
11829 		if (!err) {
11830 			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
11831 			return;
11832 		}
11833 
11834 		if (iwm_nic_lock(sc)) {
11835 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11836 			hw_step |= IWM_ENABLE_WFPM;
11837 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11838 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11839 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
11840 			if (hw_step == 0x3)
11841 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11842 						(IWM_SILICON_C_STEP << 2);
11843 			iwm_nic_unlock(sc);
11844 		} else {
11845 			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
11846 			return;
11847 		}
11848 	}
11849 
11850 	/*
11851 	 * Allocate DMA memory for firmware transfers.
11852 	 * Must be aligned on a 16-byte boundary.
11853 	 */
11854 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11855 	    sc->sc_fwdmasegsz, 16);
11856 	if (err) {
11857 		printf("%s: could not allocate memory for firmware\n",
11858 		    DEVNAME(sc));
11859 		return;
11860 	}
11861 
11862 	/* Allocate "Keep Warm" page, used internally by the card. */
11863 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11864 	if (err) {
11865 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
11866 		goto fail1;
11867 	}
11868 
11869 	/* Allocate interrupt cause table (ICT). */
11870 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11871 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
11872 	if (err) {
11873 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11874 		goto fail2;
11875 	}
11876 
11877 	/* TX scheduler rings must be aligned on a 1KB boundary. */
11878 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
11879 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
11880 	if (err) {
11881 		printf("%s: could not allocate TX scheduler rings\n",
11882 		    DEVNAME(sc));
11883 		goto fail3;
11884 	}
11885 
11886 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11887 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11888 		if (err) {
11889 			printf("%s: could not allocate TX ring %d\n",
11890 			    DEVNAME(sc), txq_i);
11891 			goto fail4;
11892 		}
11893 	}
11894 
11895 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
11896 	if (err) {
11897 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11898 		goto fail4;
11899 	}
11900 
11901 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
11902 	if (sc->sc_nswq == NULL)
11903 		goto fail4;
11904 
11905 	/* Clear pending interrupts. */
11906 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
11907 
11908 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11909 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11910 	ic->ic_state = IEEE80211_S_INIT;
11911 
11912 	/* Set device capabilities. */
11913 	ic->ic_caps =
11914 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11915 	    IEEE80211_C_WEP |		/* WEP */
11916 	    IEEE80211_C_RSN |		/* WPA/RSN */
11917 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11918 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11919 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11920 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11921 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11922 
11923 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11924 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11925 	ic->ic_htcaps |=
11926 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11927 	ic->ic_htxcaps = 0;
11928 	ic->ic_txbfcaps = 0;
11929 	ic->ic_aselcaps = 0;
11930 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11931 
11932 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11933 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11934 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11935 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11936 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11937 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11938 
11939 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11940 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11941 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11942 
11943 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11944 		sc->sc_phyctxt[i].id = i;
11945 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11946 		sc->sc_phyctxt[i].vht_chan_width =
11947 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11948 	}
11949 
11950 	sc->sc_amrr.amrr_min_success_threshold =  1;
11951 	sc->sc_amrr.amrr_max_success_threshold = 15;
11952 
11953 	/* IBSS channel undefined for now. */
11954 	ic->ic_ibss_chan = &ic->ic_channels[1];
11955 
11956 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
11957 
11958 	ifp->if_softc = sc;
11959 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11960 	ifp->if_ioctl = iwm_ioctl;
11961 	ifp->if_start = iwm_start;
11962 	ifp->if_watchdog = iwm_watchdog;
11963 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11964 
11965 	if_attach(ifp);
11966 	ieee80211_ifattach(ifp);
11967 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11968 
11969 #if NBPFILTER > 0
11970 	iwm_radiotap_attach(sc);
11971 #endif
11972 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
11973 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
11974 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
11975 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
11976 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
11977 		rxba->sc = sc;
11978 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
11979 		    rxba);
11980 		timeout_set(&rxba->reorder_buf.reorder_timer,
11981 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
11982 		for (j = 0; j < nitems(rxba->entries); j++)
11983 			ml_init(&rxba->entries[j].frames);
11984 	}
11985 	task_set(&sc->init_task, iwm_init_task, sc);
11986 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
11987 	task_set(&sc->ba_task, iwm_ba_task, sc);
11988 	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
11989 	task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
11990 	task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
11991 
11992 	ic->ic_node_alloc = iwm_node_alloc;
11993 	ic->ic_bgscan_start = iwm_bgscan;
11994 	ic->ic_bgscan_done = iwm_bgscan_done;
11995 	ic->ic_set_key = iwm_set_key;
11996 	ic->ic_delete_key = iwm_delete_key;
11997 
11998 	/* Override 802.11 state transition machine. */
11999 	sc->sc_newstate = ic->ic_newstate;
12000 	ic->ic_newstate = iwm_newstate;
12001 	ic->ic_updateprot = iwm_updateprot;
12002 	ic->ic_updateslot = iwm_updateslot;
12003 	ic->ic_updateedca = iwm_updateedca;
12004 	ic->ic_updatedtim = iwm_updatedtim;
12005 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
12006 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
12007 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
12008 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
12009 	/*
12010 	 * We cannot read the MAC address without loading the
12011 	 * firmware from disk. Postpone until mountroot is done.
12012 	 */
12013 	config_mountroot(self, iwm_attach_hook);
12014 
12015 	return;
12016 
12017 fail4:	while (--txq_i >= 0)
12018 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
12019 	iwm_free_rx_ring(sc, &sc->rxq);
12020 	iwm_dma_contig_free(&sc->sched_dma);
12021 fail3:	if (sc->ict_dma.vaddr != NULL)
12022 		iwm_dma_contig_free(&sc->ict_dma);
12023 
12024 fail2:	iwm_dma_contig_free(&sc->kw_dma);
12025 fail1:	iwm_dma_contig_free(&sc->fw_dma);
12026 	return;
12027 }
12028 
12029 #if NBPFILTER > 0
12030 void
12031 iwm_radiotap_attach(struct iwm_softc *sc)
12032 {
12033 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
12034 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
12035 
12036 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
12037 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
12038 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
12039 
12040 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
12041 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
12042 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
12043 }
12044 #endif
12045 
12046 void
12047 iwm_init_task(void *arg1)
12048 {
12049 	struct iwm_softc *sc = arg1;
12050 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12051 	int s = splnet();
12052 	int generation = sc->sc_generation;
12053 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
12054 
12055 	rw_enter_write(&sc->ioctl_rwl);
12056 	if (generation != sc->sc_generation) {
12057 		rw_exit(&sc->ioctl_rwl);
12058 		splx(s);
12059 		return;
12060 	}
12061 
12062 	if (ifp->if_flags & IFF_RUNNING)
12063 		iwm_stop(ifp);
12064 	else
12065 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
12066 
12067 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
12068 		iwm_init(ifp);
12069 
12070 	rw_exit(&sc->ioctl_rwl);
12071 	splx(s);
12072 }
12073 
12074 void
12075 iwm_resume(struct iwm_softc *sc)
12076 {
12077 	pcireg_t reg;
12078 
12079 	/*
12080 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
12081 	 * PCI Tx retries from interfering with C3 CPU state.
12082 	 */
12083 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
12084 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
12085 
12086 	if (!sc->sc_msix) {
12087 		/* Hardware bug workaround. */
12088 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
12089 		    PCI_COMMAND_STATUS_REG);
12090 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
12091 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
12092 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
12093 		    PCI_COMMAND_STATUS_REG, reg);
12094 	}
12095 
12096 	iwm_disable_interrupts(sc);
12097 }
12098 
12099 int
12100 iwm_wakeup(struct iwm_softc *sc)
12101 {
12102 	struct ieee80211com *ic = &sc->sc_ic;
12103 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12104 	int err;
12105 
12106 	err = iwm_start_hw(sc);
12107 	if (err)
12108 		return err;
12109 
12110 	err = iwm_init_hw(sc);
12111 	if (err)
12112 		return err;
12113 
12114 	refcnt_init(&sc->task_refs);
12115 	ifq_clr_oactive(&ifp->if_snd);
12116 	ifp->if_flags |= IFF_RUNNING;
12117 
12118 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
12119 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
12120 	else
12121 		ieee80211_begin_scan(ifp);
12122 
12123 	return 0;
12124 }
12125 
12126 int
12127 iwm_activate(struct device *self, int act)
12128 {
12129 	struct iwm_softc *sc = (struct iwm_softc *)self;
12130 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12131 	int err = 0;
12132 
12133 	switch (act) {
12134 	case DVACT_QUIESCE:
12135 		if (ifp->if_flags & IFF_RUNNING) {
12136 			rw_enter_write(&sc->ioctl_rwl);
12137 			iwm_stop(ifp);
12138 			rw_exit(&sc->ioctl_rwl);
12139 		}
12140 		break;
12141 	case DVACT_RESUME:
12142 		iwm_resume(sc);
12143 		break;
12144 	case DVACT_WAKEUP:
12145 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
12146 			err = iwm_wakeup(sc);
12147 			if (err)
12148 				printf("%s: could not initialize hardware\n",
12149 				    DEVNAME(sc));
12150 		}
12151 		break;
12152 	}
12153 
12154 	return 0;
12155 }
12156 
12157 struct cfdriver iwm_cd = {
12158 	NULL, "iwm", DV_IFNET
12159 };
12160 
12161 const struct cfattach iwm_ca = {
12162 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
12163 	NULL, iwm_activate
12164 };
12165