1 /*	$OpenBSD: if_iwm.c,v 1.403 2022/07/11 11:28:37 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include "bpfilter.h"
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/malloc.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/systm.h>
122 #include <sys/endian.h>
123 
124 #include <sys/refcnt.h>
125 #include <sys/task.h>
126 #include <machine/bus.h>
127 #include <machine/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/if_ether.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_ra.h>
146 #include <net80211/ieee80211_ra_vht.h>
147 #include <net80211/ieee80211_radiotap.h>
148 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
149 #undef DPRINTF /* defined in ieee80211_priv.h */
150 
151 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
152 
153 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
154 
155 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
156 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
157 
158 #ifdef IWM_DEBUG
159 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
160 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
161 int iwm_debug = 1;
162 #else
163 #define DPRINTF(x)	do { ; } while (0)
164 #define DPRINTFN(n, x)	do { ; } while (0)
165 #endif
166 
167 #include <dev/pci/if_iwmreg.h>
168 #include <dev/pci/if_iwmvar.h>
169 
170 const uint8_t iwm_nvm_channels[] = {
171 	/* 2.4 GHz */
172 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
173 	/* 5 GHz */
174 	36, 40, 44, 48, 52, 56, 60, 64,
175 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
176 	149, 153, 157, 161, 165
177 };
178 
179 const uint8_t iwm_nvm_channels_8000[] = {
180 	/* 2.4 GHz */
181 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182 	/* 5 GHz */
183 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185 	149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 
188 #define IWM_NUM_2GHZ_CHANNELS	14
189 
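/*
 * Rate table: the "rate" field is in units of 500 kb/s (so 2 means
 * 1 Mb/s), "plcp" is the legacy CCK/OFDM PLCP code, and "ht_plcp" is
 * the PLCP code of the HT MCS which shares this table slot, if any.
 */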
190 const struct iwm_rate {
191 	uint16_t rate;
192 	uint8_t plcp;
193 	uint8_t ht_plcp;
194 } iwm_rates[] = {
195 		/* Legacy */		/* HT */
196 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
197 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
198 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
199 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
200 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
201 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
202 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
203 	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
204 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
205 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
206 	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
207 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
208 	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
209 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
210 	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
211 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
212 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
213 	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
214 	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
215 	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
216 	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
217 };
218 #define IWM_RIDX_CCK	0
219 #define IWM_RIDX_OFDM	4
220 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
221 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
222 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
223 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
224 
225 /* Convert an MCS index into an iwm_rates[] index. */
226 const int iwm_ht_mcs2ridx[] = {
227 	IWM_RATE_MCS_0_INDEX,
228 	IWM_RATE_MCS_1_INDEX,
229 	IWM_RATE_MCS_2_INDEX,
230 	IWM_RATE_MCS_3_INDEX,
231 	IWM_RATE_MCS_4_INDEX,
232 	IWM_RATE_MCS_5_INDEX,
233 	IWM_RATE_MCS_6_INDEX,
234 	IWM_RATE_MCS_7_INDEX,
235 	IWM_RATE_MCS_8_INDEX,
236 	IWM_RATE_MCS_9_INDEX,
237 	IWM_RATE_MCS_10_INDEX,
238 	IWM_RATE_MCS_11_INDEX,
239 	IWM_RATE_MCS_12_INDEX,
240 	IWM_RATE_MCS_13_INDEX,
241 	IWM_RATE_MCS_14_INDEX,
242 	IWM_RATE_MCS_15_INDEX,
243 };
244 
245 struct iwm_nvm_section {
246 	uint16_t length;
247 	uint8_t *data;
248 };
249 
250 int	iwm_is_mimo_ht_plcp(uint8_t);
251 int	iwm_is_mimo_ht_mcs(int);
252 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
253 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
254 	    uint8_t *, size_t);
255 int	iwm_set_default_calib(struct iwm_softc *, const void *);
256 void	iwm_fw_info_free(struct iwm_fw_info *);
257 void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
258 int	iwm_read_firmware(struct iwm_softc *);
259 uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
260 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
261 void	iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
262 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
263 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
264 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
265 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
266 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
267 int	iwm_nic_lock(struct iwm_softc *);
268 void	iwm_nic_assert_locked(struct iwm_softc *);
269 void	iwm_nic_unlock(struct iwm_softc *);
270 int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
271 	    uint32_t);
272 int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
273 int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
274 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
275 	    bus_size_t);
276 void	iwm_dma_contig_free(struct iwm_dma_info *);
277 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
278 void	iwm_disable_rx_dma(struct iwm_softc *);
279 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
280 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
281 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
282 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
283 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
284 void	iwm_enable_rfkill_int(struct iwm_softc *);
285 int	iwm_check_rfkill(struct iwm_softc *);
286 void	iwm_enable_interrupts(struct iwm_softc *);
287 void	iwm_enable_fwload_interrupt(struct iwm_softc *);
288 void	iwm_restore_interrupts(struct iwm_softc *);
289 void	iwm_disable_interrupts(struct iwm_softc *);
290 void	iwm_ict_reset(struct iwm_softc *);
291 int	iwm_set_hw_ready(struct iwm_softc *);
292 int	iwm_prepare_card_hw(struct iwm_softc *);
293 void	iwm_apm_config(struct iwm_softc *);
294 int	iwm_apm_init(struct iwm_softc *);
295 void	iwm_apm_stop(struct iwm_softc *);
296 int	iwm_allow_mcast(struct iwm_softc *);
297 void	iwm_init_msix_hw(struct iwm_softc *);
298 void	iwm_conf_msix_hw(struct iwm_softc *, int);
299 int	iwm_clear_persistence_bit(struct iwm_softc *);
300 int	iwm_start_hw(struct iwm_softc *);
301 void	iwm_stop_device(struct iwm_softc *);
302 void	iwm_nic_config(struct iwm_softc *);
303 int	iwm_nic_rx_init(struct iwm_softc *);
304 int	iwm_nic_rx_legacy_init(struct iwm_softc *);
305 int	iwm_nic_rx_mq_init(struct iwm_softc *);
306 int	iwm_nic_tx_init(struct iwm_softc *);
307 int	iwm_nic_init(struct iwm_softc *);
308 int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
309 int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
310 	    uint16_t);
311 int	iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
312 int	iwm_post_alive(struct iwm_softc *);
313 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
314 	    uint16_t);
315 int	iwm_phy_db_set_section(struct iwm_softc *,
316 	    struct iwm_calib_res_notif_phy_db *);
317 int	iwm_is_valid_channel(uint16_t);
318 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
319 uint16_t iwm_channel_id_to_papd(uint16_t);
320 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
321 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
322 	    uint16_t *, uint16_t);
323 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
324 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
325 	    uint8_t);
326 int	iwm_send_phy_db_data(struct iwm_softc *);
327 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
328 	    uint32_t);
329 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
330 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
331 	    uint8_t *, uint16_t *);
332 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
333 	    uint16_t *, size_t);
334 uint8_t	iwm_fw_valid_tx_ant(struct iwm_softc *);
335 uint8_t	iwm_fw_valid_rx_ant(struct iwm_softc *);
336 int	iwm_valid_siso_ant_rate_mask(struct iwm_softc *);
337 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
338 	    const uint8_t *, int);
339 int	iwm_mimo_enabled(struct iwm_softc *);
340 void	iwm_setup_ht_rates(struct iwm_softc *);
341 void	iwm_setup_vht_rates(struct iwm_softc *);
342 void	iwm_mac_ctxt_task(void *);
343 void	iwm_phy_ctxt_task(void *);
344 void	iwm_updateprot(struct ieee80211com *);
345 void	iwm_updateslot(struct ieee80211com *);
346 void	iwm_updateedca(struct ieee80211com *);
347 void	iwm_updatechan(struct ieee80211com *);
348 void	iwm_updatedtim(struct ieee80211com *);
349 void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
350 	    uint16_t);
351 void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
352 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
353 	    uint8_t);
354 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
355 	    uint8_t);
356 void	iwm_rx_ba_session_expired(void *);
357 void	iwm_reorder_timer_expired(void *);
358 int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
359 	    uint16_t, uint16_t, int, int);
360 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
361 	    uint8_t);
362 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
363 	    uint8_t);
364 void	iwm_ba_task(void *);
365 
366 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
367 	    const uint16_t *, const uint16_t *,
368 	    const uint16_t *, const uint16_t *,
369 	    const uint16_t *, int);
370 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
371 	    const uint16_t *, const uint16_t *);
372 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
373 int	iwm_nvm_init(struct iwm_softc *);
374 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
375 	    uint32_t);
376 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
377 	    uint32_t);
378 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
379 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
380 	    int , int *);
381 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
382 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
383 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
384 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
385 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
386 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
387 int	iwm_send_dqa_cmd(struct iwm_softc *);
388 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
389 int	iwm_config_ltr(struct iwm_softc *);
390 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
391 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
392 int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
393 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
394 	    struct iwm_rx_data *);
395 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
396 int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
397 	    struct ieee80211_rxinfo *);
398 int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
399 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
400 void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
401 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
402 void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
403 	    int, uint8_t, int);
404 void	iwm_vht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
405 	    int, int, uint8_t, int);
406 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
407 	    struct iwm_node *, int, int);
408 void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
409 void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
410 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
411 	    struct iwm_rx_data *);
412 void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
413 void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
414 	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
415 void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
416 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
417 	    struct iwm_rx_data *);
418 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
419 uint8_t	iwm_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
420 int	iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
421 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
422 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
423 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
424 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
425 	    struct ieee80211_channel *, uint8_t, uint8_t, uint8_t, uint8_t);
426 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
427 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
428 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
429 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
430 	    const void *);
431 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
432 	    uint32_t *);
433 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
434 	    const void *, uint32_t *);
435 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
436 void	iwm_cmd_done(struct iwm_softc *, int, int, int);
437 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
438 void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
439 uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
440 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
441 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
442 int	iwm_flush_tx_path(struct iwm_softc *, int);
443 int	iwm_wait_tx_queues_empty(struct iwm_softc *);
444 void	iwm_led_enable(struct iwm_softc *);
445 void	iwm_led_disable(struct iwm_softc *);
446 int	iwm_led_is_enabled(struct iwm_softc *);
447 void	iwm_led_blink_timeout(void *);
448 void	iwm_led_blink_start(struct iwm_softc *);
449 void	iwm_led_blink_stop(struct iwm_softc *);
450 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
451 	    struct iwm_beacon_filter_cmd *);
452 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
453 	    struct iwm_beacon_filter_cmd *);
454 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
455 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
456 	    struct iwm_mac_power_cmd *);
457 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
458 int	iwm_power_update_device(struct iwm_softc *);
459 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
460 int	iwm_disable_beacon_filter(struct iwm_softc *);
461 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
462 int	iwm_add_aux_sta(struct iwm_softc *);
463 int	iwm_drain_sta(struct iwm_softc *, struct iwm_node *, int);
464 int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
465 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
466 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
467 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
468 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
469 	    struct iwm_scan_channel_cfg_lmac *, int, int);
470 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
471 int	iwm_lmac_scan(struct iwm_softc *, int);
472 int	iwm_config_umac_scan(struct iwm_softc *);
473 int	iwm_umac_scan(struct iwm_softc *, int);
474 void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
475 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
476 int	iwm_rval2ridx(int);
477 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
478 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
479 	    struct iwm_mac_ctx_cmd *, uint32_t);
480 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
481 	    struct iwm_mac_data_sta *, int);
482 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
483 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
484 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
485 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
486 int	iwm_scan(struct iwm_softc *);
487 int	iwm_bgscan(struct ieee80211com *);
488 void	iwm_bgscan_done(struct ieee80211com *,
489 	    struct ieee80211_node_switch_bss_arg *, size_t);
490 void	iwm_bgscan_done_task(void *);
491 int	iwm_umac_scan_abort(struct iwm_softc *);
492 int	iwm_lmac_scan_abort(struct iwm_softc *);
493 int	iwm_scan_abort(struct iwm_softc *);
494 int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
495 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
496 	    uint8_t);
497 int	iwm_auth(struct iwm_softc *);
498 int	iwm_deauth(struct iwm_softc *);
499 int	iwm_run(struct iwm_softc *);
500 int	iwm_run_stop(struct iwm_softc *);
501 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
502 int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
503 	    struct ieee80211_key *);
504 int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
505 	    struct ieee80211_key *);
506 void	iwm_delete_key_v1(struct ieee80211com *,
507 	    struct ieee80211_node *, struct ieee80211_key *);
508 void	iwm_delete_key(struct ieee80211com *,
509 	    struct ieee80211_node *, struct ieee80211_key *);
510 void	iwm_calib_timeout(void *);
511 void	iwm_set_rate_table_vht(struct iwm_node *, struct iwm_lq_cmd *);
512 void	iwm_set_rate_table(struct iwm_node *, struct iwm_lq_cmd *);
513 void	iwm_setrates(struct iwm_node *, int);
514 int	iwm_media_change(struct ifnet *);
515 void	iwm_newstate_task(void *);
516 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
517 void	iwm_endscan(struct iwm_softc *);
518 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
519 	    struct ieee80211_node *);
520 int	iwm_sf_config(struct iwm_softc *, int);
521 int	iwm_send_bt_init_conf(struct iwm_softc *);
522 int	iwm_send_soc_conf(struct iwm_softc *);
523 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
524 int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
525 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
526 void	iwm_free_fw_paging(struct iwm_softc *);
527 int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
528 int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
529 int	iwm_init_hw(struct iwm_softc *);
530 int	iwm_init(struct ifnet *);
531 void	iwm_start(struct ifnet *);
532 void	iwm_stop(struct ifnet *);
533 void	iwm_watchdog(struct ifnet *);
534 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
535 const char *iwm_desc_lookup(uint32_t);
536 void	iwm_nic_error(struct iwm_softc *);
537 void	iwm_dump_driver_status(struct iwm_softc *);
538 void	iwm_nic_umac_error(struct iwm_softc *);
539 void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
540 	    struct mbuf_list *);
541 void	iwm_flip_address(uint8_t *);
542 int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
543 	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
544 int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
545 void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
546 	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
547 	    struct mbuf_list *);
548 int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
549 	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
550 int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
551 	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
552 	    struct ieee80211_rxinfo *, struct mbuf_list *);
553 void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
554 	    struct mbuf_list *);
555 int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
556 void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
557 	    struct mbuf_list *);
558 void	iwm_notif_intr(struct iwm_softc *);
559 int	iwm_intr(void *);
560 int	iwm_intr_msix(void *);
561 int	iwm_match(struct device *, void *, void *);
562 int	iwm_preinit(struct iwm_softc *);
563 void	iwm_attach_hook(struct device *);
564 void	iwm_attach(struct device *, struct device *, void *);
565 void	iwm_init_task(void *);
566 int	iwm_activate(struct device *, int);
567 void	iwm_resume(struct iwm_softc *);
568 int	iwm_wakeup(struct iwm_softc *);
569 
570 #if NBPFILTER > 0
571 void	iwm_radiotap_attach(struct iwm_softc *);
572 #endif
573 
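/*
 * Return the command version which the firmware advertised for the given
 * group/command pair via the IWM_UCODE_TLV_CMD_VERSIONS TLV, or
 * IWM_FW_CMD_VER_UNKNOWN if the firmware did not provide one.
 */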
574 uint8_t
575 iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
576 {
577 	const struct iwm_fw_cmd_version *entry;
578 	int i;
579 
580 	for (i = 0; i < sc->n_cmd_versions; i++) {
581 		entry = &sc->cmd_versions[i];
582 		if (entry->group == grp && entry->cmd == cmd)
583 			return entry->cmd_ver;
584 	}
585 
586 	return IWM_FW_CMD_VER_UNKNOWN;
587 }
588 
589 int
590 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
591 {
592 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
593 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
594 }
595 
596 int
597 iwm_is_mimo_ht_mcs(int mcs)
598 {
599 	int ridx = iwm_ht_mcs2ridx[mcs];
600 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
602 }
603 
604 int
605 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
606 {
607 	struct iwm_fw_cscheme_list *l = (void *)data;
608 
609 	if (dlen < sizeof(*l) ||
610 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
611 		return EINVAL;
612 
613 	/* we don't actually store anything for now, always use s/w crypto */
614 
615 	return 0;
616 }
617 
618 int
619 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
620     uint8_t *data, size_t dlen)
621 {
622 	struct iwm_fw_sects *fws;
623 	struct iwm_fw_onesect *fwone;
624 
625 	if (type >= IWM_UCODE_TYPE_MAX)
626 		return EINVAL;
627 	if (dlen < sizeof(uint32_t))
628 		return EINVAL;
629 
630 	fws = &sc->sc_fw.fw_sects[type];
631 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
632 		return EINVAL;
633 
634 	fwone = &fws->fw_sect[fws->fw_count];
635 
636 	/* The first 32 bits are the device load offset. */
637 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
638 
639 	/* rest is data */
640 	fwone->fws_data = data + sizeof(uint32_t);
641 	fwone->fws_len = dlen - sizeof(uint32_t);
642 
643 	fws->fw_count++;
644 	fws->fw_totlen += fwone->fws_len;
645 
646 	return 0;
647 }
648 
649 #define IWM_DEFAULT_SCAN_CHANNELS	40
650 /* Newer firmware might support more channels. Raise this value if needed. */
651 #define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
652 
653 struct iwm_tlv_calib_data {
654 	uint32_t ucode_type;
655 	struct iwm_tlv_calib_ctrl calib;
656 } __packed;
657 
658 int
659 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
660 {
661 	const struct iwm_tlv_calib_data *def_calib = data;
662 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
663 
664 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
665 		return EINVAL;
666 
667 	sc->sc_default_calib[ucode_type].flow_trigger =
668 	    def_calib->calib.flow_trigger;
669 	sc->sc_default_calib[ucode_type].event_trigger =
670 	    def_calib->calib.event_trigger;
671 
672 	return 0;
673 }
674 
675 void
676 iwm_fw_info_free(struct iwm_fw_info *fw)
677 {
678 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
679 	fw->fw_rawdata = NULL;
680 	fw->fw_rawsize = 0;
681 	/* don't touch fw->fw_status */
682 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
683 }
684 
685 void
686 iwm_fw_version_str(char *buf, size_t bufsize,
687     uint32_t major, uint32_t minor, uint32_t api)
688 {
689 	/*
690 	 * Starting with major version 35 the Linux driver prints the minor
691 	 * version in hexadecimal (e.g. minor 0xf00d prints as "0000f00d").
692 	 */
693 	if (major >= 35)
694 		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
695 	else
696 		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
697 }
698 
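/*
 * Read the firmware image via loadfirmware(9) and parse its TLV records
 * into sc->sc_fw. Concurrent callers sleep until parsing has finished;
 * the result is cached and reused on subsequent calls.
 */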
699 int
700 iwm_read_firmware(struct iwm_softc *sc)
701 {
702 	struct iwm_fw_info *fw = &sc->sc_fw;
703 	struct iwm_tlv_ucode_header *uhdr;
704 	struct iwm_ucode_tlv tlv;
705 	uint32_t tlv_type;
706 	uint8_t *data;
707 	uint32_t usniffer_img;
708 	uint32_t paging_mem_size;
709 	int err;
710 	size_t len;
711 
712 	if (fw->fw_status == IWM_FW_STATUS_DONE)
713 		return 0;
714 
715 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
716 		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
717 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
718 
719 	if (fw->fw_rawdata != NULL)
720 		iwm_fw_info_free(fw);
721 
722 	err = loadfirmware(sc->sc_fwname,
723 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
724 	if (err) {
725 		printf("%s: could not read firmware %s (error %d)\n",
726 		    DEVNAME(sc), sc->sc_fwname, err);
727 		goto out;
728 	}
729 
730 	sc->sc_capaflags = 0;
731 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
732 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
733 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
734 	sc->n_cmd_versions = 0;
735 
736 	uhdr = (void *)fw->fw_rawdata;
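	/*
	 * TLV firmware images begin with a zero dword followed by the
	 * IWM_TLV_UCODE_MAGIC value; anything else is not a TLV image.
	 */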
737 	if (*(uint32_t *)fw->fw_rawdata != 0
738 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
739 		printf("%s: invalid firmware %s\n",
740 		    DEVNAME(sc), sc->sc_fwname);
741 		err = EINVAL;
742 		goto out;
743 	}
744 
745 	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
746 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
747 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
748 	    IWM_UCODE_API(le32toh(uhdr->ver)));
749 
750 	data = uhdr->data;
751 	len = fw->fw_rawsize - sizeof(*uhdr);
752 
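	/*
	 * Parse the TLV records following the header: each record is a
	 * little-endian (type, length) dword pair followed by 'length'
	 * payload bytes, padded to a 4-byte boundary (see the roundup()
	 * at the bottom of this loop).
	 */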
753 	while (len >= sizeof(tlv)) {
754 		size_t tlv_len;
755 		void *tlv_data;
756 
757 		memcpy(&tlv, data, sizeof(tlv));
758 		tlv_len = le32toh(tlv.length);
759 		tlv_type = le32toh(tlv.type);
760 
761 		len -= sizeof(tlv);
762 		data += sizeof(tlv);
763 		tlv_data = data;
764 
765 		if (len < tlv_len) {
766 			printf("%s: firmware too short: %zu bytes\n",
767 			    DEVNAME(sc), len);
768 			err = EINVAL;
769 			goto parse_out;
770 		}
771 
772 		switch (tlv_type) {
773 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
774 			if (tlv_len < sizeof(uint32_t)) {
775 				err = EINVAL;
776 				goto parse_out;
777 			}
778 			sc->sc_capa_max_probe_len
779 			    = le32toh(*(uint32_t *)tlv_data);
780 			if (sc->sc_capa_max_probe_len >
781 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
782 				err = EINVAL;
783 				goto parse_out;
784 			}
785 			break;
786 		case IWM_UCODE_TLV_PAN:
787 			if (tlv_len) {
788 				err = EINVAL;
789 				goto parse_out;
790 			}
791 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
792 			break;
793 		case IWM_UCODE_TLV_FLAGS:
794 			if (tlv_len < sizeof(uint32_t)) {
795 				err = EINVAL;
796 				goto parse_out;
797 			}
798 			/*
799 			 * Apparently there can be many flags, but the Linux
800 			 * driver parses only the first one, and so do we.
801 			 *
802 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
803 			 * Intentional or a bug?  Observations from
804 			 * current firmware file:
805 			 *  1) TLV_PAN is parsed first
806 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
807 			 * ==> this resets TLV_PAN to itself... hnnnk
808 			 */
809 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
810 			break;
811 		case IWM_UCODE_TLV_CSCHEME:
812 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
813 			if (err)
814 				goto parse_out;
815 			break;
816 		case IWM_UCODE_TLV_NUM_OF_CPU: {
817 			uint32_t num_cpu;
818 			if (tlv_len != sizeof(uint32_t)) {
819 				err = EINVAL;
820 				goto parse_out;
821 			}
822 			num_cpu = le32toh(*(uint32_t *)tlv_data);
823 			if (num_cpu < 1 || num_cpu > 2) {
824 				err = EINVAL;
825 				goto parse_out;
826 			}
827 			break;
828 		}
829 		case IWM_UCODE_TLV_SEC_RT:
830 			err = iwm_firmware_store_section(sc,
831 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
832 			if (err)
833 				goto parse_out;
834 			break;
835 		case IWM_UCODE_TLV_SEC_INIT:
836 			err = iwm_firmware_store_section(sc,
837 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
838 			if (err)
839 				goto parse_out;
840 			break;
841 		case IWM_UCODE_TLV_SEC_WOWLAN:
842 			err = iwm_firmware_store_section(sc,
843 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
844 			if (err)
845 				goto parse_out;
846 			break;
847 		case IWM_UCODE_TLV_DEF_CALIB:
848 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
849 				err = EINVAL;
850 				goto parse_out;
851 			}
852 			err = iwm_set_default_calib(sc, tlv_data);
853 			if (err)
854 				goto parse_out;
855 			break;
856 		case IWM_UCODE_TLV_PHY_SKU:
857 			if (tlv_len != sizeof(uint32_t)) {
858 				err = EINVAL;
859 				goto parse_out;
860 			}
861 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
862 			break;
863 
864 		case IWM_UCODE_TLV_API_CHANGES_SET: {
865 			struct iwm_ucode_api *api;
866 			int idx, i;
867 			if (tlv_len != sizeof(*api)) {
868 				err = EINVAL;
869 				goto parse_out;
870 			}
871 			api = (struct iwm_ucode_api *)tlv_data;
872 			idx = le32toh(api->api_index);
873 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
874 				err = EINVAL;
875 				goto parse_out;
876 			}
877 			for (i = 0; i < 32; i++) {
878 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
879 					continue;
880 				setbit(sc->sc_ucode_api, i + (32 * idx));
881 			}
882 			break;
883 		}
884 
885 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
886 			struct iwm_ucode_capa *capa;
887 			int idx, i;
888 			if (tlv_len != sizeof(*capa)) {
889 				err = EINVAL;
890 				goto parse_out;
891 			}
892 			capa = (struct iwm_ucode_capa *)tlv_data;
893 			idx = le32toh(capa->api_index);
894 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
895 				goto parse_out;
896 			}
897 			for (i = 0; i < 32; i++) {
898 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
899 					continue;
900 				setbit(sc->sc_enabled_capa, i + (32 * idx));
901 			}
902 			break;
903 		}
904 
905 		case IWM_UCODE_TLV_CMD_VERSIONS:
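			/* Truncate to a whole number of version records. */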
906 			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
907 				tlv_len /= sizeof(struct iwm_fw_cmd_version);
908 				tlv_len *= sizeof(struct iwm_fw_cmd_version);
909 			}
910 			if (sc->n_cmd_versions != 0) {
911 				err = EINVAL;
912 				goto parse_out;
913 			}
914 			if (tlv_len > sizeof(sc->cmd_versions)) {
915 				err = EINVAL;
916 				goto parse_out;
917 			}
918 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
919 			sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
920 			break;
921 
922 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
923 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
924 			/* ignore, not used by current driver */
925 			break;
926 
927 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
928 			err = iwm_firmware_store_section(sc,
929 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
930 			    tlv_len);
931 			if (err)
932 				goto parse_out;
933 			break;
934 
935 		case IWM_UCODE_TLV_PAGING:
936 			if (tlv_len != sizeof(uint32_t)) {
937 				err = EINVAL;
938 				goto parse_out;
939 			}
940 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
941 
942 			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
943 			    DEVNAME(sc), paging_mem_size));
944 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
945 				printf("%s: Driver only supports up to %u"
946 				    " bytes for paging image (%u requested)\n",
947 				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
948 				    paging_mem_size);
949 				err = EINVAL;
950 				goto out;
951 			}
952 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
953 				printf("%s: Paging: image isn't multiple of %u\n",
954 				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
955 				err = EINVAL;
956 				goto out;
957 			}
958 
959 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
960 			    paging_mem_size;
961 			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
962 			fw->fw_sects[usniffer_img].paging_mem_size =
963 			    paging_mem_size;
964 			break;
965 
966 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
967 			if (tlv_len != sizeof(uint32_t)) {
968 				err = EINVAL;
969 				goto parse_out;
970 			}
971 			sc->sc_capa_n_scan_channels =
972 			  le32toh(*(uint32_t *)tlv_data);
973 			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
974 				err = ERANGE;
975 				goto parse_out;
976 			}
977 			break;
978 
979 		case IWM_UCODE_TLV_FW_VERSION:
980 			if (tlv_len != sizeof(uint32_t) * 3) {
981 				err = EINVAL;
982 				goto parse_out;
983 			}
984 
985 			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
986 			    le32toh(((uint32_t *)tlv_data)[0]),
987 			    le32toh(((uint32_t *)tlv_data)[1]),
988 			    le32toh(((uint32_t *)tlv_data)[2]));
989 			break;
990 
991 		case IWM_UCODE_TLV_FW_DBG_DEST:
992 		case IWM_UCODE_TLV_FW_DBG_CONF:
993 		case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
994 		case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
995 		case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
996 		case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
997 		case IWM_UCODE_TLV_TYPE_HCMD:
998 		case IWM_UCODE_TLV_TYPE_REGIONS:
999 		case IWM_UCODE_TLV_TYPE_TRIGGERS:
1000 			break;
1001 
1002 		case IWM_UCODE_TLV_HW_TYPE:
1003 			break;
1004 
1005 		case IWM_UCODE_TLV_FW_MEM_SEG:
1006 			break;
1007 
1008 		/* undocumented TLVs found in iwm-9000-43 image */
1009 		case 0x1000003:
1010 		case 0x1000004:
1011 			break;
1012 
1013 		default:
1014 			err = EINVAL;
1015 			goto parse_out;
1016 		}
1017 
1018 		len -= roundup(tlv_len, 4);
1019 		data += roundup(tlv_len, 4);
1020 	}
1021 
1022 	KASSERT(err == 0);
1023 
1024  parse_out:
1025 	if (err) {
1026 		printf("%s: firmware parse error %d, "
1027 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1028 	}
1029 
1030  out:
1031 	if (err) {
1032 		fw->fw_status = IWM_FW_STATUS_NONE;
1033 		if (fw->fw_rawdata != NULL)
1034 			iwm_fw_info_free(fw);
1035 	} else
1036 		fw->fw_status = IWM_FW_STATUS_DONE;
1037 	wakeup(&sc->sc_fw);
1038 
1039 	return err;
1040 }
1041 
1042 uint32_t
1043 iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
1044 {
1045 	IWM_WRITE(sc,
1046 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1047 	IWM_BARRIER_READ_WRITE(sc);
1048 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
1049 }
1050 
1051 uint32_t
1052 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
1053 {
1054 	iwm_nic_assert_locked(sc);
1055 	return iwm_read_prph_unlocked(sc, addr);
1056 }
1057 
1058 void
1059 iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1060 {
1061 	IWM_WRITE(sc,
1062 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1063 	IWM_BARRIER_WRITE(sc);
1064 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
1065 }
1066 
1067 void
1068 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1069 {
1070 	iwm_nic_assert_locked(sc);
1071 	iwm_write_prph_unlocked(sc, addr, val);
1072 }
1073 
1074 void
1075 iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
1076 {
1077 	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1078 	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1079 }
1080 
1081 int
1082 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
1083 {
1084 	int offs, err = 0;
1085 	uint32_t *vals = buf;
1086 
1087 	if (iwm_nic_lock(sc)) {
1088 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
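		/* Like WADDR in iwm_write_mem(), RADDR auto-increments. */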
1089 		for (offs = 0; offs < dwords; offs++)
1090 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1091 		iwm_nic_unlock(sc);
1092 	} else {
1093 		err = EBUSY;
1094 	}
1095 	return err;
1096 }
1097 
1098 int
1099 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1100 {
1101 	int offs;
1102 	const uint32_t *vals = buf;
1103 
1104 	if (iwm_nic_lock(sc)) {
1105 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1106 		/* WADDR auto-increments */
1107 		for (offs = 0; offs < dwords; offs++) {
1108 			uint32_t val = vals ? vals[offs] : 0;
1109 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1110 		}
1111 		iwm_nic_unlock(sc);
1112 	} else {
1113 		return EBUSY;
1114 	}
1115 	return 0;
1116 }
1117 
1118 int
1119 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1120 {
1121 	return iwm_write_mem(sc, addr, &val, 1);
1122 }
1123 
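/*
 * Poll a CSR register until (value & mask) == (bits & mask), checking
 * roughly every 10 microseconds; 'timo' is the timeout in microseconds.
 * Returns 1 on success, 0 on timeout.
 */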
1124 int
1125 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1126     int timo)
1127 {
1128 	for (;;) {
1129 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1130 			return 1;
1131 		}
1132 		if (timo < 10) {
1133 			return 0;
1134 		}
1135 		timo -= 10;
1136 		DELAY(10);
1137 	}
1138 }
1139 
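/*
 * Request direct access to the NIC by asserting MAC_ACCESS_REQ, which
 * keeps the device awake while registers are being accessed. Calls may
 * nest; each successful call must be balanced by iwm_nic_unlock().
 * Returns 1 on success, 0 on failure.
 */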
1140 int
1141 iwm_nic_lock(struct iwm_softc *sc)
1142 {
1143 	if (sc->sc_nic_locks > 0) {
1144 		iwm_nic_assert_locked(sc);
1145 		sc->sc_nic_locks++;
1146 		return 1; /* already locked */
1147 	}
1148 
1149 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1150 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1151 
1152 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1153 		DELAY(2);
1154 
1155 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1156 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1157 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1158 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1159 		sc->sc_nic_locks++;
1160 		return 1;
1161 	}
1162 
1163 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1164 	return 0;
1165 }
1166 
1167 void
1168 iwm_nic_assert_locked(struct iwm_softc *sc)
1169 {
1170 	if (sc->sc_nic_locks <= 0)
1171 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1172 }
1173 
1174 void
1175 iwm_nic_unlock(struct iwm_softc *sc)
1176 {
1177 	if (sc->sc_nic_locks > 0) {
1178 		if (--sc->sc_nic_locks == 0)
1179 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1180 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1181 	} else
1182 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1183 }
1184 
1185 int
1186 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1187     uint32_t mask)
1188 {
1189 	uint32_t val;
1190 
1191 	if (iwm_nic_lock(sc)) {
1192 		val = iwm_read_prph(sc, reg) & mask;
1193 		val |= bits;
1194 		iwm_write_prph(sc, reg, val);
1195 		iwm_nic_unlock(sc);
1196 		return 0;
1197 	}
1198 	return EBUSY;
1199 }
1200 
1201 int
1202 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1203 {
1204 	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1205 }
1206 
1207 int
1208 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1209 {
1210 	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1211 }
1212 
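/*
 * Allocate a physically contiguous, zeroed DMA buffer with the given
 * size and alignment, and map it into kernel virtual address space.
 * On success, dma->vaddr and dma->paddr are valid.
 */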
1213 int
1214 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1215     bus_size_t size, bus_size_t alignment)
1216 {
1217 	int nsegs, err;
1218 	caddr_t va;
1219 
1220 	dma->tag = tag;
1221 	dma->size = size;
1222 
1223 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1224 	    &dma->map);
1225 	if (err)
1226 		goto fail;
1227 
1228 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1229 	    BUS_DMA_NOWAIT);
1230 	if (err)
1231 		goto fail;
1232 
1233 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1234 	    BUS_DMA_NOWAIT);
1235 	if (err)
1236 		goto fail;
1237 	dma->vaddr = va;
1238 
1239 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1240 	    BUS_DMA_NOWAIT);
1241 	if (err)
1242 		goto fail;
1243 
1244 	memset(dma->vaddr, 0, size);
1245 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1246 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1247 
1248 	return 0;
1249 
1250 fail:	iwm_dma_contig_free(dma);
1251 	return err;
1252 }
1253 
1254 void
1255 iwm_dma_contig_free(struct iwm_dma_info *dma)
1256 {
1257 	if (dma->map != NULL) {
1258 		if (dma->vaddr != NULL) {
1259 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1260 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1261 			bus_dmamap_unload(dma->tag, dma->map);
1262 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1263 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1264 			dma->vaddr = NULL;
1265 		}
1266 		bus_dmamap_destroy(dma->tag, dma->map);
1267 		dma->map = NULL;
1268 	}
1269 }
1270 
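/*
 * Allocate an RX ring: a "free" descriptor ring which the driver fills
 * with buffer addresses (64-bit entries on MQ-capable hardware, 32-bit
 * on older chips), a status area, and, on MQ-capable hardware, a "used"
 * descriptor ring on which the device returns processed buffers.
 */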
1271 int
1272 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1273 {
1274 	bus_size_t size;
1275 	size_t descsz;
1276 	int count, i, err;
1277 
1278 	ring->cur = 0;
1279 
1280 	if (sc->sc_mqrx_supported) {
1281 		count = IWM_RX_MQ_RING_COUNT;
1282 		descsz = sizeof(uint64_t);
1283 	} else {
1284 		count = IWM_RX_RING_COUNT;
1285 		descsz = sizeof(uint32_t);
1286 	}
1287 
1288 	/* Allocate RX descriptors (256-byte aligned). */
1289 	size = count * descsz;
1290 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1291 	if (err) {
1292 		printf("%s: could not allocate RX ring DMA memory\n",
1293 		    DEVNAME(sc));
1294 		goto fail;
1295 	}
1296 	ring->desc = ring->free_desc_dma.vaddr;
1297 
1298 	/* Allocate RX status area (16-byte aligned). */
1299 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1300 	    sizeof(*ring->stat), 16);
1301 	if (err) {
1302 		printf("%s: could not allocate RX status DMA memory\n",
1303 		    DEVNAME(sc));
1304 		goto fail;
1305 	}
1306 	ring->stat = ring->stat_dma.vaddr;
1307 
1308 	if (sc->sc_mqrx_supported) {
1309 		size = count * sizeof(uint32_t);
1310 		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1311 		    size, 256);
1312 		if (err) {
1313 			printf("%s: could not allocate RX ring DMA memory\n",
1314 			    DEVNAME(sc));
1315 			goto fail;
1316 		}
1317 	}
1318 
1319 	for (i = 0; i < count; i++) {
1320 		struct iwm_rx_data *data = &ring->data[i];
1321 
1322 		memset(data, 0, sizeof(*data));
1323 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1324 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1325 		    &data->map);
1326 		if (err) {
1327 			printf("%s: could not create RX buf DMA map\n",
1328 			    DEVNAME(sc));
1329 			goto fail;
1330 		}
1331 
1332 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1333 		if (err)
1334 			goto fail;
1335 	}
1336 	return 0;
1337 
1338 fail:	iwm_free_rx_ring(sc, ring);
1339 	return err;
1340 }
1341 
1342 void
1343 iwm_disable_rx_dma(struct iwm_softc *sc)
1344 {
1345 	int ntries;
1346 
1347 	if (iwm_nic_lock(sc)) {
1348 		if (sc->sc_mqrx_supported) {
1349 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1350 			for (ntries = 0; ntries < 1000; ntries++) {
1351 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1352 				    IWM_RXF_DMA_IDLE)
1353 					break;
1354 				DELAY(10);
1355 			}
1356 		} else {
1357 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1358 			for (ntries = 0; ntries < 1000; ntries++) {
1359 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
1360 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1361 					break;
1362 				DELAY(10);
1363 			}
1364 		}
1365 		iwm_nic_unlock(sc);
1366 	}
1367 }
1368 
1369 void
1370 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1371 {
1372 	ring->cur = 0;
1373 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1374 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1375 	memset(ring->stat, 0, sizeof(*ring->stat));
1376 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1377 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1378 
1379 }
1380 
1381 void
1382 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1383 {
1384 	int count, i;
1385 
1386 	iwm_dma_contig_free(&ring->free_desc_dma);
1387 	iwm_dma_contig_free(&ring->stat_dma);
1388 	iwm_dma_contig_free(&ring->used_desc_dma);
1389 
1390 	if (sc->sc_mqrx_supported)
1391 		count = IWM_RX_MQ_RING_COUNT;
1392 	else
1393 		count = IWM_RX_RING_COUNT;
1394 
1395 	for (i = 0; i < count; i++) {
1396 		struct iwm_rx_data *data = &ring->data[i];
1397 
1398 		if (data->m != NULL) {
1399 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1400 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1401 			bus_dmamap_unload(sc->sc_dmat, data->map);
1402 			m_freem(data->m);
1403 			data->m = NULL;
1404 		}
1405 		if (data->map != NULL)
1406 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1407 	}
1408 }
1409 
1410 int
1411 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1412 {
1413 	bus_addr_t paddr;
1414 	bus_size_t size;
1415 	int i, err;
1416 
1417 	ring->qid = qid;
1418 	ring->queued = 0;
1419 	ring->cur = 0;
1420 	ring->tail = 0;
1421 
1422 	/* Allocate TX descriptors (256-byte aligned). */
1423 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1424 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1425 	if (err) {
1426 		printf("%s: could not allocate TX ring DMA memory\n",
1427 		    DEVNAME(sc));
1428 		goto fail;
1429 	}
1430 	ring->desc = ring->desc_dma.vaddr;
1431 
1432 	/*
1433 	 * There is no need to allocate DMA buffers for unused rings.
1434 	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1435 	 * than we currently need.
1436 	 *
1437 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1438 	 * The command queue is queue 0 (sc->txq[0]), and the 4 mgmt/data
1439 	 * frame queues are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e.
1440 	 * sc->txq[5:8], in order to provide one queue per EDCA category.
1441 	 * Tx aggregation requires additional queues, one queue per TID for
1442 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1443 	 *
1444 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1445 	 * and Tx aggregation is not supported.
1446 	 *
1447 	 * Unfortunately, we cannot tell if DQA will be used until the
1448 	 * firmware gets loaded later, so just allocate sufficient rings
1449 	 * in order to satisfy both cases.
1450 	 */
1451 	if (qid > IWM_LAST_AGG_TX_QUEUE)
1452 		return 0;
1453 
1454 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1455 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1456 	if (err) {
1457 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1458 		goto fail;
1459 	}
1460 	ring->cmd = ring->cmd_dma.vaddr;
1461 
1462 	paddr = ring->cmd_dma.paddr;
1463 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1464 		struct iwm_tx_data *data = &ring->data[i];
1465 		size_t mapsize;
1466 
1467 		data->cmd_paddr = paddr;
1468 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1469 		    + offsetof(struct iwm_tx_cmd, scratch);
1470 		paddr += sizeof(struct iwm_device_cmd);
1471 
1472 		/* FW commands may require more mapped space than packets. */
1473 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1474 			mapsize = (sizeof(struct iwm_cmd_header) +
1475 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1476 		else
1477 			mapsize = MCLBYTES;
1478 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1479 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1480 		    &data->map);
1481 		if (err) {
1482 			printf("%s: could not create TX buf DMA map\n",
1483 			    DEVNAME(sc));
1484 			goto fail;
1485 		}
1486 	}
1487 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1488 	return 0;
1489 
1490 fail:	iwm_free_tx_ring(sc, ring);
1491 	return err;
1492 }
1493 
1494 void
1495 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1496 {
1497 	int i;
1498 
1499 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1500 		struct iwm_tx_data *data = &ring->data[i];
1501 
1502 		if (data->m != NULL) {
1503 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1504 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1505 			bus_dmamap_unload(sc->sc_dmat, data->map);
1506 			m_freem(data->m);
1507 			data->m = NULL;
1508 		}
1509 	}
1510 	/* Clear TX descriptors. */
1511 	memset(ring->desc, 0, ring->desc_dma.size);
1512 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1513 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1514 	sc->qfullmsk &= ~(1 << ring->qid);
1515 	sc->qenablemsk &= ~(1 << ring->qid);
1516 	/* 7000 family NICs are locked while commands are in progress. */
1517 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1518 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1519 			iwm_nic_unlock(sc);
1520 	}
1521 	ring->queued = 0;
1522 	ring->cur = 0;
1523 	ring->tail = 0;
1524 }
1525 
1526 void
1527 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1528 {
1529 	int i;
1530 
1531 	iwm_dma_contig_free(&ring->desc_dma);
1532 	iwm_dma_contig_free(&ring->cmd_dma);
1533 
1534 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1535 		struct iwm_tx_data *data = &ring->data[i];
1536 
1537 		if (data->m != NULL) {
1538 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1539 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1540 			bus_dmamap_unload(sc->sc_dmat, data->map);
1541 			m_freem(data->m);
1542 			data->m = NULL;
1543 		}
1544 		if (data->map != NULL)
1545 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1546 	}
1547 }
1548 
1549 void
1550 iwm_enable_rfkill_int(struct iwm_softc *sc)
1551 {
1552 	if (!sc->sc_msix) {
1553 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1554 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1555 	} else {
1556 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1557 		    sc->sc_fh_init_mask);
1558 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1559 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1560 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1561 	}
1562 
1563 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1564 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1565 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1566 }
1567 
1568 int
1569 iwm_check_rfkill(struct iwm_softc *sc)
1570 {
1571 	uint32_t v;
1572 	int rv;
1573 
1574 	/*
1575 	 * "documentation" is not really helpful here:
1576 	 *  27:	HW_RF_KILL_SW
1577 	 *	Indicates state of (platform's) hardware RF-Kill switch
1578 	 *
1579 	 * But apparently when it's off, it's on ...
1580 	 */
1581 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1582 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1583 	if (rv) {
1584 		sc->sc_flags |= IWM_FLAG_RFKILL;
1585 	} else {
1586 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1587 	}
1588 
1589 	return rv;
1590 }
1591 
1592 void
1593 iwm_enable_interrupts(struct iwm_softc *sc)
1594 {
1595 	if (!sc->sc_msix) {
1596 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1597 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1598 	} else {
1599 		/*
1600 		 * fh/hw_mask keeps all the unmasked interrupt causes.
1601 		 * Unlike MSI, an MSI-X cause is enabled while its mask bit is unset.
1602 		 */
1603 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1604 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1605 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1606 		    ~sc->sc_fh_mask);
1607 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1608 		    ~sc->sc_hw_mask);
1609 	}
1610 }
1611 
1612 void
1613 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1614 {
1615 	if (!sc->sc_msix) {
1616 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1617 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1618 	} else {
1619 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1620 		    sc->sc_hw_init_mask);
1621 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1622 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1623 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1624 	}
1625 }
1626 
1627 void
1628 iwm_restore_interrupts(struct iwm_softc *sc)
1629 {
1630 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1631 }
1632 
1633 void
1634 iwm_disable_interrupts(struct iwm_softc *sc)
1635 {
1636 	if (!sc->sc_msix) {
1637 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1638 
1639 		/* acknowledge all interrupts */
1640 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1641 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1642 	} else {
1643 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1644 		    sc->sc_fh_init_mask);
1645 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1646 		    sc->sc_hw_init_mask);
1647 	}
1648 }
1649 
1650 void
1651 iwm_ict_reset(struct iwm_softc *sc)
1652 {
1653 	iwm_disable_interrupts(sc);
1654 
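	/* Zero the in-memory interrupt cause table and restart at slot 0. */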
1655 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1656 	sc->ict_cur = 0;
1657 
1658 	/* Set physical address of ICT (4KB aligned). */
1659 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1660 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1661 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1662 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1663 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1664 
1665 	/* Switch to ICT interrupt mode in driver. */
1666 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1667 
1668 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1669 	iwm_enable_interrupts(sc);
1670 }
1671 
1672 #define IWM_HW_READY_TIMEOUT 50
1673 int
1674 iwm_set_hw_ready(struct iwm_softc *sc)
1675 {
1676 	int ready;
1677 
1678 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1679 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1680 
1681 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1682 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1683 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1684 	    IWM_HW_READY_TIMEOUT);
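	/* Once the NIC reports ready, tell it that the host OS is alive. */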
1685 	if (ready)
1686 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1687 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1688 
1689 	return ready;
1690 }
1691 #undef IWM_HW_READY_TIMEOUT
1692 
1693 int
1694 iwm_prepare_card_hw(struct iwm_softc *sc)
1695 {
1696 	int t = 0;
1697 	int ntries;
1698 
1699 	if (iwm_set_hw_ready(sc))
1700 		return 0;
1701 
1702 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1703 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1704 	DELAY(1000);
1705 
1706 	for (ntries = 0; ntries < 10; ntries++) {
1707 		/* If HW is not ready, prepare the conditions to check again */
1708 		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1709 		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1710 
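		/* Poll in 200us steps, for up to 150ms per attempt. */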
1711 		do {
1712 			if (iwm_set_hw_ready(sc))
1713 				return 0;
1714 			DELAY(200);
1715 			t += 200;
1716 		} while (t < 150000);
1717 		DELAY(25000);
1718 	}
1719 
1720 	return ETIMEDOUT;
1721 }
1722 
1723 void
1724 iwm_apm_config(struct iwm_softc *sc)
1725 {
1726 	pcireg_t lctl, cap;
1727 
1728 	/*
1729 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1730 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1731 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1732 	 *    this costs a negligible amount of power savings.
1733 	 * If not (unlikely), enable L0S, so there is at least some
1734 	 *    power savings, even without L1.
1735 	 */
1736 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1737 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1738 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1739 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1740 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1741 	} else {
1742 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1743 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1744 	}
1745 
1746 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1747 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1748 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1749 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1750 	    DEVNAME(sc),
1751 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1752 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1753 }
1754 
1755 /*
1756  * Start up NIC's basic functionality after it has been reset
1757  * e.g. after platform boot or shutdown.
1758  * NOTE:  This does not load uCode nor start the embedded processor
1759  */
1760 int
1761 iwm_apm_init(struct iwm_softc *sc)
1762 {
1763 	int err = 0;
1764 
1765 	/* Disable L0S exit timer (platform NMI workaround) */
1766 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1767 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1768 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1769 
1770 	/*
1771 	 * Disable L0s without affecting L1;
1772 	 *  don't wait for ICH L0s (ICH bug W/A)
1773 	 */
1774 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1775 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1776 
1777 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1778 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1779 
1780 	/*
1781 	 * Enable HAP INTA (interrupt from management bus) to
1782 	 * wake device's PCI Express link L1a -> L0s
1783 	 */
1784 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1785 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1786 
1787 	iwm_apm_config(sc);
1788 
1789 #if 0 /* not for 7k/8k */
1790 	/* Configure analog phase-lock-loop before activating to D0A */
1791 	if (trans->cfg->base_params->pll_cfg_val)
1792 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1793 		    trans->cfg->base_params->pll_cfg_val);
1794 #endif
1795 
1796 	/*
1797 	 * Set "initialization complete" bit to move adapter from
1798 	 * D0U* --> D0A* (powered-up active) state.
1799 	 */
1800 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1801 
1802 	/*
1803 	 * Wait for clock stabilization; once stabilized, access to
1804 	 * device-internal resources is supported, e.g. iwm_write_prph()
1805 	 * and accesses to uCode SRAM.
1806 	 */
1807 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1808 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1809 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1810 		printf("%s: timeout waiting for clock stabilization\n",
1811 		    DEVNAME(sc));
1812 		err = ETIMEDOUT;
1813 		goto out;
1814 	}
1815 
1816 	if (sc->host_interrupt_operation_mode) {
1817 		/*
1818 		 * This is a bit of an abuse: this workaround is needed for
1819 		 * 7260/3160 only, so we key it off host_interrupt_operation_mode
1820 		 * even though it is not related to that mode.
1821 		 *
1822 		 * Enable the oscillator to count wake up time for L1 exit. This
1823 		 * consumes slightly more power (100uA), but ensures that we
1824 		 * wake up from L1 on time.
1825 		 *
1826 		 * This looks weird: read twice the same register, discard the
1827 		 * value, set a bit, and yet again, read that same register
1828 		 * just to discard the value. But that's the way the hardware
1829 		 * seems to like it.
1830 		 */
1831 		if (iwm_nic_lock(sc)) {
1832 			iwm_read_prph(sc, IWM_OSC_CLK);
1833 			iwm_read_prph(sc, IWM_OSC_CLK);
1834 			iwm_nic_unlock(sc);
1835 		}
1836 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1837 		    IWM_OSC_CLK_FORCE_CONTROL);
1838 		if (err)
1839 			goto out;
1840 		if (iwm_nic_lock(sc)) {
1841 			iwm_read_prph(sc, IWM_OSC_CLK);
1842 			iwm_read_prph(sc, IWM_OSC_CLK);
1843 			iwm_nic_unlock(sc);
1844 		}
1845 	}
1846 
1847 	/*
1848 	 * Enable DMA clock and wait for it to stabilize.
1849 	 *
1850 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1851 	 * do not disable clocks.  This preserves any hardware bits already
1852 	 * set by default in "CLK_CTRL_REG" after reset.
1853 	 */
1854 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1855 		if (iwm_nic_lock(sc)) {
1856 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1857 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1858 			iwm_nic_unlock(sc);
1859 		}
1860 		DELAY(20);
1861 
1862 		/* Disable L1-Active */
1863 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1864 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1865 		if (err)
1866 			goto out;
1867 
1868 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1869 		if (iwm_nic_lock(sc)) {
1870 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1871 			    IWM_APMG_RTC_INT_STT_RFKILL);
1872 			iwm_nic_unlock(sc);
1873 		}
1874 	}
1875  out:
1876 	if (err)
1877 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1878 	return err;
1879 }
1880 
1881 void
1882 iwm_apm_stop(struct iwm_softc *sc)
1883 {
1884 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1885 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1886 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1887 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1888 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1889 	DELAY(1000);
1890 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1891 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1892 	DELAY(5000);
1893 
1894 	/* stop device's busmaster DMA activity */
1895 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1896 
1897 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1898 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1899 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1900 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1901 
1902 	/*
1903 	 * Clear "initialization complete" bit to move adapter from
1904 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1905 	 */
1906 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1907 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1908 }
1909 
1910 void
1911 iwm_init_msix_hw(struct iwm_softc *sc)
1912 {
1913 	iwm_conf_msix_hw(sc, 0);
1914 
1915 	if (!sc->sc_msix)
1916 		return;
1917 
1918 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1919 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1920 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1921 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1922 }
1923 
1924 void
1925 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1926 {
1927 	int vector = 0;
1928 
1929 	if (!sc->sc_msix) {
1930 		/* Newer chips default to MSIX. */
1931 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1932 			iwm_write_prph(sc, IWM_UREG_CHICK,
1933 			    IWM_UREG_CHICK_MSI_ENABLE);
1934 			iwm_nic_unlock(sc);
1935 		}
1936 		return;
1937 	}
1938 
1939 	if (!stopped && iwm_nic_lock(sc)) {
1940 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1941 		iwm_nic_unlock(sc);
1942 	}
1943 
1944 	/* Disable all interrupts */
1945 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1946 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1947 
1948 	/* Map fallback-queue (command/mgmt) to a single vector */
1949 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1950 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1951 	/* Map RSS queue (data) to the same vector */
1952 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1953 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1954 
1955 	/* Enable interrupts for the RX queue causes. */
1956 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1957 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1958 
1959 	/* Map non-RX causes to the same vector */
1960 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1961 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1962 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1963 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1964 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1965 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1966 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1967 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1968 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1969 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1970 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1971 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1972 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1973 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1974 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1975 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1976 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1977 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1978 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1979 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1980 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1981 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1982 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1983 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1984 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1985 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1986 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1987 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1988 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1989 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1990 
1991 	/* Enable interrupts for the non-RX causes. */
1992 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1993 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1994 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1995 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1996 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1997 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1998 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1999 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2000 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
2001 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2002 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2003 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2004 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2005 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
2006 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2007 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2008 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2009 }
2010 
2011 int
2012 iwm_clear_persistence_bit(struct iwm_softc *sc)
2013 {
2014 	uint32_t hpm, wprot;
2015 
2016 	hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2017 	if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2018 		wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2019 		if (wprot & IWM_PREG_WFPM_ACCESS) {
2020 			printf("%s: cannot clear persistence bit\n",
2021 			    DEVNAME(sc));
2022 			return EPERM;
2023 		}
2024 		iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2025 		    hpm & ~IWM_HPM_PERSISTENCE_BIT);
2026 	}
2027 
2028 	return 0;
2029 }
2030 
2031 int
2032 iwm_start_hw(struct iwm_softc *sc)
2033 {
2034 	int err;
2035 
2036 	err = iwm_prepare_card_hw(sc);
2037 	if (err)
2038 		return err;
2039 
2040 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2041 		err = iwm_clear_persistence_bit(sc);
2042 		if (err)
2043 			return err;
2044 	}
2045 
2046 	/* Reset the entire device */
2047 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2048 	DELAY(5000);
2049 
2050 	err = iwm_apm_init(sc);
2051 	if (err)
2052 		return err;
2053 
2054 	iwm_init_msix_hw(sc);
2055 
2056 	iwm_enable_rfkill_int(sc);
2057 	iwm_check_rfkill(sc);
2058 
2059 	return 0;
2060 }
2061 
2062 
2063 void
2064 iwm_stop_device(struct iwm_softc *sc)
2065 {
2066 	int chnl, ntries;
2067 	int qid;
2068 
2069 	iwm_disable_interrupts(sc);
2070 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2071 
2072 	/* Stop all DMA channels. */
2073 	if (iwm_nic_lock(sc)) {
2074 		/* Deactivate TX scheduler. */
2075 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2076 
2077 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2078 			IWM_WRITE(sc,
2079 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2080 			for (ntries = 0; ntries < 200; ntries++) {
2081 				uint32_t r;
2082 
2083 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2084 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2085 				    chnl))
2086 					break;
2087 				DELAY(20);
2088 			}
2089 		}
2090 		iwm_nic_unlock(sc);
2091 	}
2092 	iwm_disable_rx_dma(sc);
2093 
2094 	iwm_reset_rx_ring(sc, &sc->rxq);
2095 
2096 	for (qid = 0; qid < nitems(sc->txq); qid++)
2097 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
2098 
2099 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2100 		if (iwm_nic_lock(sc)) {
2101 			/* Power-down device's busmaster DMA clocks */
2102 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2103 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2104 			iwm_nic_unlock(sc);
2105 		}
2106 		DELAY(5);
2107 	}
2108 
2109 	/* Make sure we've released our request to stay awake (redundant, but safe). */
2110 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2111 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2112 	if (sc->sc_nic_locks > 0)
2113 		printf("%s: %d active NIC locks forcefully cleared\n",
2114 		    DEVNAME(sc), sc->sc_nic_locks);
2115 	sc->sc_nic_locks = 0;
2116 
2117 	/* Stop the device, and put it in low power state */
2118 	iwm_apm_stop(sc);
2119 
2120 	/* Reset the on-board processor. */
2121 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2122 	DELAY(5000);
2123 
2124 	/*
2125 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2126 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2127 	 * that enables radio won't fire on the correct irq, and the
2128 	 * driver won't be able to handle the interrupt.
2129 	 * Configure the IVAR table again after reset.
2130 	 */
2131 	iwm_conf_msix_hw(sc, 1);
2132 
2133 	/*
2134 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2135 	 * Clear the interrupt again.
2136 	 */
2137 	iwm_disable_interrupts(sc);
2138 
2139 	/* Even though we stop the HW we still want the RF kill interrupt. */
2140 	iwm_enable_rfkill_int(sc);
2141 	iwm_check_rfkill(sc);
2142 
2143 	iwm_prepare_card_hw(sc);
2144 }
2145 
2146 void
2147 iwm_nic_config(struct iwm_softc *sc)
2148 {
2149 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2150 	uint32_t mask, val, reg_val = 0;
2151 
2152 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2153 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2154 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2155 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2156 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2157 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2158 
2159 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2160 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2161 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2162 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2163 
2164 	/* radio configuration */
2165 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2166 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2167 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2168 
2169 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2170 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2171 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2172 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2173 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2174 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2175 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2176 
2177 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2178 	val &= ~mask;
2179 	val |= reg_val;
2180 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2181 
2182 	/*
2183 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
2184 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
2185 	 * to lose ownership and become unable to obtain it back.
2186 	 */
2187 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2188 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2189 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2190 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2191 }
2192 
2193 int
2194 iwm_nic_rx_init(struct iwm_softc *sc)
2195 {
2196 	if (sc->sc_mqrx_supported)
2197 		return iwm_nic_rx_mq_init(sc);
2198 	else
2199 		return iwm_nic_rx_legacy_init(sc);
2200 }
2201 
2202 int
2203 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2204 {
2205 	int enabled;
2206 
2207 	if (!iwm_nic_lock(sc))
2208 		return EBUSY;
2209 
2210 	/* Stop RX DMA. */
2211 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2212 	/* Disable RX used and free queue operation. */
2213 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2214 
2215 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2216 	    sc->rxq.free_desc_dma.paddr);
2217 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2218 	    sc->rxq.used_desc_dma.paddr);
2219 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2220 	    sc->rxq.stat_dma.paddr);
2221 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2222 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2223 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2224 
2225 	/* We configure only queue 0 for now. */
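	/* Each queue has an enable bit in both the lower and upper 16 bits. */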
2226 	enabled = ((1 << 0) << 16) | (1 << 0);
2227 
2228 	/* Enable RX DMA, 4KB buffer size. */
2229 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2230 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2231 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2232 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2233 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2234 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2235 
2236 	/* Enable RX DMA snooping. */
2237 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2238 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2239 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2240 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2241 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2242 
2243 	/* Enable the configured queue(s). */
2244 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2245 
2246 	iwm_nic_unlock(sc);
2247 
2248 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2249 
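	/* Tell the hardware that the first 8 receive buffers are ready. */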
2250 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2251 
2252 	return 0;
2253 }
2254 
2255 int
2256 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2257 {
2258 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2259 
2260 	iwm_disable_rx_dma(sc);
2261 
2262 	if (!iwm_nic_lock(sc))
2263 		return EBUSY;
2264 
2265 	/* reset and flush pointers */
2266 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2267 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2268 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2269 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2270 
2271 	/* Set physical address of RX ring (256-byte aligned). */
2272 	IWM_WRITE(sc,
2273 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2274 
2275 	/* Set physical address of RX status (16-byte aligned). */
2276 	IWM_WRITE(sc,
2277 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2278 
2279 	/* Enable RX. */
2280 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2281 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2282 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2283 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2284 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2285 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2286 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2287 
2288 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2289 
2290 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2291 	if (sc->host_interrupt_operation_mode)
2292 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2293 
2294 	iwm_nic_unlock(sc);
2295 
2296 	/*
2297 	 * This value should initially be 0 (before preparing any RBs),
2298 	 * and should be 8 after preparing the first 8 RBs (for example).
2299 	 */
2300 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2301 
2302 	return 0;
2303 }
2304 
2305 int
2306 iwm_nic_tx_init(struct iwm_softc *sc)
2307 {
2308 	int qid, err;
2309 
2310 	if (!iwm_nic_lock(sc))
2311 		return EBUSY;
2312 
2313 	/* Deactivate TX scheduler. */
2314 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2315 
2316 	/* Set physical address of "keep warm" page (16-byte aligned). */
2317 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2318 
2319 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2320 		struct iwm_tx_ring *txq = &sc->txq[qid];
2321 
2322 		/* Set physical address of TX ring (256-byte aligned). */
2323 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2324 		    txq->desc_dma.paddr >> 8);
2325 	}
2326 
2327 	err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2328 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2329 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2330 
2331 	iwm_nic_unlock(sc);
2332 
2333 	return err;
2334 }
2335 
2336 int
2337 iwm_nic_init(struct iwm_softc *sc)
2338 {
2339 	int err;
2340 
2341 	iwm_apm_init(sc);
2342 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2343 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2344 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2345 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2346 
2347 	iwm_nic_config(sc);
2348 
2349 	err = iwm_nic_rx_init(sc);
2350 	if (err)
2351 		return err;
2352 
2353 	err = iwm_nic_tx_init(sc);
2354 	if (err)
2355 		return err;
2356 
2357 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2358 
2359 	return 0;
2360 }
2361 
2362 /* Map a TID to an ieee80211_edca_ac category. */
2363 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2364 	EDCA_AC_BE,
2365 	EDCA_AC_BK,
2366 	EDCA_AC_BK,
2367 	EDCA_AC_BE,
2368 	EDCA_AC_VI,
2369 	EDCA_AC_VI,
2370 	EDCA_AC_VO,
2371 	EDCA_AC_VO,
2372 };
2373 
2374 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2375 const uint8_t iwm_ac_to_tx_fifo[] = {
2376 	IWM_TX_FIFO_BE,
2377 	IWM_TX_FIFO_BK,
2378 	IWM_TX_FIFO_VI,
2379 	IWM_TX_FIFO_VO,
2380 };
2381 
2382 int
2383 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2384 {
2385 	int err;
2386 	iwm_nic_assert_locked(sc);
2387 
2388 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2389 
2390 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2391 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2392 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2393 
2394 	err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2395 	if (err)
2396 		return err;
2398 
2399 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2400 
2401 	iwm_write_mem32(sc,
2402 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2403 
2404 	/* Set scheduler window size and frame limit. */
2405 	iwm_write_mem32(sc,
2406 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2407 	    sizeof(uint32_t),
2408 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2409 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2410 	    ((IWM_FRAME_LIMIT
2411 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2412 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2413 
2414 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2415 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2416 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2417 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2418 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2419 
2420 	if (qid == sc->cmdqid)
2421 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2422 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2423 
2424 	return 0;
2425 }
2426 
2427 int
2428 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2429     int aggregate, uint8_t tid, uint16_t ssn)
2430 {
2431 	struct iwm_tx_ring *ring = &sc->txq[qid];
2432 	struct iwm_scd_txq_cfg_cmd cmd;
2433 	int err, idx, scd_bug;
2434 
2435 	iwm_nic_assert_locked(sc);
2436 
2437 	/*
2438 	 * If we need to move the SCD write pointer by steps of
2439 	 * 0x40, 0x80 or 0xc0, it gets stuck.
2440 	 * This is really ugly, but this is the easiest way out for
2441 	 * this sad hardware issue.
2442 	 * This bug has been fixed on devices 9000 and up.
2443 	 */
2444 	scd_bug = !sc->sc_mqrx_supported &&
2445 		!((ssn - ring->cur) & 0x3f) &&
2446 		(ssn != ring->cur);
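	/* Bump ssn past the stuck value; sequence numbers wrap at 12 bits. */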
2447 	if (scd_bug)
2448 		ssn = (ssn + 1) & 0xfff;
2449 
2450 	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2451 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2452 	ring->cur = idx;
2453 	ring->tail = idx;
2454 
2455 	memset(&cmd, 0, sizeof(cmd));
2456 	cmd.tid = tid;
2457 	cmd.scd_queue = qid;
2458 	cmd.enable = 1;
2459 	cmd.sta_id = sta_id;
2460 	cmd.tx_fifo = fifo;
2461 	cmd.aggregate = aggregate;
2462 	cmd.ssn = htole16(ssn);
2463 	cmd.window = IWM_FRAME_LIMIT;
2464 
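	/* Have the firmware configure the scheduler queue for this station/TID. */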
2465 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2466 	    sizeof(cmd), &cmd);
2467 	if (err)
2468 		return err;
2469 
2470 	sc->qenablemsk |= (1 << qid);
2471 	return 0;
2472 }
2473 
2474 int
2475 iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2476 {
2477 	struct iwm_scd_txq_cfg_cmd cmd;
2478 	int err;
2479 
2480 	memset(&cmd, 0, sizeof(cmd));
2481 	cmd.tid = tid;
2482 	cmd.scd_queue = qid;
2483 	cmd.enable = 0;
2484 	cmd.sta_id = sta_id;
2485 
2486 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2487 	if (err)
2488 		return err;
2489 
2490 	sc->qenablemsk &= ~(1 << qid);
2491 	return 0;
2492 }
2493 
2494 int
2495 iwm_post_alive(struct iwm_softc *sc)
2496 {
2497 	int nwords;
2498 	int err, chnl;
2499 	uint32_t base;
2500 
2501 	if (!iwm_nic_lock(sc))
2502 		return EBUSY;
2503 
2504 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2505 
2506 	iwm_ict_reset(sc);
2507 
2508 	iwm_nic_unlock(sc);
2509 
2510 	/* Clear TX scheduler state in SRAM. */
2511 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2512 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2513 	    / sizeof(uint32_t);
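	/* A NULL buffer makes iwm_write_mem() zero-fill the region. */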
2514 	err = iwm_write_mem(sc,
2515 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2516 	    NULL, nwords);
2517 	if (err)
2518 		return err;
2519 
2520 	if (!iwm_nic_lock(sc))
2521 		return EBUSY;
2522 
2523 	/* Set physical address of TX scheduler rings (1KB aligned). */
2524 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2525 
2526 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2527 
2528 	/* enable command channel */
2529 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2530 	if (err) {
2531 		iwm_nic_unlock(sc);
2532 		return err;
2533 	}
2534 
2535 	/* Activate TX scheduler. */
2536 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2537 
2538 	/* Enable DMA channels. */
2539 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2540 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2541 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2542 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2543 	}
2544 
2545 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2546 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2547 
2548 	iwm_nic_unlock(sc);
2549 
2550 	/* Enable L1-Active */
2551 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2552 		err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2553 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2554 	}
2555 
2556 	return err;
2557 }
2558 
2559 struct iwm_phy_db_entry *
2560 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2561 {
2562 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2563 
2564 	if (type >= IWM_PHY_DB_MAX)
2565 		return NULL;
2566 
2567 	switch (type) {
2568 	case IWM_PHY_DB_CFG:
2569 		return &phy_db->cfg;
2570 	case IWM_PHY_DB_CALIB_NCH:
2571 		return &phy_db->calib_nch;
2572 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2573 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2574 			return NULL;
2575 		return &phy_db->calib_ch_group_papd[chg_id];
2576 	case IWM_PHY_DB_CALIB_CHG_TXP:
2577 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2578 			return NULL;
2579 		return &phy_db->calib_ch_group_txp[chg_id];
2580 	default:
2581 		return NULL;
2582 	}
2583 	return NULL;
2584 }
2585 
2586 int
2587 iwm_phy_db_set_section(struct iwm_softc *sc,
2588     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2589 {
2590 	uint16_t type = le16toh(phy_db_notif->type);
2591 	uint16_t size = le16toh(phy_db_notif->length);
2592 	struct iwm_phy_db_entry *entry;
2593 	uint16_t chg_id = 0;
2594 
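	/* Channel-group sections carry their group id in the first 16-bit word. */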
2595 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2596 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2597 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2598 
2599 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2600 	if (!entry)
2601 		return EINVAL;
2602 
2603 	if (entry->data)
2604 		free(entry->data, M_DEVBUF, entry->size);
2605 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2606 	if (!entry->data) {
2607 		entry->size = 0;
2608 		return ENOMEM;
2609 	}
2610 	memcpy(entry->data, phy_db_notif->data, size);
2611 	entry->size = size;
2612 
2613 	return 0;
2614 }
2615 
2616 int
2617 iwm_is_valid_channel(uint16_t ch_id)
2618 {
2619 	if (ch_id <= 14 ||
2620 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2621 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2622 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2623 		return 1;
2624 	return 0;
2625 }
2626 
2627 uint8_t
2628 iwm_ch_id_to_ch_index(uint16_t ch_id)
2629 {
2630 	if (!iwm_is_valid_channel(ch_id))
2631 		return 0xff;
2632 
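	/*
	 * Flatten the valid channel numbers (1-14, 36-64, 100-140, 145-165)
	 * into consecutive array indices.
	 */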
2633 	if (ch_id <= 14)
2634 		return ch_id - 1;
2635 	if (ch_id <= 64)
2636 		return (ch_id + 20) / 4;
2637 	if (ch_id <= 140)
2638 		return (ch_id - 12) / 4;
2639 	return (ch_id - 13) / 4;
2640 }
2641 
2642 
2643 uint16_t
2644 iwm_channel_id_to_papd(uint16_t ch_id)
2645 {
2646 	if (!iwm_is_valid_channel(ch_id))
2647 		return 0xff;
2648 
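	/* PAPD calibration groups: 0 = 2GHz, 1-3 = low/mid/high 5GHz sub-bands. */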
2649 	if (1 <= ch_id && ch_id <= 14)
2650 		return 0;
2651 	if (36 <= ch_id && ch_id <= 64)
2652 		return 1;
2653 	if (100 <= ch_id && ch_id <= 140)
2654 		return 2;
2655 	return 3;
2656 }
2657 
2658 uint16_t
2659 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2660 {
2661 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2662 	struct iwm_phy_db_chg_txp *txp_chg;
2663 	int i;
2664 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2665 
2666 	if (ch_index == 0xff)
2667 		return 0xff;
2668 
2669 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2670 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2671 		if (!txp_chg)
2672 			return 0xff;
2673 		/*
2674 		 * Look for the first channel group whose maximum channel
2675 		 * index is not lower than the requested channel's index.
2676 		 */
2677 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2678 			return i;
2679 	}
2680 	return 0xff;
2681 }
2682 
2683 int
2684 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2685     uint16_t *size, uint16_t ch_id)
2686 {
2687 	struct iwm_phy_db_entry *entry;
2688 	uint16_t ch_group_id = 0;
2689 
2690 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2691 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2692 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2693 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2694 
2695 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2696 	if (!entry)
2697 		return EINVAL;
2698 
2699 	*data = entry->data;
2700 	*size = entry->size;
2701 
2702 	return 0;
2703 }
2704 
2705 int
2706 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2707     void *data)
2708 {
2709 	struct iwm_phy_db_cmd phy_db_cmd;
2710 	struct iwm_host_cmd cmd = {
2711 		.id = IWM_PHY_DB_CMD,
2712 		.flags = IWM_CMD_ASYNC,
2713 	};
2714 
2715 	phy_db_cmd.type = le16toh(type);
2716 	phy_db_cmd.length = le16toh(length);
2717 
2718 	cmd.data[0] = &phy_db_cmd;
2719 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2720 	cmd.data[1] = data;
2721 	cmd.len[1] = length;
2722 
2723 	return iwm_send_cmd(sc, &cmd);
2724 }
2725 
2726 int
2727 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2728     uint8_t max_ch_groups)
2729 {
2730 	uint16_t i;
2731 	int err;
2732 	struct iwm_phy_db_entry *entry;
2733 
2734 	for (i = 0; i < max_ch_groups; i++) {
2735 		entry = iwm_phy_db_get_section(sc, type, i);
2736 		if (!entry)
2737 			return EINVAL;
2738 
2739 		if (!entry->size)
2740 			continue;
2741 
2742 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2743 		if (err)
2744 			return err;
2745 
2746 		DELAY(1000);
2747 	}
2748 
2749 	return 0;
2750 }
2751 
2752 int
2753 iwm_send_phy_db_data(struct iwm_softc *sc)
2754 {
2755 	uint8_t *data = NULL;
2756 	uint16_t size = 0;
2757 	int err;
2758 
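	/* Upload config and non-channel calibration first, then per-channel groups. */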
2759 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2760 	if (err)
2761 		return err;
2762 
2763 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2764 	if (err)
2765 		return err;
2766 
2767 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2768 	    &data, &size, 0);
2769 	if (err)
2770 		return err;
2771 
2772 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2773 	if (err)
2774 		return err;
2775 
2776 	err = iwm_phy_db_send_all_channel_groups(sc,
2777 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2778 	if (err)
2779 		return err;
2780 
2781 	err = iwm_phy_db_send_all_channel_groups(sc,
2782 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2783 	if (err)
2784 		return err;
2785 
2786 	return 0;
2787 }
2788 
2789 /*
2790  * For the high priority TE use a time event type that has similar priority to
2791  * the FW's action scan priority.
2792  */
2793 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2794 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2795 
2796 int
2797 iwm_send_time_event_cmd(struct iwm_softc *sc,
2798     const struct iwm_time_event_cmd *cmd)
2799 {
2800 	struct iwm_rx_packet *pkt;
2801 	struct iwm_time_event_resp *resp;
2802 	struct iwm_host_cmd hcmd = {
2803 		.id = IWM_TIME_EVENT_CMD,
2804 		.flags = IWM_CMD_WANT_RESP,
2805 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2806 	};
2807 	uint32_t resp_len;
2808 	int err;
2809 
2810 	hcmd.data[0] = cmd;
2811 	hcmd.len[0] = sizeof(*cmd);
2812 	err = iwm_send_cmd(sc, &hcmd);
2813 	if (err)
2814 		return err;
2815 
2816 	pkt = hcmd.resp_pkt;
2817 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2818 		err = EIO;
2819 		goto out;
2820 	}
2821 
2822 	resp_len = iwm_rx_packet_payload_len(pkt);
2823 	if (resp_len != sizeof(*resp)) {
2824 		err = EIO;
2825 		goto out;
2826 	}
2827 
2828 	resp = (void *)pkt->data;
2829 	if (le32toh(resp->status) == 0)
2830 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2831 	else
2832 		err = EIO;
2833 out:
2834 	iwm_free_resp(sc, &hcmd);
2835 	return err;
2836 }
2837 
2838 void
2839 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2840     uint32_t duration, uint32_t max_delay)
2841 {
2842 	struct iwm_time_event_cmd time_cmd;
2843 
2844 	/* Do nothing if a time event is already scheduled. */
2845 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2846 		return;
2847 
2848 	memset(&time_cmd, 0, sizeof(time_cmd));
2849 
2850 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2851 	time_cmd.id_and_color =
2852 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2853 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2854 
2855 	time_cmd.apply_time = htole32(0);
2856 
2857 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2858 	time_cmd.max_delay = htole32(max_delay);
2859 	/* TODO: why set interval to the beacon interval if this is not periodic? */
2860 	time_cmd.interval = htole32(1);
2861 	time_cmd.duration = htole32(duration);
2862 	time_cmd.repeat = 1;
2863 	time_cmd.policy
2864 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2865 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2866 		IWM_T2_V2_START_IMMEDIATELY);
2867 
2868 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2869 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2870 
2871 	DELAY(100);
2872 }
2873 
2874 void
2875 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2876 {
2877 	struct iwm_time_event_cmd time_cmd;
2878 
2879 	/* Do nothing if the time event has already ended. */
2880 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2881 		return;
2882 
2883 	memset(&time_cmd, 0, sizeof(time_cmd));
2884 
2885 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2886 	time_cmd.id_and_color =
2887 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2888 	time_cmd.id = htole32(sc->sc_time_event_uid);
2889 
2890 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2891 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2892 
2893 	DELAY(100);
2894 }
2895 
2896 /*
2897  * NVM read access and content parsing.  We do not support
2898  * external NVM or writing NVM.
2899  */
2900 
2901 /* list of NVM sections we are allowed/need to read */
2902 const int iwm_nvm_to_read[] = {
2903 	IWM_NVM_SECTION_TYPE_HW,
2904 	IWM_NVM_SECTION_TYPE_SW,
2905 	IWM_NVM_SECTION_TYPE_REGULATORY,
2906 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2907 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2908 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2909 	IWM_NVM_SECTION_TYPE_HW_8000,
2910 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2911 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2912 };
2913 
2914 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2915 
2916 #define IWM_NVM_WRITE_OPCODE 1
2917 #define IWM_NVM_READ_OPCODE 0
2918 
2919 int
2920 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2921     uint16_t length, uint8_t *data, uint16_t *len)
2922 {
2924 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2925 		.offset = htole16(offset),
2926 		.length = htole16(length),
2927 		.type = htole16(section),
2928 		.op_code = IWM_NVM_READ_OPCODE,
2929 	};
2930 	struct iwm_nvm_access_resp *nvm_resp;
2931 	struct iwm_rx_packet *pkt;
2932 	struct iwm_host_cmd cmd = {
2933 		.id = IWM_NVM_ACCESS_CMD,
2934 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2935 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2936 		.data = { &nvm_access_cmd, },
2937 	};
2938 	int err, offset_read;
2939 	size_t bytes_read;
2940 	uint8_t *resp_data;
2941 
2942 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2943 
2944 	err = iwm_send_cmd(sc, &cmd);
2945 	if (err)
2946 		return err;
2947 
2948 	pkt = cmd.resp_pkt;
2949 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2950 		err = EIO;
2951 		goto exit;
2952 	}
2953 
2954 	/* Extract NVM response */
2955 	nvm_resp = (void *)pkt->data;
2956 	if (nvm_resp == NULL)
2957 		return EIO;
2958 
2959 	err = le16toh(nvm_resp->status);
2960 	bytes_read = le16toh(nvm_resp->length);
2961 	offset_read = le16toh(nvm_resp->offset);
2962 	resp_data = nvm_resp->data;
2963 	if (err) {
2964 		err = EINVAL;
2965 		goto exit;
2966 	}
2967 
2968 	if (offset_read != offset) {
2969 		err = EINVAL;
2970 		goto exit;
2971 	}
2972 
2973 	if (bytes_read > length) {
2974 		err = EINVAL;
2975 		goto exit;
2976 	}
2977 
2978 	memcpy(data + offset, resp_data, bytes_read);
2979 	*len = bytes_read;
2980 
2981  exit:
2982 	iwm_free_resp(sc, &cmd);
2983 	return err;
2984 }
2985 
2986 /*
2987  * Reads an NVM section completely.
2988  * NICs prior to the 7000 family don't have a real NVM; they just read
2989  * section 0, which is the EEPROM. Because EEPROM reads are not limited
2990  * by uCode, we must manually check in this case that we don't
2991  * overflow and try to read more than the EEPROM size.
2992  */
2993 int
2994 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2995     uint16_t *len, size_t max_len)
2996 {
2997 	uint16_t chunklen, seglen;
2998 	int err = 0;
2999 
3000 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
3001 	*len = 0;
3002 
3003 	/* Read NVM chunks until exhausted (reading less than requested) */
3004 	while (seglen == chunklen && *len < max_len) {
3005 		err = iwm_nvm_read_chunk(sc,
3006 		    section, *len, chunklen, data, &seglen);
3007 		if (err)
3008 			return err;
3009 
3010 		*len += seglen;
3011 	}
3012 
3013 	return err;
3014 }
3015 
3016 uint8_t
3017 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3018 {
3019 	uint8_t tx_ant;
3020 
3021 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3022 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3023 
3024 	if (sc->sc_nvm.valid_tx_ant)
3025 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3026 
3027 	return tx_ant;
3028 }
3029 
3030 uint8_t
3031 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3032 {
3033 	uint8_t rx_ant;
3034 
3035 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3036 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3037 
3038 	if (sc->sc_nvm.valid_rx_ant)
3039 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3040 
3041 	return rx_ant;
3042 }
3043 
3044 int
3045 iwm_valid_siso_ant_rate_mask(struct iwm_softc *sc)
3046 {
3047 	uint8_t valid_tx_ant = iwm_fw_valid_tx_ant(sc);
3048 
3049 	/*
3050 	 * According to the Linux driver, antenna B should be preferred
3051 	 * on 9k devices since it is not shared with bluetooth. However,
3052 	 * there are 9k devices which do not support antenna B at all.
3053 	 */
3054 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
3055 	    (valid_tx_ant & IWM_ANT_B))
3056 		return IWM_RATE_MCS_ANT_B_MSK;
3057 
3058 	return IWM_RATE_MCS_ANT_A_MSK;
3059 }
3060 
3061 void
3062 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3063     const uint8_t *nvm_channels, int nchan)
3064 {
3065 	struct ieee80211com *ic = &sc->sc_ic;
3066 	struct iwm_nvm_data *data = &sc->sc_nvm;
3067 	int ch_idx;
3068 	struct ieee80211_channel *channel;
3069 	uint16_t ch_flags;
3070 	int is_5ghz;
3071 	int flags, hw_value;
3072 
3073 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3074 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3075 
3076 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3077 		    !data->sku_cap_band_52GHz_enable)
3078 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3079 
3080 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3081 			continue;
3082 
3083 		hw_value = nvm_channels[ch_idx];
3084 		channel = &ic->ic_channels[hw_value];
3085 
3086 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3087 		if (!is_5ghz) {
3088 			flags = IEEE80211_CHAN_2GHZ;
3089 			channel->ic_flags
3090 			    = IEEE80211_CHAN_CCK
3091 			    | IEEE80211_CHAN_OFDM
3092 			    | IEEE80211_CHAN_DYN
3093 			    | IEEE80211_CHAN_2GHZ;
3094 		} else {
3095 			flags = IEEE80211_CHAN_5GHZ;
3096 			channel->ic_flags =
3097 			    IEEE80211_CHAN_A;
3098 		}
3099 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3100 
3101 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3102 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3103 
3104 		if (data->sku_cap_11n_enable) {
3105 			channel->ic_flags |= IEEE80211_CHAN_HT;
3106 			if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3107 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3108 		}
3109 
3110 		if (is_5ghz && data->sku_cap_11ac_enable) {
3111 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3112 			if (ch_flags & IWM_NVM_CHANNEL_80MHZ)
3113 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3114 		}
3115 	}
3116 }
3117 
3118 int
3119 iwm_mimo_enabled(struct iwm_softc *sc)
3120 {
3121 	struct ieee80211com *ic = &sc->sc_ic;
3122 
3123 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3124 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3125 }
3126 
3127 void
3128 iwm_setup_ht_rates(struct iwm_softc *sc)
3129 {
3130 	struct ieee80211com *ic = &sc->sc_ic;
3131 	uint8_t rx_ant;
3132 
3133 	/* TX is supported with the same MCS as RX. */
3134 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3135 
3136 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3137 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3138 
3139 	if (!iwm_mimo_enabled(sc))
3140 		return;
3141 
3142 	rx_ant = iwm_fw_valid_rx_ant(sc);
3143 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3144 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3145 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3146 }
3147 
3148 void
3149 iwm_setup_vht_rates(struct iwm_softc *sc)
3150 {
3151 	struct ieee80211com *ic = &sc->sc_ic;
3152 	uint8_t rx_ant = iwm_fw_valid_rx_ant(sc);
3153 	int n;
3154 
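	/* MCS 0-9 is always advertised for a single spatial stream. */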
3155 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3156 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3157 
3158 	if (iwm_mimo_enabled(sc) &&
3159 	    ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3160 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)) {
3161 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3162 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3163 	} else {
3164 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3165 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3166 	}
3167 
3168 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3169 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3170 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3171 	}
3172 
3173 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3174 }
3175 
3176 void
3177 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3178     uint16_t ssn, uint16_t buf_size)
3179 {
3180 	reorder_buf->head_sn = ssn;
3181 	reorder_buf->num_stored = 0;
3182 	reorder_buf->buf_size = buf_size;
3183 	reorder_buf->last_amsdu = 0;
3184 	reorder_buf->last_sub_index = 0;
3185 	reorder_buf->removed = 0;
3186 	reorder_buf->valid = 0;
3187 	reorder_buf->consec_oldsn_drops = 0;
3188 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3189 	reorder_buf->consec_oldsn_prev_drop = 0;
3190 }
3191 
3192 void
3193 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3194 {
3195 	int i;
3196 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3197 	struct iwm_reorder_buf_entry *entry;
3198 
3199 	for (i = 0; i < reorder_buf->buf_size; i++) {
3200 		entry = &rxba->entries[i];
3201 		ml_purge(&entry->frames);
3202 		timerclear(&entry->reorder_time);
3203 	}
3204 
3205 	reorder_buf->removed = 1;
3206 	timeout_del(&reorder_buf->reorder_timer);
3207 	timerclear(&rxba->last_rx);
3208 	timeout_del(&rxba->session_timer);
3209 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3210 }
3211 
3212 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3213 
3214 void
3215 iwm_rx_ba_session_expired(void *arg)
3216 {
3217 	struct iwm_rxba_data *rxba = arg;
3218 	struct iwm_softc *sc = rxba->sc;
3219 	struct ieee80211com *ic = &sc->sc_ic;
3220 	struct ieee80211_node *ni = ic->ic_bss;
3221 	struct timeval now, timeout, expiry;
3222 	int s;
3223 
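	/*
	 * Tear the BA session down only if it has been idle beyond its
	 * timeout; otherwise re-arm the session timer.
	 */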
3224 	s = splnet();
3225 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3226 	    ic->ic_state == IEEE80211_S_RUN &&
3227 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3228 		getmicrouptime(&now);
3229 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3230 		timeradd(&rxba->last_rx, &timeout, &expiry);
3231 		if (timercmp(&now, &expiry, <)) {
3232 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3233 		} else {
3234 			ic->ic_stats.is_ht_rx_ba_timeout++;
3235 			ieee80211_delba_request(ic, ni,
3236 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3237 		}
3238 	}
3239 	splx(s);
3240 }
3241 
3242 void
3243 iwm_reorder_timer_expired(void *arg)
3244 {
3245 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3246 	struct iwm_reorder_buffer *buf = arg;
3247 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3248 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3249 	struct iwm_softc *sc = rxba->sc;
3250 	struct ieee80211com *ic = &sc->sc_ic;
3251 	struct ieee80211_node *ni = ic->ic_bss;
3252 	int i, s;
3253 	uint16_t sn = 0, index = 0;
3254 	int expired = 0;
3255 	int cont = 0;
3256 	struct timeval now, timeout, expiry;
3257 
3258 	if (!buf->num_stored || buf->removed)
3259 		return;
3260 
3261 	s = splnet();
3262 	getmicrouptime(&now);
3263 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3264 
3265 	for (i = 0; i < buf->buf_size; i++) {
3266 		index = (buf->head_sn + i) % buf->buf_size;
3267 
3268 		if (ml_empty(&entries[index].frames)) {
3269 			/*
3270 			 * If there is a hole and the next frame didn't expire
3271 			 * we want to break and not advance SN.
3272 			 */
3273 			cont = 0;
3274 			continue;
3275 		}
3276 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3277 		if (!cont && timercmp(&now, &expiry, <))
3278 			break;
3279 
3280 		expired = 1;
3281 		/* continue until next hole after this expired frame */
3282 		cont = 1;
3283 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3284 	}
3285 
3286 	if (expired) {
3287 		/* SN is set to the last expired frame + 1 */
3288 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3289 		if_input(&sc->sc_ic.ic_if, &ml);
3290 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3291 	} else {
3292 		/*
3293 		 * If no frame expired and there are stored frames, index is now
3294 		 * pointing to the first unexpired frame - modify reorder timeout
3295 		 * accordingly.
3296 		 */
3297 		timeout_add_usec(&buf->reorder_timer,
3298 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3299 	}
3300 
3301 	splx(s);
3302 }
3303 
3304 #define IWM_MAX_RX_BA_SESSIONS 16
3305 
3306 int
3307 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3308     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3309 {
3310 	struct ieee80211com *ic = &sc->sc_ic;
3311 	struct iwm_add_sta_cmd cmd;
3312 	struct iwm_node *in = (void *)ni;
3313 	int err, s;
3314 	uint32_t status;
3315 	size_t cmdsize;
3316 	struct iwm_rxba_data *rxba = NULL;
3317 	uint8_t baid = 0;
3318 
3319 	s = splnet();
3320 
3321 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3322 		ieee80211_addba_req_refuse(ic, ni, tid);
3323 		splx(s);
3324 		return 0;
3325 	}
3326 
3327 	memset(&cmd, 0, sizeof(cmd));
3328 
3329 	cmd.sta_id = IWM_STATION_ID;
3330 	cmd.mac_id_n_color
3331 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3332 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3333 
3334 	if (start) {
3335 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3336 		cmd.add_immediate_ba_ssn = ssn;
3337 		cmd.rx_ba_window = winsize;
3338 	} else {
3339 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3340 	}
3341 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3342 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3343 
3344 	status = IWM_ADD_STA_SUCCESS;
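	/* Firmware without the STA_TYPE API expects the shorter v7 command. */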
3345 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3346 		cmdsize = sizeof(cmd);
3347 	else
3348 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3349 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3350 	    &status);
3351 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3352 		err = EIO;
3353 	if (err) {
3354 		if (start)
3355 			ieee80211_addba_req_refuse(ic, ni, tid);
3356 		splx(s);
3357 		return err;
3358 	}
3359 
3360 	if (sc->sc_mqrx_supported) {
3361 		/* Deaggregation is done in hardware. */
3362 		if (start) {
3363 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3364 				ieee80211_addba_req_refuse(ic, ni, tid);
3365 				splx(s);
3366 				return EIO;
3367 			}
3368 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3369 			    IWM_ADD_STA_BAID_SHIFT;
3370 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3371 			    baid >= nitems(sc->sc_rxba_data)) {
3372 				ieee80211_addba_req_refuse(ic, ni, tid);
3373 				splx(s);
3374 				return EIO;
3375 			}
3376 			rxba = &sc->sc_rxba_data[baid];
3377 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3378 				ieee80211_addba_req_refuse(ic, ni, tid);
3379 				splx(s);
3380 				return 0;
3381 			}
3382 			rxba->sta_id = IWM_STATION_ID;
3383 			rxba->tid = tid;
3384 			rxba->baid = baid;
3385 			rxba->timeout = timeout_val;
3386 			getmicrouptime(&rxba->last_rx);
3387 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3388 			    winsize);
3389 			if (timeout_val != 0) {
3390 				struct ieee80211_rx_ba *ba;
3391 				timeout_add_usec(&rxba->session_timer,
3392 				    timeout_val);
3393 				/* XXX disable net80211's BA timeout handler */
3394 				ba = &ni->ni_rx_ba[tid];
3395 				ba->ba_timeout_val = 0;
3396 			}
3397 		} else {
3398 			int i;
3399 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3400 				rxba = &sc->sc_rxba_data[i];
3401 				if (rxba->baid ==
3402 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3403 					continue;
3404 				if (rxba->tid != tid)
3405 					continue;
3406 				iwm_clear_reorder_buffer(sc, rxba);
3407 				break;
3408 			}
3409 		}
3410 	}
3411 
3412 	if (start) {
3413 		sc->sc_rx_ba_sessions++;
3414 		ieee80211_addba_req_accept(ic, ni, tid);
3415 	} else if (sc->sc_rx_ba_sessions > 0)
3416 		sc->sc_rx_ba_sessions--;
3417 
3418 	splx(s);
3419 	return 0;
3420 }
3421 
3422 void
3423 iwm_mac_ctxt_task(void *arg)
3424 {
3425 	struct iwm_softc *sc = arg;
3426 	struct ieee80211com *ic = &sc->sc_ic;
3427 	struct iwm_node *in = (void *)ic->ic_bss;
3428 	int err, s = splnet();
3429 
3430 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3431 	    ic->ic_state != IEEE80211_S_RUN) {
3432 		refcnt_rele_wake(&sc->task_refs);
3433 		splx(s);
3434 		return;
3435 	}
3436 
3437 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3438 	if (err)
3439 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3440 
3441 	iwm_unprotect_session(sc, in);
3442 
3443 	refcnt_rele_wake(&sc->task_refs);
3444 	splx(s);
3445 }
3446 
3447 void
3448 iwm_updateprot(struct ieee80211com *ic)
3449 {
3450 	struct iwm_softc *sc = ic->ic_softc;
3451 
3452 	if (ic->ic_state == IEEE80211_S_RUN &&
3453 	    !task_pending(&sc->newstate_task))
3454 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3455 }
3456 
3457 void
3458 iwm_updateslot(struct ieee80211com *ic)
3459 {
3460 	struct iwm_softc *sc = ic->ic_softc;
3461 
3462 	if (ic->ic_state == IEEE80211_S_RUN &&
3463 	    !task_pending(&sc->newstate_task))
3464 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3465 }
3466 
3467 void
3468 iwm_updateedca(struct ieee80211com *ic)
3469 {
3470 	struct iwm_softc *sc = ic->ic_softc;
3471 
3472 	if (ic->ic_state == IEEE80211_S_RUN &&
3473 	    !task_pending(&sc->newstate_task))
3474 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3475 }
3476 
3477 void
3478 iwm_phy_ctxt_task(void *arg)
3479 {
3480 	struct iwm_softc *sc = arg;
3481 	struct ieee80211com *ic = &sc->sc_ic;
3482 	struct iwm_node *in = (void *)ic->ic_bss;
3483 	struct ieee80211_node *ni = &in->in_ni;
3484 	uint8_t chains, sco, vht_chan_width;
3485 	int err, s = splnet();
3486 
3487 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3488 	    ic->ic_state != IEEE80211_S_RUN ||
3489 	    in->in_phyctxt == NULL) {
3490 		refcnt_rele_wake(&sc->task_refs);
3491 		splx(s);
3492 		return;
3493 	}
3494 
3495 	chains = iwm_mimo_enabled(sc) ? 2 : 1;
3496 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3497 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3498 	    ieee80211_node_supports_ht_chan40(ni))
3499 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3500 	else
3501 		sco = IEEE80211_HTOP0_SCO_SCN;
3502 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3503 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3504 	    ieee80211_node_supports_vht_chan80(ni))
3505 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3506 	else
3507 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3508 	if (in->in_phyctxt->sco != sco ||
3509 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3510 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3511 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3512 		    vht_chan_width);
3513 		if (err)
3514 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3515 		iwm_setrates(in, 0);
3516 	}
3517 
3518 	refcnt_rele_wake(&sc->task_refs);
3519 	splx(s);
3520 }
3521 
3522 void
3523 iwm_updatechan(struct ieee80211com *ic)
3524 {
3525 	struct iwm_softc *sc = ic->ic_softc;
3526 
3527 	if (ic->ic_state == IEEE80211_S_RUN &&
3528 	    !task_pending(&sc->newstate_task))
3529 		iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3530 }
3531 
3532 void
3533 iwm_updatedtim(struct ieee80211com *ic)
3534 {
3535 	struct iwm_softc *sc = ic->ic_softc;
3536 
3537 	if (ic->ic_state == IEEE80211_S_RUN &&
3538 	    !task_pending(&sc->newstate_task))
3539 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3540 }
3541 
3542 int
3543 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3544     uint16_t ssn, uint16_t winsize, int start)
3545 {
3546 	struct iwm_add_sta_cmd cmd;
3547 	struct ieee80211com *ic = &sc->sc_ic;
3548 	struct iwm_node *in = (void *)ni;
3549 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3550 	struct iwm_tx_ring *ring;
3551 	enum ieee80211_edca_ac ac;
3552 	int fifo;
3553 	uint32_t status;
3554 	int err;
3555 	size_t cmdsize;
3556 
3557 	/* Ensure we can map this TID to an aggregation queue. */
3558 	if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3559 		return ENOSPC;
3560 
3561 	if (start) {
3562 		if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3563 			return 0;
3564 	} else {
3565 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3566 			return 0;
3567 	}
3568 
3569 	ring = &sc->txq[qid];
3570 	ac = iwm_tid_to_ac[tid];
3571 	fifo = iwm_ac_to_tx_fifo[ac];
3572 
3573 	memset(&cmd, 0, sizeof(cmd));
3574 
3575 	cmd.sta_id = IWM_STATION_ID;
3576 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3577 	    in->in_color));
3578 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3579 
3580 	if (start) {
3581 		/* Enable Tx aggregation for this queue. */
3582 		in->tid_disable_ampdu &= ~(1 << tid);
3583 		in->tfd_queue_msk |= (1 << qid);
3584 	} else {
3585 		in->tid_disable_ampdu |= (1 << tid);
3586 		/*
3587 		 * Queue remains enabled in the TFD queue mask
3588 		 * until we leave RUN state.
3589 		 */
3590 		err = iwm_flush_sta(sc, in);
3591 		if (err)
3592 			return err;
3593 	}
3594 
3595 	cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3596 	cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3597 	cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3598 	    IWM_STA_MODIFY_TID_DISABLE_TX);
3599 
3600 	if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3601 		if (!iwm_nic_lock(sc)) {
3602 			if (start)
3603 				ieee80211_addba_resp_refuse(ic, ni, tid,
3604 				    IEEE80211_STATUS_UNSPECIFIED);
3605 			return EBUSY;
3606 		}
3607 		err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3608 		    ssn);
3609 		iwm_nic_unlock(sc);
3610 		if (err) {
3611 			printf("%s: could not enable Tx queue %d (error %d)\n",
3612 			    DEVNAME(sc), qid, err);
3613 			if (start)
3614 				ieee80211_addba_resp_refuse(ic, ni, tid,
3615 				    IEEE80211_STATUS_UNSPECIFIED);
3616 			return err;
3617 		}
3618 		/*
3619 		 * If iwm_enable_txq() employed the SCD hardware bug
3620 		 * workaround we must skip the frame with seqnum SSN.
3621 		 */
3622 		if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3623 			ssn = (ssn + 1) & 0xfff;
3624 			KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3625 			ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3626 			ni->ni_qos_txseqs[tid] = ssn;
3627 		}
3628 	}
3629 
3630 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3631 		cmdsize = sizeof(cmd);
3632 	else
3633 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3634 
3635 	status = 0;
3636 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3637 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3638 		err = EIO;
3639 	if (err) {
3640 		printf("%s: could not update sta (error %d)\n",
3641 		    DEVNAME(sc), err);
3642 		if (start)
3643 			ieee80211_addba_resp_refuse(ic, ni, tid,
3644 			    IEEE80211_STATUS_UNSPECIFIED);
3645 		return err;
3646 	}
3647 
3648 	if (start) {
3649 		sc->tx_ba_queue_mask |= (1 << qid);
3650 		ieee80211_addba_resp_accept(ic, ni, tid);
3651 	} else {
3652 		sc->tx_ba_queue_mask &= ~(1 << qid);
3653 
3654 		/*
3655 		 * Clear pending frames but keep the queue enabled.
3656 		 * Firmware panics if we disable the queue here.
3657 		 */
3658 		iwm_txq_advance(sc, ring, ring->cur);
3659 		iwm_clear_oactive(sc, ring);
3660 	}
3661 
3662 	return 0;
3663 }
3664 
3665 void
3666 iwm_ba_task(void *arg)
3667 {
3668 	struct iwm_softc *sc = arg;
3669 	struct ieee80211com *ic = &sc->sc_ic;
3670 	struct ieee80211_node *ni = ic->ic_bss;
3671 	int s = splnet();
3672 	int tid, err = 0;
3673 
3674 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3675 	    ic->ic_state != IEEE80211_S_RUN) {
3676 		refcnt_rele_wake(&sc->task_refs);
3677 		splx(s);
3678 		return;
3679 	}
3680 
3681 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3682 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3683 			break;
3684 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3685 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3686 			err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3687 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3688 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3689 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3690 			err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3691 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3692 		}
3693 	}
3694 
3695 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3696 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3697 			break;
3698 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3699 			struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3700 			err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3701 			    ba->ba_winsize, 1);
3702 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3703 		} else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3704 			err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3705 			sc->ba_tx.stop_tidmask &= ~(1 << tid);
3706 		}
3707 	}
3708 
3709 	/*
3710 	 * We "recover" from failure to start or stop a BA session
3711 	 * by resetting the device.
3712 	 */
3713 	if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3714 		task_add(systq, &sc->init_task);
3715 
3716 	refcnt_rele_wake(&sc->task_refs);
3717 	splx(s);
3718 }
3719 
3720 /*
3721  * This function is called by the upper layer when an ADDBA request is received
3722  * from another STA and before the ADDBA response is sent.
3723  */
3724 int
3725 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3726     uint8_t tid)
3727 {
3728 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3729 
3730 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3731 	    tid > IWM_MAX_TID_COUNT)
3732 		return ENOSPC;
3733 
3734 	if (sc->ba_rx.start_tidmask & (1 << tid))
3735 		return EBUSY;
3736 
3737 	sc->ba_rx.start_tidmask |= (1 << tid);
3738 	iwm_add_task(sc, systq, &sc->ba_task);
3739 
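	/*
	 * Note: returning EBUSY defers the ADDBA response. iwm_ba_task()
	 * will call ieee80211_addba_req_accept() or _refuse() via
	 * iwm_sta_rx_agg() once firmware has processed the request.
	 */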
3740 	return EBUSY;
3741 }
3742 
3743 /*
3744  * This function is called by the upper layer on teardown of an HT-immediate
3745  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3746  */
3747 void
3748 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3749     uint8_t tid)
3750 {
3751 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3752 
3753 	if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3754 		return;
3755 
3756 	sc->ba_rx.stop_tidmask |= (1 << tid);
3757 	iwm_add_task(sc, systq, &sc->ba_task);
3758 }
3759 
3760 int
3761 iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3762     uint8_t tid)
3763 {
3764 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3765 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3766 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3767 
3768 	/* We only implement Tx aggregation with DQA-capable firmware. */
3769 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3770 		return ENOTSUP;
3771 
3772 	/* Ensure we can map this TID to an aggregation queue. */
3773 	if (tid >= IWM_MAX_TID_COUNT)
3774 		return EINVAL;
3775 
3776 	/* We only support a fixed Tx aggregation window size, for now. */
3777 	if (ba->ba_winsize != IWM_FRAME_LIMIT)
3778 		return ENOTSUP;
3779 
3780 	/* Is firmware already using Tx aggregation on this queue? */
3781 	if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3782 		return ENOSPC;
3783 
3784 	/* Are we already processing an ADDBA request? */
3785 	if (sc->ba_tx.start_tidmask & (1 << tid))
3786 		return EBUSY;
3787 
3788 	sc->ba_tx.start_tidmask |= (1 << tid);
3789 	iwm_add_task(sc, systq, &sc->ba_task);
3790 
3791 	return EBUSY;
3792 }
3793 
3794 void
3795 iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3796     uint8_t tid)
3797 {
3798 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3799 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3800 
3801 	if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3802 		return;
3803 
3804 	/* Is firmware currently using Tx aggregation on this queue? */
3805 	if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3806 		return;
3807 
3808 	sc->ba_tx.stop_tidmask |= (1 << tid);
3809 	iwm_add_task(sc, systq, &sc->ba_task);
3810 }
3811 
3812 void
3813 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3814     const uint16_t *mac_override, const uint16_t *nvm_hw)
3815 {
3816 	const uint8_t *hw_addr;
3817 
3818 	if (mac_override) {
3819 		static const uint8_t reserved_mac[] = {
3820 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3821 		};
3822 
3823 		hw_addr = (const uint8_t *)(mac_override +
3824 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3825 
3826 		/*
3827 		 * Store the MAC address from the MAO section.
3828 		 * No byte swapping is required in the MAO section.
3829 		 */
3830 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3831 
3832 		/*
3833 		 * Force the use of the OTP MAC address in case of reserved MAC
3834 		 * address in the NVM, or if address is given but invalid.
3835 		 */
3836 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3837 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3838 		    sizeof(etherbroadcastaddr)) != 0) &&
3839 		    (memcmp(etheranyaddr, data->hw_addr,
3840 		    sizeof(etheranyaddr)) != 0) &&
3841 		    !ETHER_IS_MULTICAST(data->hw_addr))
3842 			return;
3843 	}
3844 
3845 	if (nvm_hw) {
3846 		/* Read the mac address from WFMP registers. */
3847 		uint32_t mac_addr0, mac_addr1;
3848 
3849 		if (!iwm_nic_lock(sc))
3850 			goto out;
3851 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3852 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3853 		iwm_nic_unlock(sc);
3854 
3855 		hw_addr = (const uint8_t *)&mac_addr0;
3856 		data->hw_addr[0] = hw_addr[3];
3857 		data->hw_addr[1] = hw_addr[2];
3858 		data->hw_addr[2] = hw_addr[1];
3859 		data->hw_addr[3] = hw_addr[0];
3860 
3861 		hw_addr = (const uint8_t *)&mac_addr1;
3862 		data->hw_addr[4] = hw_addr[1];
3863 		data->hw_addr[5] = hw_addr[0];
3864 
3865 		return;
3866 	}
3867 out:
3868 	printf("%s: mac address not found\n", DEVNAME(sc));
3869 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3870 }
3871 
3872 int
3873 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3874     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3875     const uint16_t *mac_override, const uint16_t *phy_sku,
3876     const uint16_t *regulatory, int n_regulatory)
3877 {
3878 	struct iwm_nvm_data *data = &sc->sc_nvm;
3879 	uint8_t hw_addr[ETHER_ADDR_LEN];
3880 	uint32_t sku;
3881 	uint16_t lar_config;
3882 
3883 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3884 
3885 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3886 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3887 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3888 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3889 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3890 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3891 
3892 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3893 	} else {
3894 		uint32_t radio_cfg =
3895 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3896 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3897 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3898 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3899 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3900 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3901 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3902 
3903 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3904 	}
3905 
3906 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3907 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3908 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3909 	data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE;
3910 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3911 
3912 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3913 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3914 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3915 				       IWM_NVM_LAR_OFFSET_8000;
3916 
3917 		lar_config = le16_to_cpup(regulatory + lar_offset);
3918 		data->lar_enabled = !!(lar_config &
3919 				       IWM_NVM_LAR_ENABLED_8000);
3920 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3921 	} else
3922 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3923 
3924 
3925 	/* The byte order is little endian 16 bit, meaning 214365 */
3926 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3927 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3928 		data->hw_addr[0] = hw_addr[1];
3929 		data->hw_addr[1] = hw_addr[0];
3930 		data->hw_addr[2] = hw_addr[3];
3931 		data->hw_addr[3] = hw_addr[2];
3932 		data->hw_addr[4] = hw_addr[5];
3933 		data->hw_addr[5] = hw_addr[4];
3934 	} else
3935 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
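	/*
	 * Worked example of the "214365" swap in the 7000-family branch
	 * above: NVM bytes aa bb cc dd ee ff yield the hardware address
	 * bb:aa:dd:cc:ff:ee, i.e. each 16-bit word is byte-swapped.
	 */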
3936 
3937 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3938 		if (sc->nvm_type == IWM_NVM_SDP) {
3939 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3940 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3941 		} else {
3942 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3943 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3944 		}
3945 	} else
3946 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3947 		    iwm_nvm_channels_8000,
3948 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3949 
3950 	data->calib_version = 255;   /* TODO:
3951 					this value prevents some checks from
3952 					failing; we need to check whether this
3953 					field is still needed, and if so,
3954 					where it lives in the NVM */
3955 
3956 	return 0;
3957 }
3958 
3959 int
3960 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3961 {
3962 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3963 	const uint16_t *regulatory = NULL;
3964 	int n_regulatory = 0;
3965 
3966 	/* Check for required sections. */
3967 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3968 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3969 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3970 			return ENOENT;
3971 		}
3972 
3973 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3974 
3975 		if (sc->nvm_type == IWM_NVM_SDP) {
3976 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3977 				return ENOENT;
3978 			regulatory = (const uint16_t *)
3979 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3980 			n_regulatory =
3981 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3982 		}
3983 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3984 		/* SW and REGULATORY sections are mandatory */
3985 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3986 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3987 			return ENOENT;
3988 		}
3989 		/* MAC_OVERRIDE or at least HW section must exist */
3990 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3991 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3992 			return ENOENT;
3993 		}
3994 
3995 		/* PHY_SKU section is mandatory in B0 */
3996 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3997 			return ENOENT;
3998 		}
3999 
4000 		regulatory = (const uint16_t *)
4001 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
4002 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
4003 		hw = (const uint16_t *)
4004 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
4005 		mac_override =
4006 			(const uint16_t *)
4007 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
4008 		phy_sku = (const uint16_t *)
4009 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
4010 	} else {
4011 		panic("unknown device family %d", sc->sc_device_family);
4012 	}
4013 
4014 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
4015 	calib = (const uint16_t *)
4016 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
4017 
4018 	/* XXX should pass in the length of every section */
4019 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
4020 	    phy_sku, regulatory, n_regulatory);
4021 }
4022 
4023 int
4024 iwm_nvm_init(struct iwm_softc *sc)
4025 {
4026 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
4027 	int i, section, err;
4028 	uint16_t len;
4029 	uint8_t *buf;
4030 	const size_t bufsz = sc->sc_nvm_max_section_size;
4031 
4032 	memset(nvm_sections, 0, sizeof(nvm_sections));
4033 
4034 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
4035 	if (buf == NULL)
4036 		return ENOMEM;
4037 
4038 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
4039 		section = iwm_nvm_to_read[i];
4040 		KASSERT(section < nitems(nvm_sections));
4041 
4042 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
4043 		if (err) {
4044 			err = 0;
4045 			continue;
4046 		}
4047 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
4048 		if (nvm_sections[section].data == NULL) {
4049 			err = ENOMEM;
4050 			break;
4051 		}
4052 		memcpy(nvm_sections[section].data, buf, len);
4053 		nvm_sections[section].length = len;
4054 	}
4055 	free(buf, M_DEVBUF, bufsz);
4056 	if (err == 0)
4057 		err = iwm_parse_nvm_sections(sc, nvm_sections);
4058 
4059 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
4060 		if (nvm_sections[i].data != NULL)
4061 			free(nvm_sections[i].data, M_DEVBUF,
4062 			    nvm_sections[i].length);
4063 	}
4064 
4065 	return err;
4066 }
4067 
4068 int
4069 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
4070     const uint8_t *section, uint32_t byte_cnt)
4071 {
4072 	int err = EINVAL;
4073 	uint32_t chunk_sz, offset;
4074 
4075 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
4076 
4077 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
4078 		uint32_t addr, len;
4079 		const uint8_t *data;
4080 
4081 		addr = dst_addr + offset;
4082 		len = MIN(chunk_sz, byte_cnt - offset);
4083 		data = section + offset;
4084 
4085 		err = iwm_firmware_load_chunk(sc, addr, data, len);
4086 		if (err)
4087 			break;
4088 	}
4089 
4090 	return err;
4091 }
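/*
 * Example with hypothetical sizes: if IWM_FH_MEM_TB_MAX_LENGTH were 8192,
 * a 20000-byte section would be loaded as three chunks of 8192, 8192 and
 * 3616 bytes at dst_addr, dst_addr + 8192 and dst_addr + 16384.
 */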
4092 
4093 int
4094 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4095     const uint8_t *chunk, uint32_t byte_cnt)
4096 {
4097 	struct iwm_dma_info *dma = &sc->fw_dma;
4098 	int err;
4099 
4100 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
4101 	memcpy(dma->vaddr, chunk, byte_cnt);
4102 	bus_dmamap_sync(sc->sc_dmat,
4103 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4104 
4105 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4106 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4107 		err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4108 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4109 		if (err)
4110 			return err;
4111 	}
4112 
4113 	sc->sc_fw_chunk_done = 0;
4114 
4115 	if (!iwm_nic_lock(sc))
4116 		return EBUSY;
4117 
4118 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4119 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4120 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4121 	    dst_addr);
4122 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4123 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4124 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4125 	    (iwm_get_dma_hi_addr(dma->paddr)
4126 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4127 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4128 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4129 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4130 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4131 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4132 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
4133 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4134 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4135 
4136 	iwm_nic_unlock(sc);
4137 
4138 	/* Wait for this segment to load. */
4139 	err = 0;
4140 	while (!sc->sc_fw_chunk_done) {
4141 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4142 		if (err)
4143 			break;
4144 	}
4145 
4146 	if (!sc->sc_fw_chunk_done)
4147 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4148 		    DEVNAME(sc), dst_addr, byte_cnt);
4149 
4150 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4151 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4152 		int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4153 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4154 		if (!err)
4155 			err = err2;
4156 	}
4157 
4158 	return err;
4159 }
4160 
4161 int
4162 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4163 {
4164 	struct iwm_fw_sects *fws;
4165 	int err, i;
4166 	void *data;
4167 	uint32_t dlen;
4168 	uint32_t offset;
4169 
4170 	fws = &sc->sc_fw.fw_sects[ucode_type];
4171 	for (i = 0; i < fws->fw_count; i++) {
4172 		data = fws->fw_sect[i].fws_data;
4173 		dlen = fws->fw_sect[i].fws_len;
4174 		offset = fws->fw_sect[i].fws_devoff;
4175 		if (dlen > sc->sc_fwdmasegsz) {
4176 			err = EFBIG;
4177 		} else
4178 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4179 		if (err) {
4180 			printf("%s: could not load firmware chunk %u of %u\n",
4181 			    DEVNAME(sc), i, fws->fw_count);
4182 			return err;
4183 		}
4184 	}
4185 
4186 	iwm_enable_interrupts(sc);
4187 
4188 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
4189 
4190 	return 0;
4191 }
4192 
4193 int
4194 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4195     int cpu, int *first_ucode_section)
4196 {
4197 	int shift_param;
4198 	int i, err = 0, sec_num = 0x1;
4199 	uint32_t val, last_read_idx = 0;
4200 	void *data;
4201 	uint32_t dlen;
4202 	uint32_t offset;
4203 
4204 	if (cpu == 1) {
4205 		shift_param = 0;
4206 		*first_ucode_section = 0;
4207 	} else {
4208 		shift_param = 16;
4209 		(*first_ucode_section)++;
4210 	}
4211 
4212 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4213 		last_read_idx = i;
4214 		data = fws->fw_sect[i].fws_data;
4215 		dlen = fws->fw_sect[i].fws_len;
4216 		offset = fws->fw_sect[i].fws_devoff;
4217 
4218 		/*
4219 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
4220 		 * CPU1 sections from the CPU2 sections.
4221 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
4222 		 * CPU2 non-paged sections from the CPU2 paging sections.
4223 		 */
4224 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4225 		    offset == IWM_PAGING_SEPARATOR_SECTION)
4226 			break;
4227 
4228 		if (dlen > sc->sc_fwdmasegsz) {
4229 			err = EFBIG;
4230 		} else
4231 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4232 		if (err) {
4233 			printf("%s: could not load firmware chunk %d "
4234 			    "(error %d)\n", DEVNAME(sc), i, err);
4235 			return err;
4236 		}
4237 
4238 		/* Notify the ucode of the loaded section number and status */
4239 		if (iwm_nic_lock(sc)) {
4240 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4241 			val = val | (sec_num << shift_param);
4242 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4243 			sec_num = (sec_num << 1) | 0x1;
4244 			iwm_nic_unlock(sc);
4245 		} else {
4246 			err = EBUSY;
4247 			printf("%s: could not load firmware chunk %d "
4248 			    "(error %d)\n", DEVNAME(sc), i, err);
4249 			return err;
4250 		}
4251 	}
4252 
4253 	*first_ucode_section = last_read_idx;
4254 
4255 	if (iwm_nic_lock(sc)) {
4256 		if (cpu == 1)
4257 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4258 		else
4259 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4260 		iwm_nic_unlock(sc);
4261 	} else {
4262 		err = EBUSY;
4263 		printf("%s: could not finalize firmware loading (error %d)\n",
4264 		    DEVNAME(sc), err);
4265 		return err;
4266 	}
4267 
4268 	return 0;
4269 }
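/*
 * The load-status handshake above accumulates one bit per loaded section:
 * sec_num starts at 0x1 and becomes 0x3, 0x7, 0xf, ...; shift_param moves
 * CPU2's mask into the upper half of IWM_FH_UCODE_LOAD_STATUS, and the
 * final 0xFFFF (CPU1) or 0xFFFFFFFF (CPU2) write signals completion.
 */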
4270 
4271 int
4272 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4273 {
4274 	struct iwm_fw_sects *fws;
4275 	int err = 0;
4276 	int first_ucode_section;
4277 
4278 	fws = &sc->sc_fw.fw_sects[ucode_type];
4279 
4280 	/* Configure the ucode to be ready to get the secured image. */
4281 	/* Release CPU reset. */
4282 	if (iwm_nic_lock(sc)) {
4283 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4284 		    IWM_RELEASE_CPU_RESET_BIT);
4285 		iwm_nic_unlock(sc);
4286 	}
4287 
4288 	/* Load the binary secured sections of CPU1 into the FW. */
4289 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4290 	if (err)
4291 		return err;
4292 
4293 	/* Load the binary sections of CPU2 into the FW. */
4294 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4295 	if (err)
4296 		return err;
4297 
4298 	iwm_enable_interrupts(sc);
4299 	return 0;
4300 }
4301 
4302 int
4303 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4304 {
4305 	int err;
4306 
4307 	splassert(IPL_NET);
4308 
4309 	sc->sc_uc.uc_intr = 0;
4310 	sc->sc_uc.uc_ok = 0;
4311 
4312 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4313 		err = iwm_load_firmware_8000(sc, ucode_type);
4314 	else
4315 		err = iwm_load_firmware_7000(sc, ucode_type);
4316 
4317 	if (err)
4318 		return err;
4319 
4320 	/* wait for the firmware to load */
4321 	err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4322 	if (err || !sc->sc_uc.uc_ok)
4323 		printf("%s: could not load firmware\n", DEVNAME(sc));
4324 
4325 	return err;
4326 }
4327 
4328 int
4329 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4330 {
4331 	int err;
4332 
4333 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4334 
4335 	err = iwm_nic_init(sc);
4336 	if (err) {
4337 		printf("%s: unable to init nic\n", DEVNAME(sc));
4338 		return err;
4339 	}
4340 
4341 	/* make sure rfkill handshake bits are cleared */
4342 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4343 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4344 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4345 
4346 	/* clear (again), then enable firmware load interrupt */
4347 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4348 	iwm_enable_fwload_interrupt(sc);
4349 
4350 	/* really make sure rfkill handshake bits are cleared */
4351 	/* maybe we should write a few times more?  just to make sure */
4352 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4353 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4354 
4355 	return iwm_load_firmware(sc, ucode_type);
4356 }
4357 
4358 int
4359 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4360 {
4361 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4362 		.valid = htole32(valid_tx_ant),
4363 	};
4364 
4365 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4366 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4367 }
4368 
4369 int
4370 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4371 {
4372 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
4373 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4374 
4375 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4376 	    sc->sc_extra_phy_config);
4377 	phy_cfg_cmd.calib_control.event_trigger =
4378 	    sc->sc_default_calib[ucode_type].event_trigger;
4379 	phy_cfg_cmd.calib_control.flow_trigger =
4380 	    sc->sc_default_calib[ucode_type].flow_trigger;
4381 
4382 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4383 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4384 }
4385 
4386 int
4387 iwm_send_dqa_cmd(struct iwm_softc *sc)
4388 {
4389 	struct iwm_dqa_enable_cmd dqa_cmd = {
4390 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4391 	};
4392 	uint32_t cmd_id;
4393 
4394 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4395 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4396 }
4397 
4398 int
4399 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4400 	enum iwm_ucode_type ucode_type)
4401 {
4402 	enum iwm_ucode_type old_type = sc->sc_uc_current;
4403 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4404 	int err;
4405 
4406 	err = iwm_read_firmware(sc);
4407 	if (err)
4408 		return err;
4409 
4410 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4411 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
4412 	else
4413 		sc->cmdqid = IWM_CMD_QUEUE;
4414 
4415 	sc->sc_uc_current = ucode_type;
4416 	err = iwm_start_fw(sc, ucode_type);
4417 	if (err) {
4418 		sc->sc_uc_current = old_type;
4419 		return err;
4420 	}
4421 
4422 	err = iwm_post_alive(sc);
4423 	if (err)
4424 		return err;
4425 
4426 	/*
4427 	 * Configure and operate the firmware paging mechanism.
4428 	 * The driver configures the paging flow only once; the CPU2 paging
4429 	 * image is included in the IWM_UCODE_INIT image.
4430 	 */
4431 	if (fw->paging_mem_size) {
4432 		err = iwm_save_fw_paging(sc, fw);
4433 		if (err) {
4434 			printf("%s: failed to save the FW paging image\n",
4435 			    DEVNAME(sc));
4436 			return err;
4437 		}
4438 
4439 		err = iwm_send_paging_cmd(sc, fw);
4440 		if (err) {
4441 			printf("%s: failed to send the paging cmd\n",
4442 			    DEVNAME(sc));
4443 			iwm_free_fw_paging(sc);
4444 			return err;
4445 		}
4446 	}
4447 
4448 	return 0;
4449 }
4450 
4451 int
4452 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4453 {
4454 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4455 	int err, s;
4456 
4457 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4458 		printf("%s: radio is disabled by hardware switch\n",
4459 		    DEVNAME(sc));
4460 		return EPERM;
4461 	}
4462 
4463 	s = splnet();
4464 	sc->sc_init_complete = 0;
4465 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4466 	if (err) {
4467 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4468 		splx(s);
4469 		return err;
4470 	}
4471 
4472 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4473 		err = iwm_send_bt_init_conf(sc);
4474 		if (err) {
4475 			printf("%s: could not init bt coex (error %d)\n",
4476 			    DEVNAME(sc), err);
4477 			splx(s);
4478 			return err;
4479 		}
4480 	}
4481 
4482 	if (justnvm) {
4483 		err = iwm_nvm_init(sc);
4484 		if (err) {
4485 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4486 			splx(s);
4487 			return err;
4488 		}
4489 
4490 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4491 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4492 			    sc->sc_nvm.hw_addr);
4493 
4494 		splx(s);
4495 		return 0;
4496 	}
4497 
4498 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4499 	if (err) {
4500 		splx(s);
4501 		return err;
4502 	}
4503 
4504 	/* Send TX valid antennas before triggering calibrations */
4505 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4506 	if (err) {
4507 		splx(s);
4508 		return err;
4509 	}
4510 
4511 	/*
4512 	 * Send the PHY configuration command to the init uCode
4513 	 * to start the 16.0 uCode init image's internal calibrations.
4514 	 */
4515 	err = iwm_send_phy_cfg_cmd(sc);
4516 	if (err) {
4517 		splx(s);
4518 		return err;
4519 	}
4520 
4521 	/*
4522 	 * Nothing to do but wait for the init complete and phy DB
4523 	 * notifications from the firmware.
4524 	 */
4525 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4526 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4527 		    SEC_TO_NSEC(2));
4528 		if (err)
4529 			break;
4530 	}
4531 
4532 	splx(s);
4533 	return err;
4534 }
4535 
4536 int
4537 iwm_config_ltr(struct iwm_softc *sc)
4538 {
4539 	struct iwm_ltr_config_cmd cmd = {
4540 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4541 	};
4542 
4543 	if (!sc->sc_ltr_enabled)
4544 		return 0;
4545 
4546 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4547 }
4548 
4549 int
4550 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4551 {
4552 	struct iwm_rx_ring *ring = &sc->rxq;
4553 	struct iwm_rx_data *data = &ring->data[idx];
4554 	struct mbuf *m;
4555 	int err;
4556 	int fatal = 0;
4557 
4558 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4559 	if (m == NULL)
4560 		return ENOBUFS;
4561 
4562 	if (size <= MCLBYTES) {
4563 		MCLGET(m, M_DONTWAIT);
4564 	} else {
4565 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4566 	}
4567 	if ((m->m_flags & M_EXT) == 0) {
4568 		m_freem(m);
4569 		return ENOBUFS;
4570 	}
4571 
4572 	if (data->m != NULL) {
4573 		bus_dmamap_unload(sc->sc_dmat, data->map);
4574 		fatal = 1;
4575 	}
4576 
4577 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4578 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4579 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4580 	if (err) {
4581 		/* XXX */
4582 		if (fatal)
4583 			panic("iwm: could not load RX mbuf");
4584 		m_freem(m);
4585 		return err;
4586 	}
4587 	data->m = m;
4588 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4589 
4590 	/* Update RX descriptor. */
4591 	if (sc->sc_mqrx_supported) {
4592 		((uint64_t *)ring->desc)[idx] =
4593 		    htole64(data->map->dm_segs[0].ds_addr);
4594 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4595 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4596 		    BUS_DMASYNC_PREWRITE);
4597 	} else {
4598 		((uint32_t *)ring->desc)[idx] =
4599 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4600 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4601 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4602 		    BUS_DMASYNC_PREWRITE);
4603 	}
4604 
4605 	return 0;
4606 }
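/*
 * Note on the descriptor formats above: MQ hardware takes the full 64-bit
 * DMA address, while the legacy descriptor stores the physical address
 * shifted right by 8, so it can only represent buffers whose physical
 * address is aligned to 256 bytes.
 */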
4607 
4608 /*
4609  * RSSI values are reported by the FW as positive values - need to negate
4610  * to obtain their dBm.  Account for missing antennas by replacing 0
4611  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
4612  */
4613 int
4614 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4615 {
4616 	int energy_a, energy_b, energy_c, max_energy;
4617 	uint32_t val;
4618 
4619 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4620 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4621 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4622 	energy_a = energy_a ? -energy_a : -256;
4623 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4624 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4625 	energy_b = energy_b ? -energy_b : -256;
4626 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4627 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4628 	energy_c = energy_c ? -energy_c : -256;
4629 	max_energy = MAX(energy_a, energy_b);
4630 	max_energy = MAX(max_energy, energy_c);
4631 
4632 	return max_energy;
4633 }
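/*
 * Example: firmware energy readings of 40, 55 and 0 (antenna C missing)
 * become -40, -55 and -256 dBm above, and the strongest, -40 dBm, is
 * returned.
 */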
4634 
4635 int
4636 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4637     struct iwm_rx_mpdu_desc *desc)
4638 {
4639 	int energy_a, energy_b;
4640 
4641 	energy_a = desc->v1.energy_a;
4642 	energy_b = desc->v1.energy_b;
4643 	energy_a = energy_a ? -energy_a : -256;
4644 	energy_b = energy_b ? -energy_b : -256;
4645 	return MAX(energy_a, energy_b);
4646 }
4647 
4648 void
4649 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4650     struct iwm_rx_data *data)
4651 {
4652 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4653 
4654 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4655 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4656 
4657 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4658 }
4659 
4660 /*
4661  * Retrieve the average noise (in dBm) among receivers.
4662  */
4663 int
4664 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4665 {
4666 	int i, total, nbant, noise;
4667 
4668 	total = nbant = noise = 0;
4669 	for (i = 0; i < 3; i++) {
4670 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4671 		if (noise) {
4672 			total += noise;
4673 			nbant++;
4674 		}
4675 	}
4676 
4677 	/* There should be at least one antenna but check anyway. */
4678 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4679 }
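/*
 * Example: beacon silence RSSI readings of 40, 44 and 0 give total = 84
 * across nbant = 2 antennas (the zero reading is skipped), so the result
 * is 84 / 2 - 107 = -65 dBm.
 */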
4680 
4681 int
4682 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4683     struct ieee80211_rxinfo *rxi)
4684 {
4685 	struct ieee80211com *ic = &sc->sc_ic;
4686 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4687 	struct ieee80211_frame *wh;
4688 	uint64_t pn, *prsc;
4689 	uint8_t *ivp;
4690 	uint8_t tid;
4691 	int hdrlen, hasqos;
4692 
4693 	wh = mtod(m, struct ieee80211_frame *);
4694 	hdrlen = ieee80211_get_hdrlen(wh);
4695 	ivp = (uint8_t *)wh + hdrlen;
4696 
4697 	/* Check that ExtIV bit is set. */
4698 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4699 		return 1;
4700 
4701 	hasqos = ieee80211_has_qos(wh);
4702 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4703 	prsc = &k->k_rsc[tid];
4704 
4705 	/* Extract the 48-bit PN from the CCMP header. */
4706 	pn = (uint64_t)ivp[0]       |
4707 	     (uint64_t)ivp[1] <<  8 |
4708 	     (uint64_t)ivp[4] << 16 |
4709 	     (uint64_t)ivp[5] << 24 |
4710 	     (uint64_t)ivp[6] << 32 |
4711 	     (uint64_t)ivp[7] << 40;
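	/*
	 * For reference, the 8-byte CCMP header layout is
	 * PN0 PN1 <reserved> <key ID/ExtIV> PN2 PN3 PN4 PN5,
	 * which is why ivp[2] and ivp[3] are skipped above.
	 */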
4712 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4713 		if (pn < *prsc) {
4714 			ic->ic_stats.is_ccmp_replays++;
4715 			return 1;
4716 		}
4717 	} else if (pn <= *prsc) {
4718 		ic->ic_stats.is_ccmp_replays++;
4719 		return 1;
4720 	}
4721 	/* Last seen packet number is updated in ieee80211_inputm(). */
4722 
4723 	/*
4724 	 * Some firmware versions strip the MIC, and some don't. It is not
4725 	 * clear which of the capability flags could tell us what to expect.
4726 	 * For now, keep things simple and just leave the MIC in place if
4727 	 * it is present.
4728 	 *
4729 	 * The IV will be stripped by ieee80211_inputm().
4730 	 */
4731 	return 0;
4732 }
4733 
4734 int
4735 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4736     struct ieee80211_rxinfo *rxi)
4737 {
4738 	struct ieee80211com *ic = &sc->sc_ic;
4739 	struct ifnet *ifp = IC2IFP(ic);
4740 	struct ieee80211_frame *wh;
4741 	struct ieee80211_node *ni;
4742 	int ret = 0;
4743 	uint8_t type, subtype;
4744 
4745 	wh = mtod(m, struct ieee80211_frame *);
4746 
4747 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4748 	if (type == IEEE80211_FC0_TYPE_CTL)
4749 		return 0;
4750 
4751 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4752 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4753 		return 0;
4754 
4755 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4756 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4757 		return 0;
4758 
4759 	ni = ieee80211_find_rxnode(ic, wh);
4760 	/* Handle hardware decryption. */
4761 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4762 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4763 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4764 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4765 			ic->ic_stats.is_ccmp_dec_errs++;
4766 			ret = 1;
4767 			goto out;
4768 		}
4769 		/* Check whether decryption was successful or not. */
4770 		if ((rx_pkt_status &
4771 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4772 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4773 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4774 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4775 			ic->ic_stats.is_ccmp_dec_errs++;
4776 			ret = 1;
4777 			goto out;
4778 		}
4779 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4780 	}
4781 out:
4782 	if (ret)
4783 		ifp->if_ierrors++;
4784 	ieee80211_release_node(ic, ni);
4785 	return ret;
4786 }
4787 
4788 void
4789 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4790     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4791     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4792     struct mbuf_list *ml)
4793 {
4794 	struct ieee80211com *ic = &sc->sc_ic;
4795 	struct ifnet *ifp = IC2IFP(ic);
4796 	struct ieee80211_frame *wh;
4797 	struct ieee80211_node *ni;
4798 
4799 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4800 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4801 
4802 	wh = mtod(m, struct ieee80211_frame *);
4803 	ni = ieee80211_find_rxnode(ic, wh);
4804 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4805 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4806 		ifp->if_ierrors++;
4807 		m_freem(m);
4808 		ieee80211_release_node(ic, ni);
4809 		return;
4810 	}
4811 
4812 #if NBPFILTER > 0
4813 	if (sc->sc_drvbpf != NULL) {
4814 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4815 		uint16_t chan_flags;
4816 
4817 		tap->wr_flags = 0;
4818 		if (is_shortpre)
4819 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4820 		tap->wr_chan_freq =
4821 		    htole16(ic->ic_channels[chanidx].ic_freq);
4822 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4823 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4824 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4825 			chan_flags &= ~IEEE80211_CHAN_HT;
4826 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4827 		}
4828 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4829 			chan_flags &= ~IEEE80211_CHAN_VHT;
4830 		tap->wr_chan_flags = htole16(chan_flags);
4831 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4832 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4833 		tap->wr_tsft = device_timestamp;
4834 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4835 			uint8_t mcs = (rate_n_flags &
4836 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4837 			    IWM_RATE_HT_MCS_NSS_MSK));
4838 			tap->wr_rate = (0x80 | mcs);
4839 		} else {
4840 			uint8_t rate = (rate_n_flags &
4841 			    IWM_RATE_LEGACY_RATE_MSK);
4842 			switch (rate) {
4843 			/* CCK rates. */
4844 			case  10: tap->wr_rate =   2; break;
4845 			case  20: tap->wr_rate =   4; break;
4846 			case  55: tap->wr_rate =  11; break;
4847 			case 110: tap->wr_rate =  22; break;
4848 			/* OFDM rates. */
4849 			case 0xd: tap->wr_rate =  12; break;
4850 			case 0xf: tap->wr_rate =  18; break;
4851 			case 0x5: tap->wr_rate =  24; break;
4852 			case 0x7: tap->wr_rate =  36; break;
4853 			case 0x9: tap->wr_rate =  48; break;
4854 			case 0xb: tap->wr_rate =  72; break;
4855 			case 0x1: tap->wr_rate =  96; break;
4856 			case 0x3: tap->wr_rate = 108; break;
4857 			/* Unknown rate: should not happen. */
4858 			default:  tap->wr_rate =   0;
4859 			}
4860 		}
4861 
4862 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4863 		    m, BPF_DIRECTION_IN);
4864 	}
4865 #endif
4866 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4867 	ieee80211_release_node(ic, ni);
4868 }
4869 
4870 void
4871 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4872     size_t maxlen, struct mbuf_list *ml)
4873 {
4874 	struct ieee80211com *ic = &sc->sc_ic;
4875 	struct ieee80211_rxinfo rxi;
4876 	struct iwm_rx_phy_info *phy_info;
4877 	struct iwm_rx_mpdu_res_start *rx_res;
4878 	int device_timestamp;
4879 	uint16_t phy_flags;
4880 	uint32_t len;
4881 	uint32_t rx_pkt_status;
4882 	int rssi, chanidx, rate_n_flags;
4883 
4884 	memset(&rxi, 0, sizeof(rxi));
4885 
4886 	phy_info = &sc->sc_last_phy_info;
4887 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4888 	len = le16toh(rx_res->byte_count);
4889 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4890 		/* Allow control frames in monitor mode. */
4891 		if (len < sizeof(struct ieee80211_frame_cts)) {
4892 			ic->ic_stats.is_rx_tooshort++;
4893 			IC2IFP(ic)->if_ierrors++;
4894 			m_freem(m);
4895 			return;
4896 		}
4897 	} else if (len < sizeof(struct ieee80211_frame)) {
4898 		ic->ic_stats.is_rx_tooshort++;
4899 		IC2IFP(ic)->if_ierrors++;
4900 		m_freem(m);
4901 		return;
4902 	}
4903 	if (len > maxlen - sizeof(*rx_res)) {
4904 		IC2IFP(ic)->if_ierrors++;
4905 		m_freem(m);
4906 		return;
4907 	}
4908 
4909 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4910 		m_freem(m);
4911 		return;
4912 	}
4913 
4914 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4915 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4916 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4917 		m_freem(m);
4918 		return; /* drop */
4919 	}
4920 
4921 	m->m_data = pktdata + sizeof(*rx_res);
4922 	m->m_pkthdr.len = m->m_len = len;
4923 
4924 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4925 		m_freem(m);
4926 		return;
4927 	}
4928 
4929 	chanidx = letoh32(phy_info->channel);
4930 	device_timestamp = le32toh(phy_info->system_timestamp);
4931 	phy_flags = letoh16(phy_info->phy_flags);
4932 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4933 
4934 	rssi = iwm_get_signal_strength(sc, phy_info);
4935 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4936 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
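	/*
	 * Example (assuming IWM_MIN_DBM is -100): a signal of -65 dBm is
	 * normalized to 100 + (-65) = 35 and then clipped to ic_max_rssi
	 * if it exceeds that limit.
	 */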
4937 
4938 	rxi.rxi_rssi = rssi;
4939 	rxi.rxi_tstamp = device_timestamp;
4940 	rxi.rxi_chan = chanidx;
4941 
4942 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4943 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4944 	    rate_n_flags, device_timestamp, &rxi, ml);
4945 }
4946 
4947 void
4948 iwm_flip_address(uint8_t *addr)
4949 {
4950 	int i;
4951 	uint8_t mac_addr[ETHER_ADDR_LEN];
4952 
4953 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4954 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4955 	IEEE80211_ADDR_COPY(addr, mac_addr);
4956 }
4957 
4958 /*
4959  * Drop duplicate 802.11 retransmissions
4960  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4961  * and handle pseudo-duplicate frames which result from deaggregation
4962  * of A-MSDU frames in hardware.
4963  */
4964 int
4965 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4966     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4967 {
4968 	struct ieee80211com *ic = &sc->sc_ic;
4969 	struct iwm_node *in = (void *)ic->ic_bss;
4970 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4971 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4972 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4973 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4974 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4975 	int hasqos = ieee80211_has_qos(wh);
4976 	uint16_t seq;
4977 
4978 	if (type == IEEE80211_FC0_TYPE_CTL ||
4979 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4980 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4981 		return 0;
4982 
4983 	if (hasqos) {
4984 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4985 		if (tid > IWM_MAX_TID_COUNT)
4986 			tid = IWM_MAX_TID_COUNT;
4987 	}
4988 
4989 	/* If this wasn't part of an A-MSDU the sub-frame index will be 0. */
4990 	subframe_idx = desc->amsdu_info &
4991 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4992 
4993 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4994 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4995 	    dup_data->last_seq[tid] == seq &&
4996 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4997 		return 1;
4998 
4999 	/*
5000 	 * Allow the same frame sequence number for all A-MSDU subframes
5001 	 * following the first subframe.
5002 	 * Otherwise these subframes would be discarded as replays.
5003 	 */
5004 	if (dup_data->last_seq[tid] == seq &&
5005 	    subframe_idx > dup_data->last_sub_frame[tid] &&
5006 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
5007 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5008 	}
5009 
5010 	dup_data->last_seq[tid] = seq;
5011 	dup_data->last_sub_frame[tid] = subframe_idx;
5012 
5013 	return 0;
5014 }
5015 
5016 /*
5017  * Returns true if sn2 - buffer_size < sn1 < sn2.
5018  * To be used only in order to compare reorder buffer head with NSSN.
5019  * We fully trust NSSN unless it is behind us due to reorder timeout.
5020  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
5021  */
5022 int
5023 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
5024 {
5025 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
5026 }
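/*
 * Example: with buffer_size = 64 and sn2 = 100, sn1 = 80 is "less" (it
 * lies within the 64-frame window behind 100), while sn1 = 20 (too far
 * behind) and sn1 = 100 (not behind at all) are not.
 */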
5027 
5028 void
5029 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
5030     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
5031     uint16_t nssn, struct mbuf_list *ml)
5032 {
5033 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
5034 	uint16_t ssn = reorder_buf->head_sn;
5035 
5036 	/* ignore nssn smaller than head sn - this can happen due to timeout */
5037 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
5038 		goto set_timer;
5039 
5040 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
5041 		int index = ssn % reorder_buf->buf_size;
5042 		struct mbuf *m;
5043 		int chanidx, is_shortpre;
5044 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
5045 		struct ieee80211_rxinfo *rxi;
5046 
5047 		/* This data is the same for all A-MSDU subframes. */
5048 		chanidx = entries[index].chanidx;
5049 		rx_pkt_status = entries[index].rx_pkt_status;
5050 		is_shortpre = entries[index].is_shortpre;
5051 		rate_n_flags = entries[index].rate_n_flags;
5052 		device_timestamp = entries[index].device_timestamp;
5053 		rxi = &entries[index].rxi;
5054 
5055 		/*
5056 		 * Empty the list. It will hold more than one frame for an
5057 		 * A-MSDU. An empty list is also valid, since the nssn
5058 		 * indicates that frames were received.
5059 		 */
5060 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
5061 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
5062 			    rate_n_flags, device_timestamp, rxi, ml);
5063 			reorder_buf->num_stored--;
5064 
5065 			/*
5066 			 * Allow the same frame sequence number and CCMP PN for
5067 			 * all A-MSDU subframes following the first subframe.
5068 			 * Otherwise they would be discarded as replays.
5069 			 */
5070 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5071 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5072 		}
5073 
5074 		ssn = (ssn + 1) & 0xfff;
5075 	}
5076 	reorder_buf->head_sn = nssn;
5077 
5078 set_timer:
5079 	if (reorder_buf->num_stored && !reorder_buf->removed) {
5080 		timeout_add_usec(&reorder_buf->reorder_timer,
5081 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5082 	} else
5083 		timeout_del(&reorder_buf->reorder_timer);
5084 }
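/*
 * Example: with head_sn = 10 and nssn = 13, the loop above flushes the
 * buffer slots for SNs 10, 11 and 12 (each possibly holding several
 * A-MSDU subframes) and then advances head_sn to 13.
 */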
5085 
5086 int
5087 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5088     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5089 {
5090 	struct ieee80211com *ic = &sc->sc_ic;
5091 
5092 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5093 		/* we have a new (A-)MPDU ... */
5094 
5095 		/*
5096 		 * reset counter to 0 if we didn't have any oldsn in
5097 		 * the last A-MPDU (as detected by GP2 being identical)
5098 		 */
5099 		if (!buffer->consec_oldsn_prev_drop)
5100 			buffer->consec_oldsn_drops = 0;
5101 
5102 		/* either way, update our tracking state */
5103 		buffer->consec_oldsn_ampdu_gp2 = gp2;
5104 	} else if (buffer->consec_oldsn_prev_drop) {
5105 		/*
5106 		 * tracking state didn't change, and we had an old SN
5107 		 * indication before - do nothing in this case, we
5108 		 * already noted this one down and are waiting for the
5109 		 * next A-MPDU (by GP2)
5110 		 */
5111 		return 0;
5112 	}
5113 
5114 	/* return unless this MPDU has old SN */
5115 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5116 		return 0;
5117 
5118 	/* update state */
5119 	buffer->consec_oldsn_prev_drop = 1;
5120 	buffer->consec_oldsn_drops++;
5121 
5122 	/* if limit is reached, send del BA and reset state */
5123 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5124 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5125 		    0, tid);
5126 		buffer->consec_oldsn_prev_drop = 0;
5127 		buffer->consec_oldsn_drops = 0;
5128 		return 1;
5129 	}
5130 
5131 	return 0;
5132 }
5133 
5134 /*
5135  * Handle re-ordering of frames which were de-aggregated in hardware.
5136  * Returns 1 if the MPDU was consumed (buffered or dropped).
5137  * Returns 0 if the MPDU should be passed to the upper layer.
5138  */
5139 int
5140 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5141     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5142     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5143     struct mbuf_list *ml)
5144 {
5145 	struct ieee80211com *ic = &sc->sc_ic;
5146 	struct ieee80211_frame *wh;
5147 	struct ieee80211_node *ni;
5148 	struct iwm_rxba_data *rxba;
5149 	struct iwm_reorder_buffer *buffer;
5150 	uint32_t reorder_data = le32toh(desc->reorder_data);
5151 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5152 	int last_subframe =
5153 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5154 	uint8_t tid;
5155 	uint8_t subframe_idx = (desc->amsdu_info &
5156 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5157 	struct iwm_reorder_buf_entry *entries;
5158 	int index;
5159 	uint16_t nssn, sn;
5160 	uint8_t baid, type, subtype;
5161 	int hasqos;
5162 
5163 	wh = mtod(m, struct ieee80211_frame *);
5164 	hasqos = ieee80211_has_qos(wh);
5165 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5166 
5167 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5168 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5169 
5170 	/*
5171 	 * We are only interested in Block Ack requests and unicast QoS data.
5172 	 */
5173 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5174 		return 0;
5175 	if (hasqos) {
5176 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5177 			return 0;
5178 	} else {
5179 		if (type != IEEE80211_FC0_TYPE_CTL ||
5180 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
5181 			return 0;
5182 	}
5183 
5184 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5185 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
5186 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5187 	    baid >= nitems(sc->sc_rxba_data))
5188 		return 0;
5189 
5190 	rxba = &sc->sc_rxba_data[baid];
5191 	if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5192 	    tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5193 		return 0;
5194 
5195 	if (rxba->timeout != 0)
5196 		getmicrouptime(&rxba->last_rx);
5197 
5198 	/* Bypass A-MPDU re-ordering in net80211. */
5199 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5200 
5201 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5202 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5203 		IWM_RX_MPDU_REORDER_SN_SHIFT;
5204 
5205 	buffer = &rxba->reorder_buf;
5206 	entries = &rxba->entries[0];
5207 
5208 	if (!buffer->valid) {
5209 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5210 			return 0;
5211 		buffer->valid = 1;
5212 	}
5213 
5214 	ni = ieee80211_find_rxnode(ic, wh);
5215 	if (type == IEEE80211_FC0_TYPE_CTL &&
5216 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5217 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5218 		goto drop;
5219 	}
5220 
5221 	/*
5222 	 * If there was a significant jump in the nssn - adjust.
5223 	 * If the SN is smaller than the NSSN it might need to first go into
5224 	 * the reorder buffer, in which case we just release up to it and the
5225 	 * rest of the function will take care of storing it and releasing up to
5226 	 * the nssn.
5227 	 */
5228 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5229 	    buffer->buf_size) ||
5230 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5231 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5232 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5233 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5234 	}
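	/*
	 * Worked example of the check above (illustrative only, assuming
	 * a 64-frame BA window): with head_sn = 4090 and buf_size = 64
	 * the window spans sequence numbers 4090..57, wrapping at 4096.
	 * A frame with sn = nssn = 100 lies beyond the window's end, so
	 * min_sn = 100 and frames are released up to that point before
	 * the window moves forward.
	 */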
5235 
5236 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5237 	    device_timestamp)) {
5238 		/* BA session will be torn down. */
5239 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5240 		goto drop;
5242 	}
5243 
5244 	/* drop any outdated packets */
5245 	if (SEQ_LT(sn, buffer->head_sn)) {
5246 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5247 		goto drop;
5248 	}
5249 
5250 	/* release immediately if allowed by nssn and no stored frames */
5251 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5252 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5253 		   (!is_amsdu || last_subframe))
5254 			buffer->head_sn = nssn;
5255 		ieee80211_release_node(ic, ni);
5256 		return 0;
5257 	}
5258 
5259 	/*
5260 	 * Release immediately if there are no stored frames and the SN is
5261 	 * equal to the head. This can happen due to the reorder timer,
5262 	 * which can leave NSSN behind head_sn: after we have released
5263 	 * everything and the next in-sequence frame arrives, the NSSN
5264 	 * alone would not allow an immediate release, even though there
5265 	 * is no hole and we can move forward.
5266 	 */
5267 	if (!buffer->num_stored && sn == buffer->head_sn) {
5268 		if (!is_amsdu || last_subframe)
5269 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5270 		ieee80211_release_node(ic, ni);
5271 		return 0;
5272 	}
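	/*
	 * Note on the wrap-around above (illustrative only): sequence
	 * numbers are 12 bits wide, so advancing the head from 0xfff
	 * yields (0xfff + 1) & 0xfff == 0, wrapping back to sequence
	 * number 0.
	 */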
5273 
5274 	index = sn % buffer->buf_size;
5275 
5276 	/*
5277 	 * Check if we have already stored this frame.
5278 	 * Since an A-MSDU is either received in full or not at all, the
5279 	 * logic is simple: if frames are already stored at this position
5280 	 * and the last stored A-MSDU had a different SN, this is a
5281 	 * retransmission. If the SN is the same, an incrementing subframe
5282 	 * index means the same A-MSDU; otherwise it is a retransmission.
5283 	 */
5284 	if (!ml_empty(&entries[index].frames)) {
5285 		if (!is_amsdu) {
5286 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5287 			goto drop;
5288 		} else if (sn != buffer->last_amsdu ||
5289 		    buffer->last_sub_index >= subframe_idx) {
5290 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5291 			goto drop;
5292 		}
5293 	} else {
5294 		/* This data is the same for all A-MSDU subframes. */
5295 		entries[index].chanidx = chanidx;
5296 		entries[index].is_shortpre = is_shortpre;
5297 		entries[index].rate_n_flags = rate_n_flags;
5298 		entries[index].device_timestamp = device_timestamp;
5299 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5300 	}
5301 
5302 	/* put in reorder buffer */
5303 	ml_enqueue(&entries[index].frames, m);
5304 	buffer->num_stored++;
5305 	getmicrouptime(&entries[index].reorder_time);
5306 
5307 	if (is_amsdu) {
5308 		buffer->last_amsdu = sn;
5309 		buffer->last_sub_index = subframe_idx;
5310 	}
5311 
5312 	/*
5313 	 * We cannot trust NSSN for A-MSDU sub-frames that are not the last.
5314 	 * The reason is that NSSN advances on the first sub-frame, and may
5315 	 * cause the reorder buffer to advance before all sub-frames arrive.
5316 	 * Example: the reorder buffer contains SN 0 & 2, and we receive an
5317 	 * A-MSDU with SN 1. NSSN for the first sub-frame will be 3, causing
5318 	 * the driver to release SN 0, 1 and 2. When sub-frame 1 arrives, the
5319 	 * reorder buffer is already ahead and it will be dropped.
5320 	 * If the last sub-frame is not on this queue, we will get a frame
5321 	 * release notification with an up-to-date NSSN.
5322 	 */
5323 	if (!is_amsdu || last_subframe)
5324 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5325 
5326 	ieee80211_release_node(ic, ni);
5327 	return 1;
5328 
5329 drop:
5330 	m_freem(m);
5331 	ieee80211_release_node(ic, ni);
5332 	return 1;
5333 }
5334 
5335 void
5336 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5337     size_t maxlen, struct mbuf_list *ml)
5338 {
5339 	struct ieee80211com *ic = &sc->sc_ic;
5340 	struct ieee80211_rxinfo rxi;
5341 	struct iwm_rx_mpdu_desc *desc;
5342 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5343 	int rssi;
5344 	uint8_t chanidx;
5345 	uint16_t phy_info;
5346 
5347 	memset(&rxi, 0, sizeof(rxi));
5348 
5349 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
5350 
5351 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5352 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5353 		m_freem(m);
5354 		return; /* drop */
5355 	}
5356 
5357 	len = le16toh(desc->mpdu_len);
5358 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5359 		/* Allow control frames in monitor mode. */
5360 		if (len < sizeof(struct ieee80211_frame_cts)) {
5361 			ic->ic_stats.is_rx_tooshort++;
5362 			IC2IFP(ic)->if_ierrors++;
5363 			m_freem(m);
5364 			return;
5365 		}
5366 	} else if (len < sizeof(struct ieee80211_frame)) {
5367 		ic->ic_stats.is_rx_tooshort++;
5368 		IC2IFP(ic)->if_ierrors++;
5369 		m_freem(m);
5370 		return;
5371 	}
5372 	if (len > maxlen - sizeof(*desc)) {
5373 		IC2IFP(ic)->if_ierrors++;
5374 		m_freem(m);
5375 		return;
5376 	}
5377 
5378 	m->m_data = pktdata + sizeof(*desc);
5379 	m->m_pkthdr.len = m->m_len = len;
5380 
5381 	/* Account for padding following the frame header. */
5382 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5383 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5384 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5385 		if (type == IEEE80211_FC0_TYPE_CTL) {
5386 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5387 			case IEEE80211_FC0_SUBTYPE_CTS:
5388 				hdrlen = sizeof(struct ieee80211_frame_cts);
5389 				break;
5390 			case IEEE80211_FC0_SUBTYPE_ACK:
5391 				hdrlen = sizeof(struct ieee80211_frame_ack);
5392 				break;
5393 			default:
5394 				hdrlen = sizeof(struct ieee80211_frame_min);
5395 				break;
5396 			}
5397 		} else
5398 			hdrlen = ieee80211_get_hdrlen(wh);
5399 
5400 		if ((le16toh(desc->status) &
5401 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5402 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5403 			/* Padding is inserted after the IV. */
5404 			hdrlen += IEEE80211_CCMP_HDRLEN;
5405 		}
5406 
5407 		memmove(m->m_data + 2, m->m_data, hdrlen);
5408 		m_adj(m, 2);
5409 	}
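	/*
	 * Buffer layout sketch for the padding case above (illustrative
	 * only):
	 *
	 *   [ MAC header (hdrlen) ][ 2 pad bytes ][ frame body ... ]
	 *
	 * The memmove() shifts the header 2 bytes forward, overwriting
	 * the pad, and m_adj() trims the 2 stale leading bytes, leaving
	 * header and body contiguous again.
	 */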
5410 
5411 	/*
5412 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5413 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5414 	 * bit set in the frame header. We need to clear this bit ourselves.
5415 	 *
5416 	 * And we must allow the same CCMP PN for subframes following the
5417 	 * first subframe. Otherwise they would be discarded as replays.
5418 	 */
5419 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5420 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5421 		uint8_t subframe_idx = (desc->amsdu_info &
5422 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5423 		if (subframe_idx > 0)
5424 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5425 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5426 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5427 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5428 			    struct ieee80211_qosframe_addr4 *);
5429 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5430 
5431 			/* HW reverses addr3 and addr4. */
5432 			iwm_flip_address(qwh4->i_addr3);
5433 			iwm_flip_address(qwh4->i_addr4);
5434 		} else if (ieee80211_has_qos(wh) &&
5435 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5436 			struct ieee80211_qosframe *qwh = mtod(m,
5437 			    struct ieee80211_qosframe *);
5438 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5439 
5440 			/* HW reverses addr3. */
5441 			iwm_flip_address(qwh->i_addr3);
5442 		}
5443 	}
5444 
5445 	/*
5446 	 * Verify decryption before duplicate detection. The latter uses
5447 	 * the TID supplied in QoS frame headers and this TID is implicitly
5448 	 * verified as part of the CCMP nonce.
5449 	 */
5450 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5451 		m_freem(m);
5452 		return;
5453 	}
5454 
5455 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5456 		m_freem(m);
5457 		return;
5458 	}
5459 
5460 	phy_info = le16toh(desc->phy_info);
5461 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
5462 	chanidx = desc->v1.channel;
5463 	device_timestamp = desc->v1.gp2_on_air_rise;
5464 
5465 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
5466 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
5467 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
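	/*
	 * Example of the normalization above (illustrative only, and
	 * assuming IWM_MIN_DBM is -100): a signal of -60dBm maps to
	 * (0 - (-100)) + (-60) = 40, which is then clipped to the
	 * interface's maximum RSSI value.
	 */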
5468 
5469 	rxi.rxi_rssi = rssi;
5470 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5471 	rxi.rxi_chan = chanidx;
5472 
5473 	if (iwm_rx_reorder(sc, m, chanidx, desc,
5474 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5475 	    rate_n_flags, device_timestamp, &rxi, ml))
5476 		return;
5477 
5478 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5479 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5480 	    rate_n_flags, device_timestamp, &rxi, ml);
5481 }
5482 
5483 void
5484 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5485 {
5486 	struct ieee80211com *ic = &sc->sc_ic;
5487 	struct iwm_node *in = (void *)ni;
5488 	int old_txmcs = ni->ni_txmcs;
5489 	int old_nss = ni->ni_vht_ss;
5490 
5491 	if (ni->ni_flags & IEEE80211_NODE_VHT)
5492 		ieee80211_ra_vht_choose(&in->in_rn_vht, ic, ni);
5493 	else
5494 		ieee80211_ra_choose(&in->in_rn, ic, ni);
5495 
5496 	/*
5497 	 * If RA has chosen a new TX rate we must update
5498 	 * the firmware's LQ rate table.
5499 	 */
5500 	if (ni->ni_txmcs != old_txmcs || ni->ni_vht_ss != old_nss)
5501 		iwm_setrates(in, 1);
5502 }
5503 
5504 void
5505 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5506     int txmcs, uint8_t failure_frame, int txfail)
5507 {
5508 	struct ieee80211com *ic = &sc->sc_ic;
5509 	struct iwm_node *in = (void *)ni;
5510 
5511 	/* Ignore Tx reports which don't match our last LQ command. */
5512 	if (txmcs != ni->ni_txmcs) {
5513 		if (++in->lq_rate_mismatch > 15) {
5514 			/* Try to sync firmware with the driver... */
5515 			iwm_setrates(in, 1);
5516 			in->lq_rate_mismatch = 0;
5517 		}
5518 	} else {
5519 		int mcs = txmcs;
5520 		const struct ieee80211_ht_rateset *rs =
5521 		    ieee80211_ra_get_ht_rateset(txmcs,
5522 		        ieee80211_node_supports_ht_chan40(ni),
5523 			ieee80211_ra_use_ht_sgi(ni));
5524 		unsigned int retries = 0, i;
5525 
5526 		in->lq_rate_mismatch = 0;
5527 
5528 		for (i = 0; i < failure_frame; i++) {
5529 			if (mcs > rs->min_mcs) {
5530 				ieee80211_ra_add_stats_ht(&in->in_rn,
5531 				    ic, ni, mcs, 1, 1);
5532 				mcs--;
5533 			} else
5534 				retries++;
5535 		}
5536 
5537 		if (txfail && failure_frame == 0) {
5538 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5539 			    txmcs, 1, 1);
5540 		} else {
5541 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5542 			    mcs, retries + 1, retries);
5543 		}
5544 
5545 		iwm_ra_choose(sc, ni);
5546 	}
5547 }
5548 
5549 void
5550 iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5551     int txmcs, int nss, uint8_t failure_frame, int txfail)
5552 {
5553 	struct ieee80211com *ic = &sc->sc_ic;
5554 	struct iwm_node *in = (void *)ni;
5555 	uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5556 	uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5557 
5558 	/* Ignore Tx reports which don't match our last LQ command. */
5559 	if (txmcs != ni->ni_txmcs || nss != ni->ni_vht_ss) {
5560 		if (++in->lq_rate_mismatch > 15) {
5561 			/* Try to sync firmware with the driver... */
5562 			iwm_setrates(in, 1);
5563 			in->lq_rate_mismatch = 0;
5564 		}
5565 	} else {
5566 		int mcs = txmcs;
5567 		unsigned int retries = 0, i;
5568 
5569 		if (in->in_phyctxt) {
5570 			vht_chan_width = in->in_phyctxt->vht_chan_width;
5571 			sco = in->in_phyctxt->sco;
5572 		}
5573 		in->lq_rate_mismatch = 0;
5574 
5575 		for (i = 0; i < failure_frame; i++) {
5576 			if (mcs > 0) {
5577 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5578 				    ic, ni, mcs, nss, 1, 1);
5579 				if (vht_chan_width >=
5580 				    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5581 					/*
5582 					 * First 4 Tx attempts used same MCS,
5583 					 * twice at 80MHz and twice at 40MHz.
5584 					 */
5585 					if (i >= 4)
5586 						mcs--;
5587 				} else if (sco == IEEE80211_HTOP0_SCO_SCA ||
5588 				    sco == IEEE80211_HTOP0_SCO_SCB) {
5589 					/*
5590 					 * First 4 Tx attempts used same MCS,
5591 					 * four times at 40MHz.
5592 					 */
5593 					if (i >= 4)
5594 						mcs--;
5595 				} else
5596 					mcs--;
5597 			} else
5598 				retries++;
5599 		}
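		/*
		 * Worked example of the loop above (illustrative only):
		 * with txmcs = 7, failure_frame = 6 and an 80MHz phy
		 * context, attempts i = 0..3 are charged to MCS 7 without
		 * stepping down, attempt i = 4 is charged to MCS 7 and
		 * then steps down, and attempt i = 5 is charged to MCS 6.
		 * Net effect: five failures recorded at MCS 7 and one at
		 * MCS 6.
		 */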
5600 
5601 		if (txfail && failure_frame == 0) {
5602 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5603 			    txmcs, nss, 1, 1);
5604 		} else {
5605 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5606 			    mcs, nss, retries + 1, retries);
5607 		}
5608 
5609 		iwm_ra_choose(sc, ni);
5610 	}
5611 }
5612 
5613 void
5614 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5615     struct iwm_node *in, int txmcs, int txrate)
5616 {
5617 	struct ieee80211com *ic = &sc->sc_ic;
5618 	struct ieee80211_node *ni = &in->in_ni;
5619 	struct ifnet *ifp = IC2IFP(ic);
5620 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5621 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5622 	uint32_t initial_rate = le32toh(tx_resp->initial_rate);
5623 	int txfail;
5624 
5625 	KASSERT(tx_resp->frame_count == 1);
5626 
5627 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
5628 	    status != IWM_TX_STATUS_DIRECT_DONE);
5629 
5630 	/*
5631 	 * Update rate control statistics.
5632 	 * Only report frames which were actually queued with the currently
5633 	 * selected Tx rate. Because Tx queues are relatively long we may
5634 	 * encounter previously selected rates here during Tx bursts.
5635 	 * Providing feedback based on such frames can lead to suboptimal
5636 	 * Tx rate control decisions.
5637 	 */
5638 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5639 		if (txrate != ni->ni_txrate) {
5640 			if (++in->lq_rate_mismatch > 15) {
5641 				/* Try to sync firmware with the driver... */
5642 				iwm_setrates(in, 1);
5643 				in->lq_rate_mismatch = 0;
5644 			}
5645 		} else {
5646 			in->lq_rate_mismatch = 0;
5647 
5648 			in->in_amn.amn_txcnt++;
5649 			if (txfail)
5650 				in->in_amn.amn_retrycnt++;
5651 			if (tx_resp->failure_frame > 0)
5652 				in->in_amn.amn_retrycnt++;
5653 		}
5654 	} else if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5655 	    ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5656 	    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5657 		int txmcs = initial_rate & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5658 		int nss = ((initial_rate & IWM_RATE_VHT_MCS_NSS_MSK) >>
5659 		    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5660 		iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5661 		    tx_resp->failure_frame, txfail);
5662 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5663 	    (initial_rate & IWM_RATE_MCS_HT_MSK)) {
5664 		int txmcs = initial_rate &
5665 		    (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5666 		iwm_ht_single_rate_control(sc, ni, txmcs,
5667 		    tx_resp->failure_frame, txfail);
5668 	}
5669 
5670 	if (txfail)
5671 		ifp->if_oerrors++;
5672 }
5673 
5674 void
5675 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5676 {
5677 	struct ieee80211com *ic = &sc->sc_ic;
5678 
5679 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5680 	    BUS_DMASYNC_POSTWRITE);
5681 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5682 	m_freem(txd->m);
5683 	txd->m = NULL;
5684 
5685 	KASSERT(txd->in);
5686 	ieee80211_release_node(ic, &txd->in->in_ni);
5687 	txd->in = NULL;
5688 	txd->ampdu_nframes = 0;
5689 	txd->ampdu_txmcs = 0;
5690 	txd->ampdu_txnss = 0;
5691 }
5692 
5693 void
5694 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5695 {
5696 	struct iwm_tx_data *txd;
5697 
5698 	while (ring->tail != idx) {
5699 		txd = &ring->data[ring->tail];
5700 		if (txd->m != NULL) {
5701 			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5702 			iwm_txd_done(sc, txd);
5703 			ring->queued--;
5704 		}
5705 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5706 	}
5707 
5708 	wakeup(ring);
5709 }
5710 
5711 void
5712 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5713     struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5714     uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5715     struct iwm_agg_tx_status *agg_status)
5716 {
5717 	struct ieee80211com *ic = &sc->sc_ic;
5718 	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5719 	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5720 	struct ieee80211_node *ni = &in->in_ni;
5721 	struct ieee80211_tx_ba *ba;
5722 	int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5723 	    status != IWM_TX_STATUS_DIRECT_DONE);
5724 	uint16_t seq;
5725 
5726 	if (ic->ic_state != IEEE80211_S_RUN)
5727 		return;
5728 
5729 	if (nframes > 1) {
5730 		int i;
5731 		/*
5732 		 * Collect information about this A-MPDU.
5733 		 */
5734 
5735 		for (i = 0; i < nframes; i++) {
5736 			uint8_t qid = agg_status[i].qid;
5737 			uint8_t idx = agg_status[i].idx;
5738 			uint16_t txstatus = (le16toh(agg_status[i].status) &
5739 			    IWM_AGG_TX_STATE_STATUS_MSK);
5740 
5741 			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5742 				continue;
5743 
5744 			if (qid != cmd_hdr->qid)
5745 				continue;
5746 
5747 			txdata = &txq->data[idx];
5748 			if (txdata->m == NULL)
5749 				continue;
5750 
5751 			/* The Tx rate was the same for all subframes. */
5752 			if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5753 			    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5754 				txdata->ampdu_txmcs = initial_rate &
5755 				    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5756 				txdata->ampdu_txnss = ((initial_rate &
5757 				    IWM_RATE_VHT_MCS_NSS_MSK) >>
5758 				    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5759 				txdata->ampdu_nframes = nframes;
5760 			} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5761 				txdata->ampdu_txmcs = initial_rate &
5762 				    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5763 				    IWM_RATE_HT_MCS_NSS_MSK);
5764 				txdata->ampdu_nframes = nframes;
5765 			}
5766 		}
5767 		return;
5768 	}
5769 
5770 	ba = &ni->ni_tx_ba[tid];
5771 	if (ba->ba_state != IEEE80211_BA_AGREED)
5772 		return;
5773 	if (SEQ_LT(ssn, ba->ba_winstart))
5774 		return;
5775 
5776 	/* This was a final single-frame Tx attempt for frame SSN-1. */
5777 	seq = (ssn - 1) & 0xfff;
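	/*
	 * Illustrative only: the '& 0xfff' keeps the subtraction within
	 * the 12-bit sequence number space; if ssn == 0 the frame in
	 * question has sequence number 0xfff (4095).
	 */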
5778 
5779 	/*
5780 	 * Skip rate control if our Tx rate is fixed.
5781 	 * Don't report frames to MiRA which were sent at a different
5782 	 * Tx rate than ni->ni_txmcs.
5783 	 */
5784 	if (ic->ic_fixed_mcs == -1) {
5785 		if (txdata->ampdu_nframes > 1) {
5786 			/*
5787 			 * This frame was once part of an A-MPDU.
5788 			 * Report one failed A-MPDU Tx attempt.
5789 			 * The firmware might have made several such
5790 			 * attempts but we don't keep track of this.
5791 			 */
5792 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5793 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5794 				    ic, ni, txdata->ampdu_txmcs,
5795 				    txdata->ampdu_txnss, 1, 1);
5796 			} else {
5797 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5798 				    txdata->ampdu_txmcs, 1, 1);
5799 			}
5800 		}
5801 
5802 		/* Report the final single-frame Tx attempt. */
5803 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5804 		    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5805 			int txmcs = initial_rate &
5806 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5807 			int nss = ((initial_rate &
5808 			    IWM_RATE_VHT_MCS_NSS_MSK) >>
5809 			    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5810 			iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5811 			    failure_frame, txfail);
5812 		} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5813 			int txmcs = initial_rate &
5814 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5815 			   IWM_RATE_HT_MCS_NSS_MSK);
5816 			iwm_ht_single_rate_control(sc, ni, txmcs,
5817 			    failure_frame, txfail);
5818 		}
5819 	}
5820 
5821 	if (txfail)
5822 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5823 
5824 	/*
5825 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5826 	 * in firmware's BA window. Firmware is not going to retransmit any
5827 	 * frames before its BA window so mark them all as done.
5828 	 */
5829 	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5830 	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5831 	iwm_clear_oactive(sc, txq);
5832 }
5833 
5834 void
5835 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5836     struct iwm_rx_data *data)
5837 {
5838 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5839 	int idx = cmd_hdr->idx;
5840 	int qid = cmd_hdr->qid;
5841 	struct iwm_tx_ring *ring = &sc->txq[qid];
5842 	struct iwm_tx_data *txd;
5843 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5844 	uint32_t ssn;
5845 	uint32_t len = iwm_rx_packet_len(pkt);
5846 
5847 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5848 	    BUS_DMASYNC_POSTREAD);
5849 
5850 	/* Sanity checks. */
5851 	if (sizeof(*tx_resp) > len)
5852 		return;
5853 	if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5854 		return;
5855 	if (qid > IWM_LAST_AGG_TX_QUEUE)
5856 		return;
5857 	if (sizeof(*tx_resp) + sizeof(ssn) +
5858 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5859 		return;
5860 
5861 	sc->sc_tx_timer[qid] = 0;
5862 
5863 	txd = &ring->data[idx];
5864 	if (txd->m == NULL)
5865 		return;
5866 
5867 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5868 	ssn = le32toh(ssn) & 0xfff;
5869 	if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5870 		int status;
5871 		status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5872 		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5873 		    le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5874 		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
5875 	} else {
5876 		/*
5877 		 * Even though this is not an agg queue, we must only free
5878 		 * frames before the firmware's starting sequence number.
5879 		 */
5880 		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5881 		iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5882 		iwm_clear_oactive(sc, ring);
5883 	}
5884 }
5885 
5886 void
5887 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5888 {
5889 	struct ieee80211com *ic = &sc->sc_ic;
5890 	struct ifnet *ifp = IC2IFP(ic);
5891 
5892 	if (ring->queued < IWM_TX_RING_LOMARK) {
5893 		sc->qfullmsk &= ~(1 << ring->qid);
5894 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5895 			ifq_clr_oactive(&ifp->if_snd);
5896 			/*
5897 			 * Well, we're in interrupt context, but then again
5898 			 * I guess net80211 does all sorts of stunts in
5899 			 * interrupt context, so maybe this is no biggie.
5900 			 */
5901 			(*ifp->if_start)(ifp);
5902 		}
5903 	}
5904 }
5905 
5906 void
5907 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5908     struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5909 {
5910 	struct ieee80211com *ic = &sc->sc_ic;
5911 	struct iwm_node *in = (void *)ni;
5912 	int idx, end_idx;
5913 
5914 	/*
5915 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5916 	 */
5917 	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5918 	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5919 	while (idx != end_idx) {
5920 		struct iwm_tx_data *txdata = &txq->data[idx];
5921 		if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5922 			/*
5923 			 * We can assume that this subframe has been ACKed
5924 			 * because ACK failures come as single frames and
5925 			 * before failing an A-MPDU subframe the firmware
5926 			 * sends it as a single frame at least once.
5927 			 */
5928 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5929 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5930 				    ic, ni, txdata->ampdu_txmcs,
5931 				    txdata->ampdu_txnss, 1, 0);
5932 			} else {
5933 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5934 				    txdata->ampdu_txmcs, 1, 0);
5935 			}
5936 			/* Report this frame only once. */
5937 			txdata->ampdu_nframes = 0;
5938 		}
5939 
5940 		idx = (idx + 1) % IWM_TX_RING_COUNT;
5941 	}
5942 
5943 	iwm_ra_choose(sc, ni);
5944 }
5945 
5946 void
5947 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5948 {
5949 	struct iwm_ba_notif *ban = (void *)pkt->data;
5950 	struct ieee80211com *ic = &sc->sc_ic;
5951 	struct ieee80211_node *ni = ic->ic_bss;
5952 	struct iwm_node *in = (void *)ni;
5953 	struct ieee80211_tx_ba *ba;
5954 	struct iwm_tx_ring *ring;
5955 	uint16_t seq, ssn;
5956 	int qid;
5957 
5958 	if (ic->ic_state != IEEE80211_S_RUN)
5959 		return;
5960 
5961 	if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5962 		return;
5963 
5964 	if (ban->sta_id != IWM_STATION_ID ||
5965 	    !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5966 		return;
5967 
5968 	qid = le16toh(ban->scd_flow);
5969 	if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5970 		return;
5971 
5972 	/* Protect against a firmware bug where the queue/TID are off. */
5973 	if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5974 		return;
5975 
5976 	sc->sc_tx_timer[qid] = 0;
5977 
5978 	ba = &ni->ni_tx_ba[ban->tid];
5979 	if (ba->ba_state != IEEE80211_BA_AGREED)
5980 		return;
5981 
5982 	ring = &sc->txq[qid];
5983 
5984 	/*
5985 	 * The first bit in ban->bitmap corresponds to the sequence number
5986 	 * stored in the sequence control field ban->seq_ctl.
5987 	 * Multiple BA notifications in a row may use this number, with
5988 	 * additional bits being set in ban->bitmap. It is unclear how the
5989 	 * firmware decides to shift this window forward.
5990 	 * We rely on ba->ba_winstart instead.
5991 	 */
5992 	seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
5993 
5994 	/*
5995 	 * The firmware's new BA window starting sequence number
5996 	 * is reported in ban->scd_ssn and corresponds to the first
5997 	 * hole in the BA bitmap, implying that all frames between
5998 	 * 'seq' and 'ssn' (non-inclusive) have been acked.
5999 	 */
6000 	ssn = le16toh(ban->scd_ssn);
6001 
6002 	if (SEQ_LT(ssn, ba->ba_winstart))
6003 		return;
6004 
6005 	/* Skip rate control if our Tx rate is fixed. */
6006 	if (ic->ic_fixed_mcs == -1)
6007 		iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
6008 		    ba->ba_winstart, ssn);
6009 
6010 	/*
6011 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
6012 	 * in firmware's BA window. Firmware is not going to retransmit any
6013 	 * frames before its BA window so mark them all as done.
6014 	 */
6015 	ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
6016 	iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
6017 	iwm_clear_oactive(sc, ring);
6018 }
6019 
6020 void
6021 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
6022     struct iwm_rx_data *data)
6023 {
6024 	struct ieee80211com *ic = &sc->sc_ic;
6025 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
6026 	uint32_t missed;
6027 
6028 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
6029 	    (ic->ic_state != IEEE80211_S_RUN))
6030 		return;
6031 
6032 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
6033 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
6034 
6035 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
6036 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
6037 		if (ic->ic_if.if_flags & IFF_DEBUG)
6038 			printf("%s: receiving no beacons from %s; checking if "
6039 			    "this AP is still responding to probe requests\n",
6040 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
6041 		/*
6042 		 * Rather than go directly to scan state, try to send a
6043 		 * directed probe request first. If that fails then the
6044 		 * state machine will drop us into scanning after timing
6045 		 * out waiting for a probe response.
6046 		 */
6047 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
6048 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
6049 	}
6051 }
6052 
6053 int
6054 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
6055 {
6056 	struct iwm_binding_cmd cmd;
6057 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
6058 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
6059 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
6060 	uint32_t status;
6061 	size_t len;
6062 
6063 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
6064 		panic("binding already added");
6065 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6066 		panic("binding already removed");
6067 
6068 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
6069 		return EINVAL;
6070 
6071 	memset(&cmd, 0, sizeof(cmd));
6072 
6073 	cmd.id_and_color
6074 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6075 	cmd.action = htole32(action);
6076 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6077 
6078 	cmd.macs[0] = htole32(mac_id);
6079 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
6080 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
6081 
6082 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
6083 	    !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
6084 		cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
6085 	else
6086 		cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
6087 
6088 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
6089 		len = sizeof(cmd);
6090 	else
6091 		len = sizeof(struct iwm_binding_cmd_v1);
6092 	status = 0;
6093 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
6094 	    &status);
6095 	if (err == 0 && status != 0)
6096 		err = EIO;
6097 
6098 	return err;
6099 }
6100 
6101 void
6102 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6103     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
6104 {
6105 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
6106 
6107 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6108 	    ctxt->color));
6109 	cmd->action = htole32(action);
6110 	cmd->apply_time = htole32(apply_time);
6111 }
6112 
6113 void
6114 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
6115     struct ieee80211_channel *chan, uint8_t chains_static,
6116     uint8_t chains_dynamic, uint8_t sco, uint8_t vht_chan_width)
6117 {
6118 	struct ieee80211com *ic = &sc->sc_ic;
6119 	uint8_t active_cnt, idle_cnt;
6120 
6121 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6122 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6123 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
6124 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6125 		cmd->ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6126 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6127 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6128 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6129 			/* secondary chan above -> control chan below */
6130 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6131 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6132 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6133 			/* secondary chan below -> control chan above */
6134 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6135 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6136 		} else {
6137 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6138 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6139 		}
6140 	} else {
6141 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6142 		cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6143 	}
6144 
6145 	/* Set the Rx chains. */
6146 	idle_cnt = chains_static;
6147 	active_cnt = chains_dynamic;
6148 
6149 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6150 					IWM_PHY_RX_CHAIN_VALID_POS);
6151 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6152 	cmd->rxchain_info |= htole32(active_cnt <<
6153 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6154 
6155 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6156 }
6157 
6158 uint8_t
6159 iwm_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
6160 {
6161 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
6162 	int primary_idx = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
6163 	/*
6164 	 * The FW is expected to check the control channel position only
6165 	 * when in HT/VHT and the channel width is not 20MHz. Return
6166 	 * this value as the default one:
6167 	 */
6168 	uint8_t pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
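	/*
	 * Worked example (illustrative only): on the 80MHz block formed
	 * by channels 36/40/44/48 the center frequency index is 42, so a
	 * primary channel of 36, 40, 44 or 48 gives an offset of -6, -2,
	 * +2 or +6, selecting CTRL_POS_2_BELOW, 1_BELOW, 1_ABOVE or
	 * 2_ABOVE respectively.
	 */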
6169 
6170 	switch (primary_idx - center_idx) {
6171 	case -6:
6172 		pos = IWM_PHY_VHT_CTRL_POS_2_BELOW;
6173 		break;
6174 	case -2:
6175 		pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6176 		break;
6177 	case 2:
6178 		pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6179 		break;
6180 	case 6:
6181 		pos = IWM_PHY_VHT_CTRL_POS_2_ABOVE;
6182 		break;
6183 	default:
6184 		break;
6185 	}
6186 
6187 	return pos;
6188 }
6189 
6190 int
6191 iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6192     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6193     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6194 {
6195 	struct ieee80211com *ic = &sc->sc_ic;
6196 	struct iwm_phy_context_cmd_uhb cmd;
6197 	uint8_t active_cnt, idle_cnt;
6198 	struct ieee80211_channel *chan = ctxt->channel;
6199 
6200 	memset(&cmd, 0, sizeof(cmd));
6201 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6202 	    ctxt->color));
6203 	cmd.action = htole32(action);
6204 	cmd.apply_time = htole32(apply_time);
6205 
6206 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6207 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6208 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
6209 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6210 		cmd.ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6211 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6212 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6213 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6214 			/* secondary chan above -> control chan below */
6215 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6216 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6217 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6218 			/* secondary chan below -> control chan above */
6219 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6220 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6221 		} else {
6222 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6223 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6224 		}
6225 	} else {
6226 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6227 		cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6228 	}
6229 
6230 	idle_cnt = chains_static;
6231 	active_cnt = chains_dynamic;
6232 	cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6233 					IWM_PHY_RX_CHAIN_VALID_POS);
6234 	cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6235 	cmd.rxchain_info |= htole32(active_cnt <<
6236 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6237 	cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6238 
6239 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6240 }
6241 
6242 int
6243 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6244     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6245     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6246 {
6247 	struct iwm_phy_context_cmd cmd;
6248 
6249 	/*
6250 	 * Intel increased the size of the fw_channel_info struct and neglected
6251 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6252 	 * member in the middle.
6253 	 * To keep things simple we use a separate function to handle the larger
6254 	 * variant of the phy context command.
6255 	 */
6256 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6257 		return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6258 		    chains_dynamic, action, apply_time, sco, vht_chan_width);
6259 
6260 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6261 
6262 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6263 	    chains_static, chains_dynamic, sco, vht_chan_width);
6264 
6265 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6266 	    sizeof(struct iwm_phy_context_cmd), &cmd);
6267 }
6268 
6269 int
6270 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6271 {
6272 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6273 	struct iwm_tfd *desc;
6274 	struct iwm_tx_data *txdata;
6275 	struct iwm_device_cmd *cmd;
6276 	struct mbuf *m;
6277 	bus_addr_t paddr;
6278 	uint32_t addr_lo;
6279 	int err = 0, i, paylen, off, s;
6280 	int idx, code, async, group_id;
6281 	size_t hdrlen, datasz;
6282 	uint8_t *data;
6283 	int generation = sc->sc_generation;
6284 
6285 	code = hcmd->id;
6286 	async = hcmd->flags & IWM_CMD_ASYNC;
6287 	idx = ring->cur;
6288 
6289 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6290 		paylen += hcmd->len[i];
6291 	}
6292 
6293 	/* If this command waits for a response, allocate response buffer. */
6294 	hcmd->resp_pkt = NULL;
6295 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
6296 		uint8_t *resp_buf;
6297 		KASSERT(!async);
6298 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6299 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6300 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
6301 			return ENOSPC;
6302 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6303 		    M_NOWAIT | M_ZERO);
6304 		if (resp_buf == NULL)
6305 			return ENOMEM;
6306 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
6307 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6308 	} else {
6309 		sc->sc_cmd_resp_pkt[idx] = NULL;
6310 	}
6311 
6312 	s = splnet();
6313 
6314 	desc = &ring->desc[idx];
6315 	txdata = &ring->data[idx];
6316 
6317 	group_id = iwm_cmd_groupid(code);
6318 	if (group_id != 0) {
6319 		hdrlen = sizeof(cmd->hdr_wide);
6320 		datasz = sizeof(cmd->data_wide);
6321 	} else {
6322 		hdrlen = sizeof(cmd->hdr);
6323 		datasz = sizeof(cmd->data);
6324 	}
6325 
6326 	if (paylen > datasz) {
6327 		/* Command is too large to fit in pre-allocated space. */
6328 		size_t totlen = hdrlen + paylen;
6329 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6330 			printf("%s: firmware command too long (%zd bytes)\n",
6331 			    DEVNAME(sc), totlen);
6332 			err = EINVAL;
6333 			goto out;
6334 		}
6335 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
6336 		if (m == NULL) {
6337 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6338 			    DEVNAME(sc), totlen);
6339 			err = ENOMEM;
6340 			goto out;
6341 		}
6342 		cmd = mtod(m, struct iwm_device_cmd *);
6343 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6344 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6345 		if (err) {
6346 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6347 			    DEVNAME(sc), totlen);
6348 			m_freem(m);
6349 			goto out;
6350 		}
6351 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6352 		paddr = txdata->map->dm_segs[0].ds_addr;
6353 	} else {
6354 		cmd = &ring->cmd[idx];
6355 		paddr = txdata->cmd_paddr;
6356 	}
6357 
6358 	if (group_id != 0) {
6359 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6360 		cmd->hdr_wide.group_id = group_id;
6361 		cmd->hdr_wide.qid = ring->qid;
6362 		cmd->hdr_wide.idx = idx;
6363 		cmd->hdr_wide.length = htole16(paylen);
6364 		cmd->hdr_wide.version = iwm_cmd_version(code);
6365 		data = cmd->data_wide;
6366 	} else {
6367 		cmd->hdr.code = code;
6368 		cmd->hdr.flags = 0;
6369 		cmd->hdr.qid = ring->qid;
6370 		cmd->hdr.idx = idx;
6371 		data = cmd->data;
6372 	}
6373 
6374 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6375 		if (hcmd->len[i] == 0)
6376 			continue;
6377 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6378 		off += hcmd->len[i];
6379 	}
6380 	KASSERT(off == paylen);
6381 
6382 	/* lo field is not aligned */
6383 	addr_lo = htole32((uint32_t)paddr);
6384 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6385 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
6386 	    | ((hdrlen + paylen) << 4));
6387 	desc->num_tbs = 1;
6388 
6389 	if (paylen > datasz) {
6390 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6391 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6392 	} else {
6393 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6394 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6395 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6396 	}
6397 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6398 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6399 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6400 
6401 	/*
6402 	 * Wake up the NIC to make sure that the firmware will see the host
6403 	 * command - we will let the NIC sleep once all the host commands
6404 	 * have returned. This needs to be done only on 7000 family NICs.
6405 	 */
6406 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6407 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6408 			err = EBUSY;
6409 			goto out;
6410 		}
6411 	}
6412 
6413 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6414 
6415 	/* Kick command ring. */
6416 	ring->queued++;
6417 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6418 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6419 
6420 	if (!async) {
6421 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6422 		if (err == 0) {
6423 			/* if hardware is no longer up, return error */
6424 			if (generation != sc->sc_generation) {
6425 				err = ENXIO;
6426 				goto out;
6427 			}
6428 
6429 			/* Response buffer will be freed in iwm_free_resp(). */
6430 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6431 			sc->sc_cmd_resp_pkt[idx] = NULL;
6432 		} else if (generation == sc->sc_generation) {
6433 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6434 			    sc->sc_cmd_resp_len[idx]);
6435 			sc->sc_cmd_resp_pkt[idx] = NULL;
6436 		}
6437 	}
6438  out:
6439 	splx(s);
6440 
6441 	return err;
6442 }
6443 
6444 int
6445 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6446     uint16_t len, const void *data)
6447 {
6448 	struct iwm_host_cmd cmd = {
6449 		.id = id,
6450 		.len = { len, },
6451 		.data = { data, },
6452 		.flags = flags,
6453 	};
6454 
6455 	return iwm_send_cmd(sc, &cmd);
6456 }
6457 
6458 int
6459 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6460     uint32_t *status)
6461 {
6462 	struct iwm_rx_packet *pkt;
6463 	struct iwm_cmd_response *resp;
6464 	int err, resp_len;
6465 
6466 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6467 	cmd->flags |= IWM_CMD_WANT_RESP;
6468 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6469 
6470 	err = iwm_send_cmd(sc, cmd);
6471 	if (err)
6472 		return err;
6473 
6474 	pkt = cmd->resp_pkt;
6475 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6476 		return EIO;
6477 
6478 	resp_len = iwm_rx_packet_payload_len(pkt);
6479 	if (resp_len != sizeof(*resp)) {
6480 		iwm_free_resp(sc, cmd);
6481 		return EIO;
6482 	}
6483 
6484 	resp = (void *)pkt->data;
6485 	*status = le32toh(resp->status);
6486 	iwm_free_resp(sc, cmd);
6487 	return err;
6488 }
6489 
6490 int
6491 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6492     const void *data, uint32_t *status)
6493 {
6494 	struct iwm_host_cmd cmd = {
6495 		.id = id,
6496 		.len = { len, },
6497 		.data = { data, },
6498 	};
6499 
6500 	return iwm_send_cmd_status(sc, &cmd, status);
6501 }
6502 
6503 void
6504 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6505 {
6506 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6507 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6508 	hcmd->resp_pkt = NULL;
6509 }
6510 
6511 void
6512 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6513 {
6514 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6515 	struct iwm_tx_data *data;
6516 
6517 	if (qid != sc->cmdqid) {
6518 		return;	/* Not a command ack. */
6519 	}
6520 
6521 	data = &ring->data[idx];
6522 
6523 	if (data->m != NULL) {
6524 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6525 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6526 		bus_dmamap_unload(sc->sc_dmat, data->map);
6527 		m_freem(data->m);
6528 		data->m = NULL;
6529 	}
6530 	wakeup(&ring->desc[idx]);
6531 
6532 	if (ring->queued == 0) {
6533 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6534 		    DEVNAME(sc), code));
6535 	} else if (--ring->queued == 0) {
6536 		/*
6537 		 * 7000 family NICs are locked while commands are in progress.
6538 		 * All commands are now done so we may unlock the NIC again.
6539 		 */
6540 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6541 			iwm_nic_unlock(sc);
6542 	}
6543 }
6544 
6545 void
6546 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6547     uint16_t len)
6548 {
6549 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6550 	uint16_t val;
6551 
6552 	scd_bc_tbl = sc->sched_dma.vaddr;
6553 
6554 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6555 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6556 		len = roundup(len, 4) / 4;
6557 
6558 	val = htole16(sta_id << 12 | len);
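	/*
	 * Layout sketch (illustrative only): each byte count table entry
	 * is a 16-bit little-endian value with the station ID in bits
	 * 12-15 and the length in bits 0-11; e.g. sta_id 0 with a length
	 * of 64 encodes as 0x0040.
	 */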
6559 
6560 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6561 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6562 
6563 	/* Update TX scheduler. */
6564 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6565 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6566 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6567 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6568 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6569 }
6570 
6571 void
6572 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6573 {
6574 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6575 	uint16_t val;
6576 
6577 	scd_bc_tbl = sc->sched_dma.vaddr;
6578 
6579 	val = htole16(1 | (sta_id << 12));
6580 
6581 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6582 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6583 
6584 	/* Update TX scheduler. */
6585 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6586 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6587 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6588 
6589 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6590 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6591 }
6592 
6593 /*
6594  * Fill in various bits for management frames, and leave them
6595  * unfilled for data frames (firmware takes care of that).
6596  * Return the selected legacy TX rate, or zero if HT/VHT is used.
6597  */
6598 uint8_t
6599 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6600     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6601 {
6602 	struct ieee80211com *ic = &sc->sc_ic;
6603 	struct ieee80211_node *ni = &in->in_ni;
6604 	const struct iwm_rate *rinfo;
6605 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6606 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6607 	int ridx, rate_flags;
6608 	uint8_t rate = 0;
6609 
6610 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6611 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6612 
6613 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6614 	    type != IEEE80211_FC0_TYPE_DATA) {
6615 		/* for non-data, use the lowest supported rate */
6616 		ridx = min_ridx;
6617 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6618 	} else if (ic->ic_fixed_mcs != -1) {
6619 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6620 			ridx = IWM_FIRST_OFDM_RATE;
6621 		else
6622 			ridx = sc->sc_fixed_ridx;
6623 	} else if (ic->ic_fixed_rate != -1) {
6624 		ridx = sc->sc_fixed_ridx;
6625 	} else {
6626 		int i;
6627 		/* Use firmware rateset retry table. */
6628 		tx->initial_rate_index = 0;
6629 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6630 		if (ni->ni_flags & IEEE80211_NODE_HT) /* VHT implies HT */
6631 			return 0;
6632 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6633 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6634 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6635 			if (iwm_rates[i].rate == (ni->ni_txrate &
6636 			    IEEE80211_RATE_VAL)) {
6637 				ridx = i;
6638 				break;
6639 			}
6640 		}
6641 		return iwm_rates[ridx].rate & 0xff;
6642 	}
6643 
6644 	rinfo = &iwm_rates[ridx];
6645 	if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 &&
6646 	    iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6647 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6648 	else
6649 		rate_flags = iwm_valid_siso_ant_rate_mask(sc);
6650 	if (IWM_RIDX_IS_CCK(ridx))
6651 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
6652 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6653 	    type == IEEE80211_FC0_TYPE_DATA &&
6654 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6655 		uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
6656 		uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
6657 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
6658 		    IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
6659 		    ieee80211_node_supports_vht_chan80(ni))
6660 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
6661 		else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
6662 		    ieee80211_node_supports_ht_chan40(ni))
6663 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6664 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6665 			rate_flags |= IWM_RATE_MCS_VHT_MSK;
6666 		else
6667 			rate_flags |= IWM_RATE_MCS_HT_MSK;
6668 		if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
6669 		    in->in_phyctxt != NULL &&
6670 		    in->in_phyctxt->vht_chan_width == vht_chan_width) {
6671 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_80;
6672 			if (ieee80211_node_supports_vht_sgi80(ni))
6673 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6674 		} else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6675 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
6676 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6677 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6678 			if (ieee80211_node_supports_ht_sgi40(ni))
6679 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6680 		} else if (ieee80211_node_supports_ht_sgi20(ni))
6681 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
6682 		if (ni->ni_flags & IEEE80211_NODE_VHT) {
6683 			/*
6684 			 * ifmedia only provides an MCS index, no NSS.
6685 			 * Use a fixed SISO rate.
6686 			 */
6687 			tx->rate_n_flags = htole32(rate_flags |
6688 			    (ic->ic_fixed_mcs &
6689 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK));
6690 		} else
6691 			tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6692 	} else
6693 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6694 
6695 	return rate;
6696 }
6697 
6698 #define TB0_SIZE 16
6699 int
6700 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6701 {
6702 	struct ieee80211com *ic = &sc->sc_ic;
6703 	struct iwm_node *in = (void *)ni;
6704 	struct iwm_tx_ring *ring;
6705 	struct iwm_tx_data *data;
6706 	struct iwm_tfd *desc;
6707 	struct iwm_device_cmd *cmd;
6708 	struct iwm_tx_cmd *tx;
6709 	struct ieee80211_frame *wh;
6710 	struct ieee80211_key *k = NULL;
6711 	uint8_t rate;
6712 	uint8_t *ivp;
6713 	uint32_t flags;
6714 	u_int hdrlen;
6715 	bus_dma_segment_t *seg;
6716 	uint8_t tid, type, subtype;
6717 	int i, totlen, err, pad;
6718 	int qid, hasqos;
6719 
6720 	wh = mtod(m, struct ieee80211_frame *);
6721 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6722 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6723 	if (type == IEEE80211_FC0_TYPE_CTL)
6724 		hdrlen = sizeof(struct ieee80211_frame_min);
6725 	else
6726 		hdrlen = ieee80211_get_hdrlen(wh);
6727 
6728 	hasqos = ieee80211_has_qos(wh);
6729 	if (type == IEEE80211_FC0_TYPE_DATA)
6730 		tid = IWM_TID_NON_QOS;
6731 	else
6732 		tid = IWM_MAX_TID_COUNT;
6733 
6734 	/*
6735 	 * Map EDCA categories to Tx data queues.
6736 	 *
6737 	 * We use static data queue assignments even in DQA mode. We do not
6738 	 * need to share Tx queues between stations because we only implement
6739 	 * client mode; the firmware's station table contains only one entry
6740 	 * which represents our access point.
6741 	 */
6742 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6743 		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6744 	else
6745 		qid = ac;
6746 
6747 	/* If possible, put this frame on an aggregation queue. */
6748 	if (hasqos) {
6749 		struct ieee80211_tx_ba *ba;
6750 		uint16_t qos = ieee80211_get_qos(wh);
6751 		int qostid = qos & IEEE80211_QOS_TID;
6752 		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6753 
6754 		ba = &ni->ni_tx_ba[qostid];
6755 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6756 		    type == IEEE80211_FC0_TYPE_DATA &&
6757 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6758 		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6759 		    ba->ba_state == IEEE80211_BA_AGREED) {
6760 			qid = agg_qid;
6761 			tid = qostid;
6762 			ac = ieee80211_up_to_ac(ic, qostid);
6763 		}
6764 	}
6765 
6766 	ring = &sc->txq[qid];
6767 	desc = &ring->desc[ring->cur];
6768 	memset(desc, 0, sizeof(*desc));
6769 	data = &ring->data[ring->cur];
6770 
6771 	cmd = &ring->cmd[ring->cur];
6772 	cmd->hdr.code = IWM_TX_CMD;
6773 	cmd->hdr.flags = 0;
6774 	cmd->hdr.qid = ring->qid;
6775 	cmd->hdr.idx = ring->cur;
6776 
6777 	tx = (void *)cmd->data;
6778 	memset(tx, 0, sizeof(*tx));
6779 
6780 	rate = iwm_tx_fill_cmd(sc, in, wh, tx);
6781 
6782 #if NBPFILTER > 0
6783 	if (sc->sc_drvbpf != NULL) {
6784 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6785 		uint16_t chan_flags;
6786 
6787 		tap->wt_flags = 0;
6788 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6789 		chan_flags = ni->ni_chan->ic_flags;
6790 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6791 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6792 			chan_flags &= ~IEEE80211_CHAN_HT;
6793 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6794 		}
6795 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6796 			chan_flags &= ~IEEE80211_CHAN_VHT;
6797 		tap->wt_chan_flags = htole16(chan_flags);
6798 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6799 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6800 		    type == IEEE80211_FC0_TYPE_DATA) {
6801 			tap->wt_rate = (0x80 | ni->ni_txmcs);
6802 		} else
6803 			tap->wt_rate = rate;
6804 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6805 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6806 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6807 
6808 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6809 		    m, BPF_DIRECTION_OUT);
6810 	}
6811 #endif
6812 	totlen = m->m_pkthdr.len;
6813 
6814 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6815 		k = ieee80211_get_txkey(ic, wh, ni);
6816 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6817 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6818 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6819 				return ENOBUFS;
6820 			/* 802.11 header may have moved. */
6821 			wh = mtod(m, struct ieee80211_frame *);
6822 			totlen = m->m_pkthdr.len;
6823 			k = NULL; /* skip hardware crypto below */
6824 		} else {
6825 			/* HW appends CCMP MIC */
6826 			totlen += IEEE80211_CCMP_HDRLEN;
6827 		}
6828 	}
6829 
6830 	flags = 0;
6831 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6832 		flags |= IWM_TX_CMD_FLG_ACK;
6833 	}
6834 
6835 	if (type == IEEE80211_FC0_TYPE_DATA &&
6836 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6837 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6838 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
6839 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6840 
6841 	tx->sta_id = IWM_STATION_ID;
6842 
6843 	if (type == IEEE80211_FC0_TYPE_MGT) {
6844 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6845 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6846 			tx->pm_frame_timeout = htole16(3);
6847 		else
6848 			tx->pm_frame_timeout = htole16(2);
6849 	} else {
6850 		if (type == IEEE80211_FC0_TYPE_CTL &&
6851 		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6852 			struct ieee80211_frame_min *mwh;
6853 			uint8_t *barfrm;
6854 			uint16_t ctl;
6855 			mwh = mtod(m, struct ieee80211_frame_min *);
6856 			barfrm = (uint8_t *)&mwh[1];
6857 			ctl = LE_READ_2(barfrm);
6858 			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6859 			    IEEE80211_BA_TID_INFO_SHIFT;
6860 			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6861 			tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6862 		}
6863 
6864 		tx->pm_frame_timeout = htole16(0);
6865 	}
6866 
6867 	if (hdrlen & 3) {
6868 		/* First segment length must be a multiple of 4. */
6869 		flags |= IWM_TX_CMD_FLG_MH_PAD;
6870 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6871 		pad = 4 - (hdrlen & 3);
6872 	} else
6873 		pad = 0;
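	/*
	 * Example (illustrative only): a QoS data header is 26 bytes, so
	 * hdrlen & 3 == 2 and pad becomes 4 - 2 = 2, keeping the first
	 * TX buffer segment 4-byte aligned.
	 */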
6874 
6875 	tx->len = htole16(totlen);
6876 	tx->tid_tspec = tid;
6877 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6878 
6879 	/* Set physical address of "scratch area". */
6880 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6881 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6882 
6883 	/* Copy 802.11 header in TX command. */
6884 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6885 
6886 	if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6887 		/* Trim 802.11 header and prepend CCMP IV. */
6888 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6889 		ivp = mtod(m, u_int8_t *);
6890 		k->k_tsc++;	/* increment the 48-bit PN */
6891 		ivp[0] = k->k_tsc; /* PN0 */
6892 		ivp[1] = k->k_tsc >> 8; /* PN1 */
6893 		ivp[2] = 0;        /* Rsvd */
6894 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6895 		ivp[4] = k->k_tsc >> 16; /* PN2 */
6896 		ivp[5] = k->k_tsc >> 24; /* PN3 */
6897 		ivp[6] = k->k_tsc >> 32; /* PN4 */
6898 		ivp[7] = k->k_tsc >> 40; /* PN5 */
6899 
6900 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6901 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6902 		/* TX scheduler includes CCMP MIC length. */
6903 		totlen += IEEE80211_CCMP_MICLEN;
6904 	} else {
6905 		/* Trim 802.11 header. */
6906 		m_adj(m, hdrlen);
6907 		tx->sec_ctl = 0;
6908 	}
6909 
6910 	flags |= IWM_TX_CMD_FLG_BT_DIS;
6911 	if (!hasqos)
6912 		flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6913 
6914 	tx->tx_flags |= htole32(flags);
6915 
6916 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6917 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6918 	if (err && err != EFBIG) {
6919 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6920 		m_freem(m);
6921 		return err;
6922 	}
6923 	if (err) {
6924 		/* Too many DMA segments, linearize mbuf. */
6925 		if (m_defrag(m, M_DONTWAIT)) {
6926 			m_freem(m);
6927 			return ENOBUFS;
6928 		}
6929 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6930 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6931 		if (err) {
6932 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6933 			    err);
6934 			m_freem(m);
6935 			return err;
6936 		}
6937 	}
6938 	data->m = m;
6939 	data->in = in;
6940 	data->txmcs = ni->ni_txmcs;
6941 	data->txrate = ni->ni_txrate;
6942 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6943 	data->ampdu_txnss = ni->ni_vht_ss; /* updated upon Tx interrupt */
6944 
6945 	/* Fill TX descriptor. */
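	/*
	 * The first two TBs point at the command ring slot itself: TB0
	 * covers the first TB0_SIZE bytes (command header and the start
	 * of the Tx command), TB1 covers the rest of the Tx command plus
	 * the copied 802.11 header and pad. The mbuf's DMA segments
	 * follow as TB2 and up.
	 */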
6946 	desc->num_tbs = 2 + data->map->dm_nsegs;
6947 
6948 	desc->tbs[0].lo = htole32(data->cmd_paddr);
6949 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6950 	    (TB0_SIZE << 4));
6951 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6952 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6953 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6954 	      + hdrlen + pad - TB0_SIZE) << 4));
6955 
6956 	/* Other DMA segments are for data payload. */
6957 	seg = data->map->dm_segs;
6958 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6959 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
6960 		desc->tbs[i+2].hi_n_len =
6961 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
6962 		    ((seg->ds_len) << 4));
6963 	}
6964 
6965 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6966 	    BUS_DMASYNC_PREWRITE);
6967 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6968 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6969 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6970 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6971 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6972 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6973 
6974 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6975 
6976 	/* Kick TX ring. */
6977 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6978 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6979 
6980 	/* Mark TX ring as full if we reach a certain threshold. */
6981 	if (++ring->queued > IWM_TX_RING_HIMARK) {
6982 		sc->qfullmsk |= 1 << ring->qid;
6983 	}
6984 
6985 	if (ic->ic_if.if_flags & IFF_UP)
6986 		sc->sc_tx_timer[ring->qid] = 15;
6987 
6988 	return 0;
6989 }
6990 
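/*
 * Ask the firmware to flush pending frames for our station on all TIDs
 * (tid_mask 0xffff). An error is reported but handling is left to the
 * caller.
 */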
6991 int
6992 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
6993 {
6994 	struct iwm_tx_path_flush_cmd flush_cmd = {
6995 		.sta_id = htole32(IWM_STATION_ID),
6996 		.tid_mask = htole16(0xffff),
6997 	};
6998 	int err;
6999 
7000 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
7001 	    sizeof(flush_cmd), &flush_cmd);
7002 	if (err)
7003 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
7004 	return err;
7005 }
7006 
7007 #define IWM_FLUSH_WAIT_MS	2000
7008 
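/*
 * Wait for the Tx rings to drain. Each iteration sleeps on the ring
 * itself; the Tx completion path is expected to wake such sleepers as
 * ring->queued drops, and tsleep_nsec(9) bounds the wait so a wedged
 * queue cannot stall us indefinitely.
 */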
7009 int
7010 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
7011 {
7012 	int i, err;
7013 
7014 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
7015 		struct iwm_tx_ring *ring = &sc->txq[i];
7016 
7017 		if (i == sc->cmdqid)
7018 			continue;
7019 
7020 		while (ring->queued > 0) {
7021 			err = tsleep_nsec(ring, 0, "iwmflush",
7022 			    MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
7023 			if (err)
7024 				return err;
7025 		}
7026 	}
7027 
7028 	return 0;
7029 }
7030 
7031 void
7032 iwm_led_enable(struct iwm_softc *sc)
7033 {
7034 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
7035 }
7036 
7037 void
7038 iwm_led_disable(struct iwm_softc *sc)
7039 {
7040 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
7041 }
7042 
7043 int
7044 iwm_led_is_enabled(struct iwm_softc *sc)
7045 {
7046 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
7047 }
7048 
7049 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
7050 
7051 void
7052 iwm_led_blink_timeout(void *arg)
7053 {
7054 	struct iwm_softc *sc = arg;
7055 
7056 	if (iwm_led_is_enabled(sc))
7057 		iwm_led_disable(sc);
7058 	else
7059 		iwm_led_enable(sc);
7060 
7061 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7062 }
7063 
7064 void
7065 iwm_led_blink_start(struct iwm_softc *sc)
7066 {
7067 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7068 	iwm_led_enable(sc);
7069 }
7070 
7071 void
7072 iwm_led_blink_stop(struct iwm_softc *sc)
7073 {
7074 	timeout_del(&sc->sc_led_blink_to);
7075 	iwm_led_disable(sc);
7076 }
7077 
7078 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
7079 
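/*
 * Beacon filtering offloads per-beacon processing to the firmware so
 * the host is not woken up for every beacon; beacon abort additionally
 * allows the firmware to skip beacons entirely while link conditions
 * are stable. Both features are driven through the same command.
 */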
7080 int
7081 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
7082     struct iwm_beacon_filter_cmd *cmd)
7083 {
7084 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
7085 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
7086 }
7087 
7088 void
7089 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
7090     struct iwm_beacon_filter_cmd *cmd)
7091 {
7092 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
7093 }
7094 
7095 int
7096 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
7097 {
7098 	struct iwm_beacon_filter_cmd cmd = {
7099 		IWM_BF_CMD_CONFIG_DEFAULTS,
7100 		.bf_enable_beacon_filter = htole32(1),
7101 		.ba_enable_beacon_abort = htole32(enable),
7102 	};
7103 
7104 	if (!sc->sc_bf.bf_enabled)
7105 		return 0;
7106 
7107 	sc->sc_bf.ba_enabled = enable;
7108 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7109 	return iwm_beacon_filter_send_cmd(sc, &cmd);
7110 }
7111 
7112 void
7113 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
7114     struct iwm_mac_power_cmd *cmd)
7115 {
7116 	struct ieee80211com *ic = &sc->sc_ic;
7117 	struct ieee80211_node *ni = &in->in_ni;
7118 	int dtim_period, dtim_msec, keep_alive;
7119 
7120 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7121 	    in->in_color));
7122 	if (ni->ni_dtimperiod)
7123 		dtim_period = ni->ni_dtimperiod;
7124 	else
7125 		dtim_period = 1;
7126 
7127 	/*
7128 	 * Regardless of power management state the driver must set
7129 	 * keep alive period. FW will use it for sending keep alive NDPs
7130 	 * immediately after association. Check that keep alive period
7131 	 * is at least 3 * DTIM.
7132 	 */
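	/*
	 * Worked example (values chosen for illustration): with a DTIM
	 * period of 2 and a beacon interval of 100, dtim_msec is 200 and
	 * 3 * DTIM is 600ms; MAX(600, 25000) rounded up yields a 25
	 * second keep alive period.
	 */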
7133 	dtim_msec = dtim_period * ni->ni_intval;
7134 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
7135 	keep_alive = roundup(keep_alive, 1000) / 1000;
7136 	cmd->keep_alive_seconds = htole16(keep_alive);
7137 
7138 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7139 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7140 }
7141 
7142 int
7143 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
7144 {
7145 	int err;
7146 	int ba_enable;
7147 	struct iwm_mac_power_cmd cmd;
7148 
7149 	memset(&cmd, 0, sizeof(cmd));
7150 
7151 	iwm_power_build_cmd(sc, in, &cmd);
7152 
7153 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
7154 	    sizeof(cmd), &cmd);
7155 	if (err != 0)
7156 		return err;
7157 
7158 	ba_enable = !!(cmd.flags &
7159 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
7160 	return iwm_update_beacon_abort(sc, in, ba_enable);
7161 }
7162 
7163 int
7164 iwm_power_update_device(struct iwm_softc *sc)
7165 {
7166 	struct iwm_device_power_cmd cmd = { };
7167 	struct ieee80211com *ic = &sc->sc_ic;
7168 
7169 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7170 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7171 
7172 	return iwm_send_cmd_pdu(sc,
7173 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
7174 }
7175 
7176 int
7177 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
7178 {
7179 	struct iwm_beacon_filter_cmd cmd = {
7180 		IWM_BF_CMD_CONFIG_DEFAULTS,
7181 		.bf_enable_beacon_filter = htole32(1),
7182 	};
7183 	int err;
7184 
7185 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7186 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7187 
7188 	if (err == 0)
7189 		sc->sc_bf.bf_enabled = 1;
7190 
7191 	return err;
7192 }
7193 
7194 int
7195 iwm_disable_beacon_filter(struct iwm_softc *sc)
7196 {
7197 	struct iwm_beacon_filter_cmd cmd;
7198 	int err;
7199 
7200 	memset(&cmd, 0, sizeof(cmd));
7201 
7202 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7203 	if (err == 0)
7204 		sc->sc_bf.bf_enabled = 0;
7205 
7206 	return err;
7207 }
7208 
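/*
 * Add or update the firmware's station table entry for our AP (or for
 * the pseudo-station used in monitor mode). This assigns Tx queues to
 * the station and derives MIMO, 40/80MHz, A-MPDU size and density
 * flags from the negotiated HT/VHT parameters. Firmware without the
 * STA_TYPE API takes a shorter (v7) layout of the same command.
 */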
7209 int
7210 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
7211 {
7212 	struct iwm_add_sta_cmd add_sta_cmd;
7213 	int err;
7214 	uint32_t status, aggsize;
7215 	const uint32_t max_aggsize = (IWM_STA_FLG_MAX_AGG_SIZE_64K >>
7216 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT);
7217 	size_t cmdsize;
7218 	struct ieee80211com *ic = &sc->sc_ic;
7219 
7220 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
7221 		panic("STA already added");
7222 
7223 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
7224 
7225 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7226 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7227 	else
7228 		add_sta_cmd.sta_id = IWM_STATION_ID;
7229 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
7230 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7231 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
7232 		else
7233 			add_sta_cmd.station_type = IWM_STA_LINK;
7234 	}
7235 	add_sta_cmd.mac_id_n_color
7236 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
7237 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7238 		int qid;
7239 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
7240 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7241 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7242 		else
7243 			qid = IWM_AUX_QUEUE;
7244 		in->tfd_queue_msk |= (1 << qid);
7245 	} else {
7246 		int ac;
7247 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7248 			int qid = ac;
7249 			if (isset(sc->sc_enabled_capa,
7250 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7251 				qid += IWM_DQA_MIN_MGMT_QUEUE;
7252 			in->tfd_queue_msk |= (1 << qid);
7253 		}
7254 	}
7255 	if (!update) {
7256 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7257 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7258 			    etherbroadcastaddr);
7259 		else
7260 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7261 			    in->in_macaddr);
7262 	}
7263 	add_sta_cmd.add_modify = update ? 1 : 0;
7264 	add_sta_cmd.station_flags_msk
7265 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
7266 	if (update) {
7267 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
7268 		    IWM_STA_MODIFY_TID_DISABLE_TX);
7269 	}
7270 	add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
7271 	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);
7272 
7273 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7274 		add_sta_cmd.station_flags_msk
7275 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
7276 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
7277 
7278 		if (iwm_mimo_enabled(sc)) {
7279 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7280 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
7281 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
7282 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
7283 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
7284 					add_sta_cmd.station_flags |=
7285 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7286 				}
7287 			} else {
7288 				if (in->in_ni.ni_rxmcs[1] != 0) {
7289 					add_sta_cmd.station_flags |=
7290 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7291 				}
7292 				if (in->in_ni.ni_rxmcs[2] != 0) {
7293 					add_sta_cmd.station_flags |=
7294 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
7295 				}
7296 			}
7297 		}
7298 
7299 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
7300 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
7301 			add_sta_cmd.station_flags |= htole32(
7302 			    IWM_STA_FLG_FAT_EN_40MHZ);
7303 		}
7304 
7305 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7306 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
7307 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
7308 				add_sta_cmd.station_flags |= htole32(
7309 				    IWM_STA_FLG_FAT_EN_80MHZ);
7310 			}
7311 			aggsize = (in->in_ni.ni_vhtcaps &
7312 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
7313 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
7314 		} else {
7315 			aggsize = (in->in_ni.ni_ampdu_param &
7316 			    IEEE80211_AMPDU_PARAM_LE);
7317 		}
7318 		if (aggsize > max_aggsize)
7319 			aggsize = max_aggsize;
7320 		add_sta_cmd.station_flags |= htole32((aggsize <<
7321 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT) &
7322 		    IWM_STA_FLG_MAX_AGG_SIZE_MSK);
7323 
7324 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
7325 		case IEEE80211_AMPDU_PARAM_SS_2:
7326 			add_sta_cmd.station_flags
7327 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
7328 			break;
7329 		case IEEE80211_AMPDU_PARAM_SS_4:
7330 			add_sta_cmd.station_flags
7331 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
7332 			break;
7333 		case IEEE80211_AMPDU_PARAM_SS_8:
7334 			add_sta_cmd.station_flags
7335 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
7336 			break;
7337 		case IEEE80211_AMPDU_PARAM_SS_16:
7338 			add_sta_cmd.station_flags
7339 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
7340 			break;
7341 		default:
7342 			break;
7343 		}
7344 	}
7345 
7346 	status = IWM_ADD_STA_SUCCESS;
7347 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7348 		cmdsize = sizeof(add_sta_cmd);
7349 	else
7350 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7351 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
7352 	    &add_sta_cmd, &status);
7353 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7354 		err = EIO;
7355 
7356 	return err;
7357 }
7358 
7359 int
7360 iwm_add_aux_sta(struct iwm_softc *sc)
7361 {
7362 	struct iwm_add_sta_cmd cmd;
7363 	int err, qid;
7364 	uint32_t status;
7365 	size_t cmdsize;
7366 
7367 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7368 		qid = IWM_DQA_AUX_QUEUE;
7369 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
7370 		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
7371 	} else {
7372 		qid = IWM_AUX_QUEUE;
7373 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
7374 	}
7375 	if (err)
7376 		return err;
7377 
7378 	memset(&cmd, 0, sizeof(cmd));
7379 	cmd.sta_id = IWM_AUX_STA_ID;
7380 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7381 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
7382 	cmd.mac_id_n_color =
7383 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
7384 	cmd.tfd_queue_msk = htole32(1 << qid);
7385 	cmd.tid_disable_tx = htole16(0xffff);
7386 
7387 	status = IWM_ADD_STA_SUCCESS;
7388 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7389 		cmdsize = sizeof(cmd);
7390 	else
7391 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7392 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
7393 	    &status);
7394 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7395 		err = EIO;
7396 
7397 	return err;
7398 }
7399 
7400 int
7401 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
7402 {
7403 	struct iwm_add_sta_cmd cmd;
7404 	int err;
7405 	uint32_t status;
7406 	size_t cmdsize;
7407 
7408 	memset(&cmd, 0, sizeof(cmd));
7409 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7410 	    in->in_color));
7411 	cmd.sta_id = IWM_STATION_ID;
7412 	cmd.add_modify = IWM_STA_MODE_MODIFY;
7413 	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
7414 	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);
7415 
7416 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7417 		cmdsize = sizeof(cmd);
7418 	else
7419 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7420 
7421 	status = IWM_ADD_STA_SUCCESS;
7422 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
7423 	    cmdsize, &cmd, &status);
7424 	if (err) {
7425 		printf("%s: could not update sta (error %d)\n",
7426 		    DEVNAME(sc), err);
7427 		return err;
7428 	}
7429 
7430 	switch (status & IWM_ADD_STA_STATUS_MASK) {
7431 	case IWM_ADD_STA_SUCCESS:
7432 		break;
7433 	default:
7434 		err = EIO;
7435 		printf("%s: Couldn't %s draining for station\n",
7436 		    DEVNAME(sc), drain ? "enable" : "disable");
7437 		break;
7438 	}
7439 
7440 	return err;
7441 }
7442 
7443 int
7444 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
7445 {
7446 	int err;
7447 
7448 	sc->sc_flags |= IWM_FLAG_TXFLUSH;
7449 
7450 	err = iwm_drain_sta(sc, in, 1);
7451 	if (err)
7452 		goto done;
7453 
7454 	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
7455 	if (err) {
7456 		printf("%s: could not flush Tx path (error %d)\n",
7457 		    DEVNAME(sc), err);
7458 		goto done;
7459 	}
7460 
7461 	/*
7462 	 * Flushing Tx rings may fail if the AP has disappeared.
7463 	 * We can rely on iwm_newstate_task() to reset everything and begin
7464 	 * scanning again if we are left with outstanding frames on queues.
7465 	 */
7466 	err = iwm_wait_tx_queues_empty(sc);
7467 	if (err)
7468 		goto done;
7469 
7470 	err = iwm_drain_sta(sc, in, 0);
7471 done:
7472 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
7473 	return err;
7474 }
7475 
7476 int
7477 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
7478 {
7479 	struct ieee80211com *ic = &sc->sc_ic;
7480 	struct iwm_rm_sta_cmd rm_sta_cmd;
7481 	int err;
7482 
7483 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
7484 		panic("sta already removed");
7485 
7486 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
7487 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7488 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7489 	else
7490 		rm_sta_cmd.sta_id = IWM_STATION_ID;
7491 
7492 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7493 	    &rm_sta_cmd);
7494 
7495 	return err;
7496 }
7497 
7498 uint16_t
7499 iwm_scan_rx_chain(struct iwm_softc *sc)
7500 {
7501 	uint16_t rx_chain;
7502 	uint8_t rx_ant;
7503 
7504 	rx_ant = iwm_fw_valid_rx_ant(sc);
7505 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
7506 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
7507 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
7508 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
7509 	return htole16(rx_chain);
7510 }
7511 
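/*
 * Choose the Tx rate and antenna for scan probes. The antenna is
 * round-robined across all valid Tx antennas so that probes are spread
 * evenly. On 2GHz we probe at 1 Mb/s CCK unless CCK is disallowed;
 * otherwise, and on 5GHz, 6 Mb/s OFDM is used.
 */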
7512 uint32_t
7513 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
7514 {
7515 	uint32_t tx_ant;
7516 	int i, ind;
7517 
7518 	for (i = 0, ind = sc->sc_scan_last_antenna;
7519 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
7520 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
7521 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
7522 			sc->sc_scan_last_antenna = ind;
7523 			break;
7524 		}
7525 	}
7526 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
7527 
7528 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
7529 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
7530 				   tx_ant);
7531 	else
7532 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
7533 }
7534 
7535 uint8_t
7536 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
7537     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
7538 {
7539 	struct ieee80211com *ic = &sc->sc_ic;
7540 	struct ieee80211_channel *c;
7541 	uint8_t nchan;
7542 
7543 	for (nchan = 0, c = &ic->ic_channels[1];
7544 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7545 	    nchan < sc->sc_capa_n_scan_channels;
7546 	    c++) {
7547 		if (c->ic_flags == 0)
7548 			continue;
7549 
7550 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
7551 		chan->iter_count = htole16(1);
7552 		chan->iter_interval = 0;
7553 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
7554 		if (n_ssids != 0 && !bgscan)
7555 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
7556 		chan++;
7557 		nchan++;
7558 	}
7559 
7560 	return nchan;
7561 }
7562 
7563 uint8_t
7564 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
7565     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
7566 {
7567 	struct ieee80211com *ic = &sc->sc_ic;
7568 	struct ieee80211_channel *c;
7569 	uint8_t nchan;
7570 
7571 	for (nchan = 0, c = &ic->ic_channels[1];
7572 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7573 	    nchan < sc->sc_capa_n_scan_channels;
7574 	    c++) {
7575 		if (c->ic_flags == 0)
7576 			continue;
7577 
7578 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
7579 		chan->iter_count = 1;
7580 		chan->iter_interval = htole16(0);
7581 		if (n_ssids != 0 && !bgscan)
7582 			chan->flags = htole32(1 << 0); /* select SSID 0 */
7583 		chan++;
7584 		nchan++;
7585 	}
7586 
7587 	return nchan;
7588 }
7589 
7590 int
7591 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
7592 {
7593 	struct iwm_scan_probe_req preq2;
7594 	int err, i;
7595 
7596 	err = iwm_fill_probe_req(sc, &preq2);
7597 	if (err)
7598 		return err;
7599 
7600 	preq1->mac_header = preq2.mac_header;
7601 	for (i = 0; i < nitems(preq1->band_data); i++)
7602 		preq1->band_data[i] = preq2.band_data[i];
7603 	preq1->common_data = preq2.common_data;
7604 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
7605 	return 0;
7606 }
7607 
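/*
 * Build the probe request template which the firmware transmits while
 * scanning. The mac_header, band_data and common_data descriptors tell
 * the firmware where the 802.11 header, the per-band rate IEs, and the
 * band-agnostic HT IEs sit within the buffer so that it can insert the
 * SSID and select the appropriate IEs for each channel.
 */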
7608 int
7609 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7610 {
7611 	struct ieee80211com *ic = &sc->sc_ic;
7612 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7613 	struct ieee80211_rateset *rs;
7614 	size_t remain = sizeof(preq->buf);
7615 	uint8_t *frm, *pos;
7616 
7617 	memset(preq, 0, sizeof(*preq));
7618 
7619 	if (remain < sizeof(*wh) + 2)
7620 		return ENOBUFS;
7621 
7622 	/*
7623 	 * Build a probe request frame.  Most of the following code is a
7624 	 * copy & paste of what is done in net80211.
7625 	 */
7626 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7627 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7628 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7629 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7630 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7631 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7632 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7633 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7634 
7635 	frm = (uint8_t *)(wh + 1);
7636 
7637 	*frm++ = IEEE80211_ELEMID_SSID;
7638 	*frm++ = 0;
7639 	/* hardware inserts SSID */
7640 
7641 	/* Tell firmware where the MAC header and SSID IE are. */
7642 	preq->mac_header.offset = 0;
7643 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7644 	remain -= frm - (uint8_t *)wh;
7645 
7646 	/* Fill in 2GHz IEs and tell firmware where they are. */
7647 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7648 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7649 		if (remain < 4 + rs->rs_nrates)
7650 			return ENOBUFS;
7651 	} else if (remain < 2 + rs->rs_nrates)
7652 		return ENOBUFS;
7653 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7654 	pos = frm;
7655 	frm = ieee80211_add_rates(frm, rs);
7656 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7657 		frm = ieee80211_add_xrates(frm, rs);
7658 	remain -= frm - pos;
7659 
7660 	if (isset(sc->sc_enabled_capa,
7661 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7662 		if (remain < 3)
7663 			return ENOBUFS;
7664 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7665 		*frm++ = 1;
7666 		*frm++ = 0;
7667 		remain -= 3;
7668 	}
7669 	preq->band_data[0].len = htole16(frm - pos);
7670 
7671 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7672 		/* Fill in 5GHz IEs. */
7673 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7674 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7675 			if (remain < 4 + rs->rs_nrates)
7676 				return ENOBUFS;
7677 		} else if (remain < 2 + rs->rs_nrates)
7678 			return ENOBUFS;
7679 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7680 		pos = frm;
7681 		frm = ieee80211_add_rates(frm, rs);
7682 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7683 			frm = ieee80211_add_xrates(frm, rs);
7684 		preq->band_data[1].len = htole16(frm - pos);
7685 		remain -= frm - pos;
7686 		if (ic->ic_flags & IEEE80211_F_VHTON) {
7687 			if (remain < 14)
7688 				return ENOBUFS;
7689 			frm = ieee80211_add_vhtcaps(frm, ic);
7690 			remain -= frm - pos;
7691 		}
7692 	}
7693 
7694 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7695 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7696 	pos = frm;
7697 	if (ic->ic_flags & IEEE80211_F_HTON) {
7698 		if (remain < 28)
7699 			return ENOBUFS;
7700 		frm = ieee80211_add_htcaps(frm, ic);
7701 		/* XXX add WME info? */
7702 		remain -= frm - pos;
7703 	}
7704 
7705 	preq->common_data.len = htole16(frm - pos);
7706 
7707 	return 0;
7708 }
7709 
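/*
 * Start a scan using the older LMAC scan API. A single command carries
 * the dwell times, per-band Tx parameters, the channel list, and the
 * probe request template. Background scans run while associated and
 * are therefore issued asynchronously.
 */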
7710 int
7711 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7712 {
7713 	struct ieee80211com *ic = &sc->sc_ic;
7714 	struct iwm_host_cmd hcmd = {
7715 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
7716 		.len = { 0, },
7717 		.data = { NULL, },
7718 		.flags = 0,
7719 	};
7720 	struct iwm_scan_req_lmac *req;
7721 	struct iwm_scan_probe_req_v1 *preq;
7722 	size_t req_len;
7723 	int err, async = bgscan;
7724 
7725 	req_len = sizeof(struct iwm_scan_req_lmac) +
7726 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7727 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7728 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7729 		return ENOMEM;
7730 	req = malloc(req_len, M_DEVBUF,
7731 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7732 	if (req == NULL)
7733 		return ENOMEM;
7734 
7735 	hcmd.len[0] = (uint16_t)req_len;
7736 	hcmd.data[0] = (void *)req;
7737 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7738 
7739 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7740 	req->active_dwell = 10;
7741 	req->passive_dwell = 110;
7742 	req->fragmented_dwell = 44;
7743 	req->extended_dwell = 90;
7744 	if (bgscan) {
7745 		req->max_out_time = htole32(120);
7746 		req->suspend_time = htole32(120);
7747 	} else {
7748 		req->max_out_time = htole32(0);
7749 		req->suspend_time = htole32(0);
7750 	}
7751 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
7752 	req->rx_chain_select = iwm_scan_rx_chain(sc);
7753 	req->iter_num = htole32(1);
7754 	req->delay = 0;
7755 
7756 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
7757 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
7758 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
7759 	if (ic->ic_des_esslen == 0)
7760 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
7761 	else
7762 		req->scan_flags |=
7763 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
7764 	if (isset(sc->sc_enabled_capa,
7765 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
7766 	    isset(sc->sc_enabled_capa,
7767 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
7768 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
7769 
7770 	req->flags = htole32(IWM_PHY_BAND_24);
7771 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7772 		req->flags |= htole32(IWM_PHY_BAND_5);
7773 	req->filter_flags =
7774 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
7775 
7776 	/* Tx flags 2 GHz. */
7777 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7778 	    IWM_TX_CMD_FLG_BT_DIS);
7779 	req->tx_cmd[0].rate_n_flags =
7780 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7781 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
7782 
7783 	/* Tx flags 5 GHz. */
7784 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7785 	    IWM_TX_CMD_FLG_BT_DIS);
7786 	req->tx_cmd[1].rate_n_flags =
7787 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7788 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
7789 
7790 	/* Check if we're doing an active directed scan. */
7791 	if (ic->ic_des_esslen != 0) {
7792 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7793 		req->direct_scan[0].len = ic->ic_des_esslen;
7794 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
7795 		    ic->ic_des_esslen);
7796 	}
7797 
7798 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
7799 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
7800 	    ic->ic_des_esslen != 0, bgscan);
7801 
7802 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
7803 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7804 	    sc->sc_capa_n_scan_channels));
7805 	err = iwm_fill_probe_req_v1(sc, preq);
7806 	if (err) {
7807 		free(req, M_DEVBUF, req_len);
7808 		return err;
7809 	}
7810 
7811 	/* Specify the scan plan: We'll do one iteration. */
7812 	req->schedule[0].iterations = 1;
7813 	req->schedule[0].full_scan_mul = 1;
7814 
7815 	/* Disable EBS. */
7816 	req->channel_opt[0].non_ebs_ratio = 1;
7817 	req->channel_opt[1].non_ebs_ratio = 1;
7818 
7819 	err = iwm_send_cmd(sc, &hcmd);
7820 	free(req, M_DEVBUF, req_len);
7821 	return err;
7822 }
7823 
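/*
 * One-time UMAC scan setup: advertise our Tx/Rx chains, the supported
 * legacy rates, our MAC address, and the full channel list to the
 * firmware's scan engine. Later scan requests build on this
 * configuration.
 */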
7824 int
7825 iwm_config_umac_scan(struct iwm_softc *sc)
7826 {
7827 	struct ieee80211com *ic = &sc->sc_ic;
7828 	struct iwm_scan_config *scan_config;
7829 	int err, nchan;
7830 	size_t cmd_size;
7831 	struct ieee80211_channel *c;
7832 	struct iwm_host_cmd hcmd = {
7833 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
7834 		.flags = 0,
7835 	};
7836 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
7837 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
7838 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
7839 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
7840 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
7841 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
7842 	    IWM_SCAN_CONFIG_RATE_54M);
7843 
7844 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7845 
7846 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
7847 	if (scan_config == NULL)
7848 		return ENOMEM;
7849 
7850 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7851 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7852 	scan_config->legacy_rates = htole32(rates |
7853 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
7854 
7855 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7856 	scan_config->dwell_active = 10;
7857 	scan_config->dwell_passive = 110;
7858 	scan_config->dwell_fragmented = 44;
7859 	scan_config->dwell_extended = 90;
7860 	scan_config->out_of_channel_time = htole32(0);
7861 	scan_config->suspend_time = htole32(0);
7862 
7863 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7864 
7865 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
7866 	scan_config->channel_flags = 0;
7867 
7868 	for (c = &ic->ic_channels[1], nchan = 0;
7869 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7870 	    nchan < sc->sc_capa_n_scan_channels; c++) {
7871 		if (c->ic_flags == 0)
7872 			continue;
7873 		scan_config->channel_array[nchan++] =
7874 		    ieee80211_mhz2ieee(c->ic_freq, 0);
7875 	}
7876 
7877 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
7878 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
7879 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
7880 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
7881 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
7882 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
7883 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
7884 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
7885 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
7886 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
7887 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
7888 
7889 	hcmd.data[0] = scan_config;
7890 	hcmd.len[0] = cmd_size;
7891 
7892 	err = iwm_send_cmd(sc, &hcmd);
7893 	free(scan_config, M_DEVBUF, cmd_size);
7894 	return err;
7895 }
7896 
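/*
 * The UMAC scan request layout varies with the firmware API: firmware
 * with adaptive dwell uses the larger v7 or v8 fixed part, and the
 * tail (probe template and direct-scan SSIDs) comes in a v1 or v2
 * flavour depending on the extended-channel API flag. The helpers
 * below compute the total size and locate version-dependent members.
 */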
7897 int
7898 iwm_umac_scan_size(struct iwm_softc *sc)
7899 {
7900 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
7901 	int tail_size;
7902 
7903 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7904 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
7905 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7906 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
7907 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7908 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
7909 	else
7910 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
7911 
7912 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
7913 	    sc->sc_capa_n_scan_channels + tail_size;
7914 }
7915 
7916 struct iwm_scan_umac_chan_param *
7917 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7918     struct iwm_scan_req_umac *req)
7919 {
7920 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7921 		return &req->v8.channel;
7922 
7923 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7924 		return &req->v7.channel;
7925 
7926 	return &req->v1.channel;
7927 }
7928 
7929 void *
7930 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7931 {
7932 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7933 		return (void *)&req->v8.data;
7934 
7935 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7936 		return (void *)&req->v7.data;
7937 
7938 	return (void *)&req->v1.data;
7939 
7940 }
7941 
7942 /* adaptive dwell max budget time [TU] for full scan */
7943 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7944 /* adaptive dwell max budget time [TU] for directed scan */
7945 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7946 /* adaptive dwell default high band APs number */
7947 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7948 /* adaptive dwell default low band APs number */
7949 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7950 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7951 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7952 
7953 int
7954 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7955 {
7956 	struct ieee80211com *ic = &sc->sc_ic;
7957 	struct iwm_host_cmd hcmd = {
7958 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
7959 		.len = { 0, },
7960 		.data = { NULL, },
7961 		.flags = 0,
7962 	};
7963 	struct iwm_scan_req_umac *req;
7964 	void *cmd_data, *tail_data;
7965 	struct iwm_scan_req_umac_tail_v2 *tail;
7966 	struct iwm_scan_req_umac_tail_v1 *tailv1;
7967 	struct iwm_scan_umac_chan_param *chanparam;
7968 	size_t req_len;
7969 	int err, async = bgscan;
7970 
7971 	req_len = iwm_umac_scan_size(sc);
7972 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
7973 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
7974 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7975 		return ERANGE;
7976 	req = malloc(req_len, M_DEVBUF,
7977 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7978 	if (req == NULL)
7979 		return ENOMEM;
7980 
7981 	hcmd.len[0] = (uint16_t)req_len;
7982 	hcmd.data[0] = (void *)req;
7983 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7984 
7985 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7986 		req->v7.adwell_default_n_aps_social =
7987 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7988 		req->v7.adwell_default_n_aps =
7989 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
7990 
7991 		if (ic->ic_des_esslen != 0)
7992 			req->v7.adwell_max_budget =
7993 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7994 		else
7995 			req->v7.adwell_max_budget =
7996 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7997 
7998 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
7999 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8000 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8001 
8002 		if (isset(sc->sc_ucode_api,
8003 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8004 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
8005 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
8006 		} else {
8007 			req->v7.active_dwell = 10;
8008 			req->v7.passive_dwell = 110;
8009 			req->v7.fragmented_dwell = 44;
8010 		}
8011 	} else {
8012 		/* These timings correspond to iwlwifi's UNASSOC scan. */
8013 		req->v1.active_dwell = 10;
8014 		req->v1.passive_dwell = 110;
8015 		req->v1.fragmented_dwell = 44;
8016 		req->v1.extended_dwell = 90;
8017 
8018 		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8019 	}
8020 
8021 	if (bgscan) {
8022 		const uint32_t timeout = htole32(120);
8023 		if (isset(sc->sc_ucode_api,
8024 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8025 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8026 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8027 		} else if (isset(sc->sc_ucode_api,
8028 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8029 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8030 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8031 		} else {
8032 			req->v1.max_out_time = timeout;
8033 			req->v1.suspend_time = timeout;
8034 		}
8035 	}
8036 
8037 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8038 
8039 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
8040 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
8041 	chanparam->count = iwm_umac_scan_fill_channels(sc,
8042 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
8043 	    ic->ic_des_esslen != 0, bgscan);
8044 	chanparam->flags = 0;
8045 
8046 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
8047 	    sc->sc_capa_n_scan_channels;
8048 	tail = tail_data;
8049 	/* tail v1 layout differs in preq and direct_scan member fields. */
8050 	tailv1 = tail_data;
8051 
8052 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
8053 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
8054 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8055 		req->v8.general_flags2 =
8056 			IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
8057 	}
8058 
8059 	if (ic->ic_des_esslen != 0) {
8060 		if (isset(sc->sc_ucode_api,
8061 		    IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
8062 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8063 			tail->direct_scan[0].len = ic->ic_des_esslen;
8064 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
8065 			    ic->ic_des_esslen);
8066 		} else {
8067 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8068 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
8069 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
8070 			    ic->ic_des_esslen);
8071 		}
8072 		req->general_flags |=
8073 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
8074 	} else
8075 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
8076 
8077 	if (isset(sc->sc_enabled_capa,
8078 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
8079 	    isset(sc->sc_enabled_capa,
8080 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
8081 		req->general_flags |=
8082 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
8083 
8084 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8085 		req->general_flags |=
8086 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
8087 	} else {
8088 		req->general_flags |=
8089 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
8090 	}
8091 
8092 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
8093 		err = iwm_fill_probe_req(sc, &tail->preq);
8094 	else
8095 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
8096 	if (err) {
8097 		free(req, M_DEVBUF, req_len);
8098 		return err;
8099 	}
8100 
8101 	/* Specify the scan plan: We'll do one iteration. */
8102 	tail->schedule[0].interval = 0;
8103 	tail->schedule[0].iter_count = 1;
8104 
8105 	err = iwm_send_cmd(sc, &hcmd);
8106 	free(req, M_DEVBUF, req_len);
8107 	return err;
8108 }
8109 
8110 void
8111 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
8112 {
8113 	struct ieee80211com *ic = &sc->sc_ic;
8114 	struct ifnet *ifp = IC2IFP(ic);
8115 	char alpha2[3];
8116 
8117 	snprintf(alpha2, sizeof(alpha2), "%c%c",
8118 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
8119 
8120 	if (ifp->if_flags & IFF_DEBUG) {
8121 		printf("%s: firmware has detected regulatory domain '%s' "
8122 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
8123 	}
8124 
8125 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
8126 }
8127 
8128 uint8_t
8129 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
8130 {
8131 	int i;
8132 	uint8_t rval;
8133 
8134 	for (i = 0; i < rs->rs_nrates; i++) {
8135 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
8136 		if (rval == iwm_rates[ridx].rate)
8137 			return rs->rs_rates[i];
8138 	}
8139 
8140 	return 0;
8141 }
8142 
8143 int
8144 iwm_rval2ridx(int rval)
8145 {
8146 	int ridx;
8147 
8148 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
8149 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
8150 			continue;
8151 		if (rval == iwm_rates[ridx].rate)
8152 			break;
8153 	}
8154 
8155 	return ridx;
8156 }
8157 
8158 void
8159 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
8160     int *ofdm_rates)
8161 {
8162 	struct ieee80211_node *ni = &in->in_ni;
8163 	struct ieee80211_rateset *rs = &ni->ni_rates;
8164 	int lowest_present_ofdm = -1;
8165 	int lowest_present_cck = -1;
8166 	uint8_t cck = 0;
8167 	uint8_t ofdm = 0;
8168 	int i;
8169 
8170 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
8171 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
8172 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
8173 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8174 				continue;
8175 			cck |= (1 << i);
8176 			if (lowest_present_cck == -1 || lowest_present_cck > i)
8177 				lowest_present_cck = i;
8178 		}
8179 	}
8180 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
8181 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8182 			continue;
8183 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
8184 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
8185 			lowest_present_ofdm = i;
8186 	}
8187 
8188 	/*
8189 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
8190 	 * variables. This isn't sufficient though, as there might not
8191 	 * be all the right rates in the bitmap. E.g. if the only basic
8192 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
8193 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
8194 	 *
8195 	 *    [...] a STA responding to a received frame shall transmit
8196 	 *    its Control Response frame [...] at the highest rate in the
8197 	 *    BSSBasicRateSet parameter that is less than or equal to the
8198 	 *    rate of the immediately previous frame in the frame exchange
8199 	 *    sequence ([...]) and that is of the same modulation class
8200 	 *    ([...]) as the received frame. If no rate contained in the
8201 	 *    BSSBasicRateSet parameter meets these conditions, then the
8202 	 *    control frame sent in response to a received frame shall be
8203 	 *    transmitted at the highest mandatory rate of the PHY that is
8204 	 *    less than or equal to the rate of the received frame, and
8205 	 *    that is of the same modulation class as the received frame.
8206 	 *
8207 	 * As a consequence, we need to add all mandatory rates that are
8208 	 * lower than all of the basic rates to these bitmaps.
8209 	 */
8210 
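	/*
	 * Example (rates chosen for illustration): if the only basic
	 * OFDM rate is 36 Mb/s, the checks below add 24, 12 and 6 Mb/s,
	 * i.e. the mandatory OFDM rates at or below it.
	 */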
8211 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
8212 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
8213 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
8214 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
8215 	/* 6M already there or needed so always add */
8216 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
8217 
8218 	/*
8219 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
8220 	 * Note, however:
8221 	 *  - if no CCK rates are basic, it must be ERP since there must
8222 	 *    be some basic rates at all, so they're OFDM => ERP PHY
8223 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
8224 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
8225 	 *  - if 5.5M is basic, 1M and 2M are mandatory
8226 	 *  - if 2M is basic, 1M is mandatory
8227 	 *  - if 1M is basic, that's the only valid ACK rate.
8228 	 * As a consequence, it's not as complicated as it sounds, just add
8229 	 * any lower rates to the ACK rate bitmap.
8230 	 */
8231 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
8232 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
8233 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
8234 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
8235 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
8236 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
8237 	/* 1M already there or needed so always add */
8238 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
8239 
8240 	*cck_rates = cck;
8241 	*ofdm_rates = ofdm;
8242 }
8243 
8244 void
8245 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
8246     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
8247 {
8248 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
8249 	struct ieee80211com *ic = &sc->sc_ic;
8250 	struct ieee80211_node *ni = ic->ic_bss;
8251 	int cck_ack_rates, ofdm_ack_rates;
8252 	int i;
8253 
8254 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
8255 	    in->in_color));
8256 	cmd->action = htole32(action);
8257 
8258 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8259 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
8260 	else if (ic->ic_opmode == IEEE80211_M_STA)
8261 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
8262 	else
8263 		panic("unsupported operating mode %d", ic->ic_opmode);
8264 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
8265 
8266 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
8267 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8268 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
8269 		return;
8270 	}
8271 
8272 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
8273 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
8274 	cmd->cck_rates = htole32(cck_ack_rates);
8275 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
8276 
8277 	cmd->cck_short_preamble
8278 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
8279 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
8280 	cmd->short_slot
8281 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
8282 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
8283 
8284 	for (i = 0; i < EDCA_NUM_AC; i++) {
8285 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
8286 		int txf = iwm_ac_to_tx_fifo[i];
8287 
8288 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
8289 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
8290 		cmd->ac[txf].aifsn = ac->ac_aifsn;
8291 		cmd->ac[txf].fifos_mask = (1 << txf);
8292 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
8293 	}
8294 	if (ni->ni_flags & IEEE80211_NODE_QOS)
8295 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
8296 
8297 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8298 		enum ieee80211_htprot htprot =
8299 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
8300 		switch (htprot) {
8301 		case IEEE80211_HTPROT_NONE:
8302 			break;
8303 		case IEEE80211_HTPROT_NONMEMBER:
8304 		case IEEE80211_HTPROT_NONHT_MIXED:
8305 			cmd->protection_flags |=
8306 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8307 			    IWM_MAC_PROT_FLG_FAT_PROT);
8308 			break;
8309 		case IEEE80211_HTPROT_20MHZ:
8310 			if (in->in_phyctxt &&
8311 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8312 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
8313 				cmd->protection_flags |=
8314 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8315 				    IWM_MAC_PROT_FLG_FAT_PROT);
8316 			}
8317 			break;
8318 		default:
8319 			break;
8320 		}
8321 
8322 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
8323 	}
8324 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8325 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
8326 
8327 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
8328 #undef IWM_EXP2
8329 }
8330 
8331 void
8332 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8333     struct iwm_mac_data_sta *sta, int assoc)
8334 {
8335 	struct ieee80211_node *ni = &in->in_ni;
8336 	uint32_t dtim_off;
8337 	uint64_t tsf;
8338 
8339 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
8340 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
8341 	tsf = letoh64(tsf);
8342 
8343 	sta->is_assoc = htole32(assoc);
8344 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
8345 	sta->dtim_tsf = htole64(tsf + dtim_off);
8346 	sta->bi = htole32(ni->ni_intval);
8347 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
8348 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
8349 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
8350 	sta->listen_interval = htole32(10);
8351 	sta->assoc_id = htole32(ni->ni_associd);
8352 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
8353 }
8354 
8355 int
8356 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8357     int assoc)
8358 {
8359 	struct ieee80211com *ic = &sc->sc_ic;
8360 	struct ieee80211_node *ni = &in->in_ni;
8361 	struct iwm_mac_ctx_cmd cmd;
8362 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8363 
8364 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
8365 		panic("MAC already added");
8366 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
8367 		panic("MAC already removed");
8368 
8369 	memset(&cmd, 0, sizeof(cmd));
8370 
8371 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8372 
8373 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8374 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
8375 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
8376 		    IWM_MAC_FILTER_ACCEPT_GRP |
8377 		    IWM_MAC_FILTER_IN_BEACON |
8378 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
8379 		    IWM_MAC_FILTER_IN_CRC32);
8380 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
8381 		/*
8382 		 * Allow beacons to pass through as long as we are not
8383 		 * associated or we do not have dtim period information.
8384 		 */
8385 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
8386 	else
8387 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8388 
8389 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8390 }
8391 
8392 int
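/*
 * Air-time quotas: the firmware's scheduling session consists of
 * IWM_MAX_QUOTA fragments which are split evenly across active
 * bindings (currently at most one). Firmware with the low-latency
 * quota API expects a larger command layout, so the v1 command is
 * copied over field by field in that case.
 */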
8393 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8394 {
8395 	struct iwm_time_quota_cmd_v1 cmd;
8396 	int i, idx, num_active_macs, quota, quota_rem;
8397 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
8398 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
8399 	uint16_t id;
8400 
8401 	memset(&cmd, 0, sizeof(cmd));
8402 
8403 	/* currently, PHY ID == binding ID */
8404 	if (in && in->in_phyctxt) {
8405 		id = in->in_phyctxt->id;
8406 		KASSERT(id < IWM_MAX_BINDINGS);
8407 		colors[id] = in->in_phyctxt->color;
8408 		if (running)
8409 			n_ifs[id] = 1;
8410 	}
8411 
8412 	/*
8413 	 * The FW's scheduling session consists of
8414 	 * IWM_MAX_QUOTA fragments. Divide these fragments
8415 	 * equally between all the bindings that require quota.
8416 	 */
8417 	num_active_macs = 0;
8418 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8419 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
8420 		num_active_macs += n_ifs[i];
8421 	}
8422 
8423 	quota = 0;
8424 	quota_rem = 0;
8425 	if (num_active_macs) {
8426 		quota = IWM_MAX_QUOTA / num_active_macs;
8427 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
8428 	}
8429 
8430 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8431 		if (colors[i] < 0)
8432 			continue;
8433 
8434 		cmd.quotas[idx].id_and_color =
8435 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
8436 
8437 		if (n_ifs[i] <= 0) {
8438 			cmd.quotas[idx].quota = htole32(0);
8439 			cmd.quotas[idx].max_duration = htole32(0);
8440 		} else {
8441 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8442 			cmd.quotas[idx].max_duration = htole32(0);
8443 		}
8444 		idx++;
8445 	}
8446 
8447 	/* Give the remainder of the session to the first binding */
8448 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
8449 
8450 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8451 		struct iwm_time_quota_cmd cmd_v2;
8452 
8453 		memset(&cmd_v2, 0, sizeof(cmd_v2));
8454 		for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8455 			cmd_v2.quotas[i].id_and_color =
8456 			    cmd.quotas[i].id_and_color;
8457 			cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
8458 			cmd_v2.quotas[i].max_duration =
8459 			    cmd.quotas[i].max_duration;
8460 		}
8461 		return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8462 		    sizeof(cmd_v2), &cmd_v2);
8463 	}
8464 
8465 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8466 }
8467 
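/*
 * Tasks hold a reference on the softc while queued so that shutdown
 * can drain them via the refcount. task_add(9) returns 0 if the task
 * was already pending, in which case the extra reference is dropped
 * right away.
 */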
8468 void
8469 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8470 {
8471 	int s = splnet();
8472 
8473 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8474 		splx(s);
8475 		return;
8476 	}
8477 
8478 	refcnt_take(&sc->task_refs);
8479 	if (!task_add(taskq, task))
8480 		refcnt_rele_wake(&sc->task_refs);
8481 	splx(s);
8482 }
8483 
8484 void
8485 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8486 {
8487 	if (task_del(taskq, task))
8488 		refcnt_rele(&sc->task_refs);
8489 }
8490 
8491 int
8492 iwm_scan(struct iwm_softc *sc)
8493 {
8494 	struct ieee80211com *ic = &sc->sc_ic;
8495 	struct ifnet *ifp = IC2IFP(ic);
8496 	int err;
8497 
8498 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8499 		err = iwm_scan_abort(sc);
8500 		if (err) {
8501 			printf("%s: could not abort background scan\n",
8502 			    DEVNAME(sc));
8503 			return err;
8504 		}
8505 	}
8506 
8507 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8508 		err = iwm_umac_scan(sc, 0);
8509 	else
8510 		err = iwm_lmac_scan(sc, 0);
8511 	if (err) {
8512 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8513 		return err;
8514 	}
8515 
8516 	/*
8517 	 * The current mode might have been fixed during association.
8518 	 * Ensure all channels get scanned.
8519 	 */
8520 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
8521 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8522 
8523 	sc->sc_flags |= IWM_FLAG_SCANNING;
8524 	if (ifp->if_flags & IFF_DEBUG)
8525 		printf("%s: %s -> %s\n", ifp->if_xname,
8526 		    ieee80211_state_name[ic->ic_state],
8527 		    ieee80211_state_name[IEEE80211_S_SCAN]);
8528 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8529 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
8530 		ieee80211_node_cleanup(ic, ic->ic_bss);
8531 	}
8532 	ic->ic_state = IEEE80211_S_SCAN;
8533 	iwm_led_blink_start(sc);
8534 	wakeup(&ic->ic_state); /* wake iwm_init() */
8535 
8536 	return 0;
8537 }
8538 
8539 int
8540 iwm_bgscan(struct ieee80211com *ic)
8541 {
8542 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8543 	int err;
8544 
8545 	if (sc->sc_flags & IWM_FLAG_SCANNING)
8546 		return 0;
8547 
8548 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8549 		err = iwm_umac_scan(sc, 1);
8550 	else
8551 		err = iwm_lmac_scan(sc, 1);
8552 	if (err) {
8553 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8554 		return err;
8555 	}
8556 
8557 	sc->sc_flags |= IWM_FLAG_BGSCAN;
8558 	return 0;
8559 }
8560 
8561 void
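/*
 * Called by net80211 when a background scan has found a better AP.
 * Roaming must not proceed until Tx aggregation is torn down and all
 * Tx queues are flushed, so the node-switch argument is stashed away
 * and the real work happens in iwm_bgscan_done_task() below.
 */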
8562 iwm_bgscan_done(struct ieee80211com *ic,
8563     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
8564 {
8565 	struct iwm_softc *sc = ic->ic_softc;
8566 
8567 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8568 	sc->bgscan_unref_arg = arg;
8569 	sc->bgscan_unref_arg_size = arg_size;
8570 	iwm_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
8571 }
8572 
8573 void
8574 iwm_bgscan_done_task(void *arg)
8575 {
8576 	struct iwm_softc *sc = arg;
8577 	struct ieee80211com *ic = &sc->sc_ic;
8578 	struct iwm_node *in = (void *)ic->ic_bss;
8579 	struct ieee80211_node *ni = &in->in_ni;
8580 	int tid, err = 0, s = splnet();
8581 
8582 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
8583 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
8584 	    ic->ic_state != IEEE80211_S_RUN) {
8585 		err = ENXIO;
8586 		goto done;
8587 	}
8588 
8589 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
8590 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
8591 
8592 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8593 			continue;
8594 
8595 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8596 		if (err)
8597 			goto done;
8598 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8599 		if (err)
8600 			goto done;
8601 		in->tfd_queue_msk &= ~(1 << qid);
8602 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
8603 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
8604 		    IEEE80211_ACTION_DELBA,
8605 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
8606 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
8607 #endif
8608 		ieee80211_node_tx_ba_clear(ni, tid);
8609 	}
8610 
8611 	err = iwm_flush_sta(sc, in);
8612 	if (err)
8613 		goto done;
8614 
8615 	/*
8616 	 * Tx queues have been flushed and Tx agg has been stopped.
8617 	 * Allow roaming to proceed.
8618 	 */
8619 	ni->ni_unref_arg = sc->bgscan_unref_arg;
8620 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8621 	sc->bgscan_unref_arg = NULL;
8622 	sc->bgscan_unref_arg_size = 0;
8623 	ieee80211_node_tx_stopped(ic, &in->in_ni);
8624 done:
8625 	if (err) {
8626 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8627 		sc->bgscan_unref_arg = NULL;
8628 		sc->bgscan_unref_arg_size = 0;
8629 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8630 			task_add(systq, &sc->init_task);
8631 	}
8632 	refcnt_rele_wake(&sc->task_refs);
8633 	splx(s);
8634 }
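
/*
 * Sketch of the Tx aggregation bookkeeping used above: each TID maps to
 * a fixed hardware queue, and sc->tx_ba_queue_mask tracks which of those
 * queues carry an active BA session. The tid value is assumed for
 * illustration.
 */
#if 0 /* example only, not compiled; assumes iwm function scope */
	int tid = 3;
	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;

	if (sc->tx_ba_queue_mask & (1 << qid)) {
		/* A BA session is active on this TID; tear it down. */
		sc->tx_ba_queue_mask &= ~(1 << qid);
	}
#endif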
8635 
8636 int
8637 iwm_umac_scan_abort(struct iwm_softc *sc)
8638 {
8639 	struct iwm_umac_scan_abort cmd = { 0 };
8640 
8641 	return iwm_send_cmd_pdu(sc,
8642 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
8643 	    0, sizeof(cmd), &cmd);
8644 }
8645 
8646 int
8647 iwm_lmac_scan_abort(struct iwm_softc *sc)
8648 {
8649 	struct iwm_host_cmd cmd = {
8650 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
8651 	};
8652 	int err, status;
8653 
8654 	err = iwm_send_cmd_status(sc, &cmd, &status);
8655 	if (err)
8656 		return err;
8657 
8658 	if (status != IWM_CAN_ABORT_STATUS) {
8659 		/*
8660 		 * The scan abort will return 1 for success or
8661 		 * 2 for "failure".  A failure condition can be
8662 		 * due to simply not being in an active scan which
8663 		 * due to simply not being in an active scan, which
8664 		 * microcode has notified us that a scan is completed.
8665 		 */
8666 		return EBUSY;
8667 	}
8668 
8669 	return 0;
8670 }
8671 
8672 int
8673 iwm_scan_abort(struct iwm_softc *sc)
8674 {
8675 	int err;
8676 
8677 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8678 		err = iwm_umac_scan_abort(sc);
8679 	else
8680 		err = iwm_lmac_scan_abort(sc);
8681 
8682 	if (err == 0)
8683 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8684 	return err;
8685 }
8686 
8687 int
8688 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8689     struct ieee80211_channel *chan, uint8_t chains_static,
8690     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8691     uint8_t vht_chan_width)
8692 {
8693 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8694 	int err;
8695 
8696 	if (isset(sc->sc_enabled_capa,
8697 	    IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8698 	    (phyctxt->channel->ic_flags & band_flags) !=
8699 	    (chan->ic_flags & band_flags)) {
8700 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8701 		    chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8702 		    vht_chan_width);
8703 		if (err) {
8704 			printf("%s: could not remove PHY context "
8705 			    "(error %d)\n", DEVNAME(sc), err);
8706 			return err;
8707 		}
8708 		phyctxt->channel = chan;
8709 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8710 		    chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time, sco,
8711 		    vht_chan_width);
8712 		if (err) {
8713 			printf("%s: could not add PHY context "
8714 			    "(error %d)\n", DEVNAME(sc), err);
8715 			return err;
8716 		}
8717 	} else {
8718 		phyctxt->channel = chan;
8719 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8720 		    chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8721 		    vht_chan_width);
8722 		if (err) {
8723 			printf("%s: could not update PHY context (error %d)\n",
8724 			    DEVNAME(sc), err);
8725 			return err;
8726 		}
8727 	}
8728 
8729 	phyctxt->sco = sco;
8730 	phyctxt->vht_chan_width = vht_chan_width;
8731 	return 0;
8732 }
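
/*
 * Usage sketch for iwm_phy_ctxt_update() above: callers move a PHY
 * context onto a new channel and let the function decide whether this
 * becomes a single MODIFY or, on firmware with CDB support when the
 * band changes, a REMOVE+ADD pair. The channel chosen here is an
 * assumption for illustration.
 */
#if 0 /* example only, not compiled; assumes iwm function scope */
	struct ieee80211_channel *chan = &ic->ic_channels[36]; /* 5GHz */
	int err;

	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0], chan,
	    1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		printf("%s: could not update PHY context\n", DEVNAME(sc));
#endif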
8733 
8734 int
8735 iwm_auth(struct iwm_softc *sc)
8736 {
8737 	struct ieee80211com *ic = &sc->sc_ic;
8738 	struct iwm_node *in = (void *)ic->ic_bss;
8739 	uint32_t duration;
8740 	int generation = sc->sc_generation, err;
8741 
8742 	splassert(IPL_NET);
8743 
8744 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8745 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8746 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8747 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8748 		if (err)
8749 			return err;
8750 	} else {
8751 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8752 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8753 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8754 		if (err)
8755 			return err;
8756 	}
8757 	in->in_phyctxt = &sc->sc_phyctxt[0];
8758 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8759 	iwm_setrates(in, 0);
8760 
8761 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8762 	if (err) {
8763 		printf("%s: could not add MAC context (error %d)\n",
8764 		    DEVNAME(sc), err);
8765 		return err;
8766 	}
8767 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8768 
8769 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8770 	if (err) {
8771 		printf("%s: could not add binding (error %d)\n",
8772 		    DEVNAME(sc), err);
8773 		goto rm_mac_ctxt;
8774 	}
8775 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8776 
8777 	in->tid_disable_ampdu = 0xffff;
8778 	err = iwm_add_sta_cmd(sc, in, 0);
8779 	if (err) {
8780 		printf("%s: could not add sta (error %d)\n",
8781 		    DEVNAME(sc), err);
8782 		goto rm_binding;
8783 	}
8784 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8785 
8786 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8787 		return 0;
8788 
8789 	/*
8790 	 * Prevent the FW from wandering off channel during association
8791 	 * by "protecting" the session with a time event.
8792 	 */
8793 	if (in->in_ni.ni_intval)
8794 		duration = in->in_ni.ni_intval * 2;
8795 	else
8796 		duration = IEEE80211_DUR_TU;
8797 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8798 
8799 	return 0;
8800 
8801 rm_binding:
8802 	if (generation == sc->sc_generation) {
8803 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8804 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8805 	}
8806 rm_mac_ctxt:
8807 	if (generation == sc->sc_generation) {
8808 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8809 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8810 	}
8811 	return err;
8812 }
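
/*
 * Worked example for the session protection in iwm_auth() above, with
 * an assumed (typical) beacon interval: if ni_intval is 100 TU, the
 * firmware is asked to stay on channel for duration = 2 * 100 = 200 TU,
 * and ni_intval / 2 = 50 TU is passed as the final argument to
 * iwm_protect_session().
 */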
8813 
8814 int
8815 iwm_deauth(struct iwm_softc *sc)
8816 {
8817 	struct ieee80211com *ic = &sc->sc_ic;
8818 	struct iwm_node *in = (void *)ic->ic_bss;
8819 	int err;
8820 
8821 	splassert(IPL_NET);
8822 
8823 	iwm_unprotect_session(sc, in);
8824 
8825 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8826 		err = iwm_flush_sta(sc, in);
8827 		if (err)
8828 			return err;
8829 		err = iwm_rm_sta_cmd(sc, in);
8830 		if (err) {
8831 			printf("%s: could not remove STA (error %d)\n",
8832 			    DEVNAME(sc), err);
8833 			return err;
8834 		}
8835 		in->tid_disable_ampdu = 0xffff;
8836 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8837 		sc->sc_rx_ba_sessions = 0;
8838 		sc->ba_rx.start_tidmask = 0;
8839 		sc->ba_rx.stop_tidmask = 0;
8840 		sc->tx_ba_queue_mask = 0;
8841 		sc->ba_tx.start_tidmask = 0;
8842 		sc->ba_tx.stop_tidmask = 0;
8843 	}
8844 
8845 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8846 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8847 		if (err) {
8848 			printf("%s: could not remove binding (error %d)\n",
8849 			    DEVNAME(sc), err);
8850 			return err;
8851 		}
8852 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8853 	}
8854 
8855 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8856 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8857 		if (err) {
8858 			printf("%s: could not remove MAC context (error %d)\n",
8859 			    DEVNAME(sc), err);
8860 			return err;
8861 		}
8862 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8863 	}
8864 
8865 	/* Move unused PHY context to a default channel. */
8866 	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8867 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8868 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8869 	if (err)
8870 		return err;
8871 
8872 	return 0;
8873 }
8874 
8875 int
8876 iwm_run(struct iwm_softc *sc)
8877 {
8878 	struct ieee80211com *ic = &sc->sc_ic;
8879 	struct iwm_node *in = (void *)ic->ic_bss;
8880 	struct ieee80211_node *ni = &in->in_ni;
8881 	int err;
8882 
8883 	splassert(IPL_NET);
8884 
8885 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8886 		/* Add a MAC context and a sniffing STA. */
8887 		err = iwm_auth(sc);
8888 		if (err)
8889 			return err;
8890 	}
8891 
8892 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8893 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8894 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8895 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8896 		    in->in_phyctxt->channel, chains, chains,
8897 		    0, IEEE80211_HTOP0_SCO_SCN,
8898 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8899 		if (err) {
8900 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8901 			return err;
8902 		}
8903 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8904 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8905 		uint8_t sco, vht_chan_width;
8906 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8907 		    ieee80211_node_supports_ht_chan40(ni))
8908 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8909 		else
8910 			sco = IEEE80211_HTOP0_SCO_SCN;
8911 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8912 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8913 		    ieee80211_node_supports_vht_chan80(ni))
8914 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8915 		else
8916 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8917 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8918 		    in->in_phyctxt->channel, chains, chains,
8919 		    0, sco, vht_chan_width);
8920 		if (err) {
8921 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8922 			return err;
8923 		}
8924 	}
8925 
8926 	/* Update STA again to apply HT and VHT settings. */
8927 	err = iwm_add_sta_cmd(sc, in, 1);
8928 	if (err) {
8929 		printf("%s: could not update STA (error %d)\n",
8930 		    DEVNAME(sc), err);
8931 		return err;
8932 	}
8933 
8934 	/* We have now been assigned an associd by the AP. */
8935 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8936 	if (err) {
8937 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8938 		return err;
8939 	}
8940 
8941 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8942 	if (err) {
8943 		printf("%s: could not set sf full on (error %d)\n",
8944 		    DEVNAME(sc), err);
8945 		return err;
8946 	}
8947 
8948 	err = iwm_allow_mcast(sc);
8949 	if (err) {
8950 		printf("%s: could not allow mcast (error %d)\n",
8951 		    DEVNAME(sc), err);
8952 		return err;
8953 	}
8954 
8955 	err = iwm_power_update_device(sc);
8956 	if (err) {
8957 		printf("%s: could not send power command (error %d)\n",
8958 		    DEVNAME(sc), err);
8959 		return err;
8960 	}
8961 #ifdef notyet
8962 	/*
8963 	 * Disabled for now. Default beacon filter settings
8964 	 * prevent net80211 from getting ERP and HT protection
8965 	 * updates from beacons.
8966 	 */
8967 	err = iwm_enable_beacon_filter(sc, in);
8968 	if (err) {
8969 		printf("%s: could not enable beacon filter\n",
8970 		    DEVNAME(sc));
8971 		return err;
8972 	}
8973 #endif
8974 	err = iwm_power_mac_update_mode(sc, in);
8975 	if (err) {
8976 		printf("%s: could not update MAC power (error %d)\n",
8977 		    DEVNAME(sc), err);
8978 		return err;
8979 	}
8980 
8981 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
8982 		err = iwm_update_quotas(sc, in, 1);
8983 		if (err) {
8984 			printf("%s: could not update quotas (error %d)\n",
8985 			    DEVNAME(sc), err);
8986 			return err;
8987 		}
8988 	}
8989 
8990 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
8991 	ieee80211_ra_node_init(&in->in_rn);
8992 	ieee80211_ra_vht_node_init(&in->in_rn_vht);
8993 
8994 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8995 		iwm_led_blink_start(sc);
8996 		return 0;
8997 	}
8998 
8999 	/* Start at lowest available bit-rate, AMRR will raise. */
9000 	in->in_ni.ni_txrate = 0;
9001 	in->in_ni.ni_txmcs = 0;
9002 	in->in_ni.ni_vht_ss = 1;
9003 	iwm_setrates(in, 0);
9004 
9005 	timeout_add_msec(&sc->sc_calib_to, 500);
9006 	iwm_led_enable(sc);
9007 
9008 	return 0;
9009 }
9010 
9011 int
9012 iwm_run_stop(struct iwm_softc *sc)
9013 {
9014 	struct ieee80211com *ic = &sc->sc_ic;
9015 	struct iwm_node *in = (void *)ic->ic_bss;
9016 	struct ieee80211_node *ni = &in->in_ni;
9017 	int err, i, tid;
9018 
9019 	splassert(IPL_NET);
9020 
9021 	/*
9022 	 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
9023 	 * for this when moving out of RUN state since it runs in a
9024 	 * separate thread.
9025 	 * Note that in->in_ni (struct ieee80211_node) already represents
9026 	 * our new access point in case we are roaming between APs.
9027 	 * This means we cannot rely on struct ieee80211_node to tell
9028 	 * us which BA sessions exist.
9029 	 */
9030 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9031 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9032 		if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
9033 			continue;
9034 		err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
9035 		if (err)
9036 			return err;
9037 		iwm_clear_reorder_buffer(sc, rxba);
9038 		if (sc->sc_rx_ba_sessions > 0)
9039 			sc->sc_rx_ba_sessions--;
9040 	}
9041 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
9042 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
9043 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
9044 			continue;
9045 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
9046 		if (err)
9047 			return err;
9048 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
9049 		if (err)
9050 			return err;
9051 		in->tfd_queue_msk &= ~(1 << qid);
9052 	}
9053 	ieee80211_ba_del(ni);
9054 
9055 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
9056 		iwm_led_blink_stop(sc);
9057 
9058 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
9059 	if (err)
9060 		return err;
9061 
9062 	iwm_disable_beacon_filter(sc);
9063 
9064 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9065 		err = iwm_update_quotas(sc, in, 0);
9066 		if (err) {
9067 			printf("%s: could not update quotas (error %d)\n",
9068 			    DEVNAME(sc), err);
9069 			return err;
9070 		}
9071 	}
9072 
9073 	/* Mark station as disassociated. */
9074 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
9075 	if (err) {
9076 		printf("%s: failed to update MAC\n", DEVNAME(sc));
9077 		return err;
9078 	}
9079 
9080 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
9081 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
9082 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
9083 		    in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
9084 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9085 		if (err) {
9086 			printf("%s: failed to update PHY\n", DEVNAME(sc));
9087 			return err;
9088 		}
9089 	}
9090 
9091 	return 0;
9092 }
9093 
9094 struct ieee80211_node *
9095 iwm_node_alloc(struct ieee80211com *ic)
9096 {
9097 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
9098 }
9099 
9100 int
9101 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9102     struct ieee80211_key *k)
9103 {
9104 	struct iwm_softc *sc = ic->ic_softc;
9105 	struct iwm_add_sta_key_cmd_v1 cmd;
9106 
9107 	memset(&cmd, 0, sizeof(cmd));
9108 
9109 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9110 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9111 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9112 	    IWM_STA_KEY_FLG_KEYID_MSK));
9113 	if (k->k_flags & IEEE80211_KEY_GROUP)
9114 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9115 
9116 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9117 	cmd.common.key_offset = 0;
9118 	cmd.common.sta_id = IWM_STATION_ID;
9119 
9120 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9121 	    sizeof(cmd), &cmd);
9122 }
9123 
9124 int
9125 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9126     struct ieee80211_key *k)
9127 {
9128 	struct iwm_softc *sc = ic->ic_softc;
9129 	struct iwm_add_sta_key_cmd cmd;
9130 
9131 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9132 	    k->k_cipher != IEEE80211_CIPHER_CCMP)  {
9133 		/* Use software crypto for group keys and other ciphers. */
9134 		return (ieee80211_set_key(ic, ni, k));
9135 	}
9136 
9137 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9138 		return iwm_set_key_v1(ic, ni, k);
9139 
9140 	memset(&cmd, 0, sizeof(cmd));
9141 
9142 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9143 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9144 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9145 	    IWM_STA_KEY_FLG_KEYID_MSK));
9146 	if (k->k_flags & IEEE80211_KEY_GROUP)
9147 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9148 
9149 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9150 	cmd.common.key_offset = 0;
9151 	cmd.common.sta_id = IWM_STATION_ID;
9152 
9153 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
9154 
9155 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9156 	    sizeof(cmd), &cmd);
9157 }
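
/*
 * Sketch of the offload decision in iwm_set_key() above: only CCMP
 * pairwise keys are programmed into the firmware; group keys and all
 * other ciphers fall back to net80211 software crypto.
 */
#if 0 /* example only, not compiled; k is a struct ieee80211_key * */
	int hw_key = (k->k_cipher == IEEE80211_CIPHER_CCMP &&
	    (k->k_flags & IEEE80211_KEY_GROUP) == 0);
#endif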
9158 
9159 void
9160 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9161     struct ieee80211_key *k)
9162 {
9163 	struct iwm_softc *sc = ic->ic_softc;
9164 	struct iwm_add_sta_key_cmd_v1 cmd;
9165 
9166 	memset(&cmd, 0, sizeof(cmd));
9167 
9168 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9169 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9170 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9171 	    IWM_STA_KEY_FLG_KEYID_MSK));
9172 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9173 	cmd.common.key_offset = 0;
9174 	cmd.common.sta_id = IWM_STATION_ID;
9175 
9176 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9177 }
9178 
9179 void
9180 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9181     struct ieee80211_key *k)
9182 {
9183 	struct iwm_softc *sc = ic->ic_softc;
9184 	struct iwm_add_sta_key_cmd cmd;
9185 
9186 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9187 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
9188 		/* Use software crypto for group keys and other ciphers. */
9189 		ieee80211_delete_key(ic, ni, k);
9190 		return;
9191 	}
9192 
9193 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9194 		return iwm_delete_key_v1(ic, ni, k);
9195 
9196 	memset(&cmd, 0, sizeof(cmd));
9197 
9198 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9199 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9200 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9201 	    IWM_STA_KEY_FLG_KEYID_MSK));
9202 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9203 	cmd.common.key_offset = 0;
9204 	cmd.common.sta_id = IWM_STATION_ID;
9205 
9206 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9207 }
9208 
9209 void
9210 iwm_calib_timeout(void *arg)
9211 {
9212 	struct iwm_softc *sc = arg;
9213 	struct ieee80211com *ic = &sc->sc_ic;
9214 	struct iwm_node *in = (void *)ic->ic_bss;
9215 	struct ieee80211_node *ni = &in->in_ni;
9216 	int s;
9217 
9218 	s = splnet();
9219 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
9220 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
9221 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
9222 		int old_txrate = ni->ni_txrate;
9223 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
9224 		/*
9225 		 * If AMRR has chosen a new TX rate we must update
9226 		 * the firmware's LQ rate table.
9227 		 * ni_txrate may change again before the task runs so
9228 		 * cache the chosen rate in the iwm_node structure.
9229 		 */
9230 		if (ni->ni_txrate != old_txrate)
9231 			iwm_setrates(in, 1);
9232 	}
9233 
9234 	splx(s);
9235 
9236 	timeout_add_msec(&sc->sc_calib_to, 500);
9237 }
9238 
9239 void
9240 iwm_set_rate_table_vht(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9241 {
9242 	struct ieee80211_node *ni = &in->in_ni;
9243 	struct ieee80211com *ic = ni->ni_ic;
9244 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9245 	int ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9246 	int i, tab, txmcs;
9247 
9248 	/*
9249 	 * Fill the LQ rate selection table with VHT rates in descending
9250 	 * order, i.e. with the node's current TX rate first. Keep reducing
9251 	 * channel width during later Tx attempts, and eventually fall back
9252 	 * to legacy OFDM. Do not mix SISO and MIMO rates.
9253 	 */
9254 	lqcmd->mimo_delim = 0;
9255 	txmcs = ni->ni_txmcs;
9256 	for (i = 0; i < nitems(lqcmd->rs_table); i++) {
9257 		if (txmcs >= 0) {
9258 			tab = IWM_RATE_MCS_VHT_MSK;
9259 			tab |= txmcs & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9260 			tab |= ((ni->ni_vht_ss - 1) <<
9261 			    IWM_RATE_VHT_MCS_NSS_POS) &
9262 			    IWM_RATE_VHT_MCS_NSS_MSK;
9263 			if (ni->ni_vht_ss > 1)
9264 				tab |= IWM_RATE_MCS_ANT_AB_MSK;
9265 			else
9266 				tab |= iwm_valid_siso_ant_rate_mask(sc);
9267 
9268 			/*
9269 			 * First two Tx attempts may use 80MHz/40MHz/SGI.
9270 			 * Next two Tx attempts may use 40MHz/SGI.
9271 			 * Beyond that use 20 MHz and decrease the rate.
9272 			 * As a special case, MCS 9 is invalid on 20 Mhz.
9273 			 * As a special case, MCS 9 is invalid on 20 MHz.
9274 			if (txmcs == 9) {
9275 				if (i < 2 && in->in_phyctxt->vht_chan_width >=
9276 				    IEEE80211_VHTOP0_CHAN_WIDTH_80)
9277 					tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9278 				else
9279 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9280 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9281 				if (i < 4) {
9282 					if (ieee80211_ra_vht_use_sgi(ni))
9283 						tab |= IWM_RATE_MCS_SGI_MSK;
9284 				} else
9285 					txmcs--;
9286 			} else if (i < 2 && in->in_phyctxt->vht_chan_width >=
9287 			    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
9288 				tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9289 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9290 				if (ieee80211_ra_vht_use_sgi(ni))
9291 					tab |= IWM_RATE_MCS_SGI_MSK;
9292 			} else if (i < 4 &&
9293 			    in->in_phyctxt->vht_chan_width >=
9294 			    IEEE80211_VHTOP0_CHAN_WIDTH_HT &&
9295 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
9296 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
9297 				tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9298 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9299 				if (ieee80211_ra_vht_use_sgi(ni))
9300 					tab |= IWM_RATE_MCS_SGI_MSK;
9301 			} else if (txmcs >= 0)
9302 				txmcs--;
9303 		} else {
9304 			/* Fill the rest with the lowest possible rate. */
9305 			tab = iwm_rates[ridx_min].plcp;
9306 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9307 			if (ni->ni_vht_ss > 1 && lqcmd->mimo_delim == 0)
9308 				lqcmd->mimo_delim = i;
9309 		}
9310 
9311 		lqcmd->rs_table[i] = htole32(tab);
9312 	}
9313 }
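
/*
 * Worked example of a VHT rs_table entry as composed above, with
 * assumed values (MCS 7, 2 spatial streams, 80MHz, short GI):
 */
#if 0 /* example only, not compiled */
	uint32_t tab;

	tab = IWM_RATE_MCS_VHT_MSK;
	tab |= 7 & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
	tab |= ((2 - 1) << IWM_RATE_VHT_MCS_NSS_POS) &
	    IWM_RATE_VHT_MCS_NSS_MSK;
	tab |= IWM_RATE_MCS_ANT_AB_MSK;		/* MIMO uses both antennas */
	tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
	tab |= IWM_RATE_MCS_SGI_MSK;
	/*
	 * The driver additionally sets IWM_RATE_MCS_RTS_REQUIRED_MSK for
	 * 40MHz/80MHz entries and stores the result via htole32(tab).
	 */
#endif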
9314 
9315 void
9316 iwm_set_rate_table(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9317 {
9318 	struct ieee80211_node *ni = &in->in_ni;
9319 	struct ieee80211com *ic = ni->ni_ic;
9320 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9321 	struct ieee80211_rateset *rs = &ni->ni_rates;
9322 	int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
9323 
9324 	/*
9325 	 * Fill the LQ rate selection table with legacy and/or HT rates
9326 	 * in descending order, i.e. with the node's current TX rate first.
9327 	 * In cases where throughput of an HT rate corresponds to a legacy
9328 	 * rate, it makes no sense to add both. We rely on the fact that
9329 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
9330 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
9331 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
9332 	 */
9333 	j = 0;
9334 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9335 	mimo = iwm_is_mimo_ht_mcs(ni->ni_txmcs);
9336 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
9337 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
9338 		uint8_t plcp = iwm_rates[ridx].plcp;
9339 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
9340 
9341 		if (j >= nitems(lqcmd->rs_table))
9342 			break;
9343 		tab = 0;
9344 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9345 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
9346 				continue;
9347 			/* Do not mix SISO and MIMO HT rates. */
9348 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
9349 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
9350 				continue;
9351 			for (i = ni->ni_txmcs; i >= 0; i--) {
9352 				if (isclr(ni->ni_rxmcs, i))
9353 					continue;
9354 				if (ridx != iwm_ht_mcs2ridx[i])
9355 					continue;
9356 				tab = ht_plcp;
9357 				tab |= IWM_RATE_MCS_HT_MSK;
9358 				/* First two Tx attempts may use 40MHz/SGI. */
9359 				if (j > 1)
9360 					break;
9361 				if (in->in_phyctxt->sco ==
9362 				    IEEE80211_HTOP0_SCO_SCA ||
9363 				    in->in_phyctxt->sco ==
9364 				    IEEE80211_HTOP0_SCO_SCB) {
9365 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9366 					tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9367 				}
9368 				if (ieee80211_ra_use_ht_sgi(ni))
9369 					tab |= IWM_RATE_MCS_SGI_MSK;
9370 				break;
9371 			}
9372 		} else if (plcp != IWM_RATE_INVM_PLCP) {
9373 			for (i = ni->ni_txrate; i >= 0; i--) {
9374 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
9375 				    IEEE80211_RATE_VAL)) {
9376 					tab = plcp;
9377 					break;
9378 				}
9379 			}
9380 		}
9381 
9382 		if (tab == 0)
9383 			continue;
9384 
9385 		if (iwm_is_mimo_ht_plcp(ht_plcp))
9386 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
9387 		else
9388 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9389 
9390 		if (IWM_RIDX_IS_CCK(ridx))
9391 			tab |= IWM_RATE_MCS_CCK_MSK;
9392 		lqcmd->rs_table[j++] = htole32(tab);
9393 	}
9394 
9395 	lqcmd->mimo_delim = (mimo ? j : 0);
9396 
9397 	/* Fill the rest with the lowest possible rate */
9398 	while (j < nitems(lqcmd->rs_table)) {
9399 		tab = iwm_rates[ridx_min].plcp;
9400 		if (IWM_RIDX_IS_CCK(ridx_min))
9401 			tab |= IWM_RATE_MCS_CCK_MSK;
9402 		tab |= iwm_valid_siso_ant_rate_mask(sc);
9403 		lqcmd->rs_table[j++] = htole32(tab);
9404 	}
9405 }
9406 
9407 void
9408 iwm_setrates(struct iwm_node *in, int async)
9409 {
9410 	struct ieee80211_node *ni = &in->in_ni;
9411 	struct ieee80211com *ic = ni->ni_ic;
9412 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9413 	struct iwm_lq_cmd lqcmd;
9414 	struct iwm_host_cmd cmd = {
9415 		.id = IWM_LQ_CMD,
9416 		.len = { sizeof(lqcmd), },
9417 	};
9418 
9419 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
9420 
9421 	memset(&lqcmd, 0, sizeof(lqcmd));
9422 	lqcmd.sta_id = IWM_STATION_ID;
9423 
9424 	if (ic->ic_flags & IEEE80211_F_USEPROT)
9425 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
9426 
9427 	if (ni->ni_flags & IEEE80211_NODE_VHT)
9428 		iwm_set_rate_table_vht(in, &lqcmd);
9429 	else
9430 		iwm_set_rate_table(in, &lqcmd);
9431 
9432 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
9433 	    (iwm_fw_valid_tx_ant(sc) & IWM_ANT_B))
9434 		lqcmd.single_stream_ant_msk = IWM_ANT_B;
9435 	else
9436 		lqcmd.single_stream_ant_msk = IWM_ANT_A;
9437 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
9438 
9439 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
9440 	lqcmd.agg_disable_start_th = 3;
9441 	lqcmd.agg_frame_cnt_limit = 0x3f;
9442 
9443 	cmd.data[0] = &lqcmd;
9444 	iwm_send_cmd(sc, &cmd);
9445 }
9446 
9447 int
9448 iwm_media_change(struct ifnet *ifp)
9449 {
9450 	struct iwm_softc *sc = ifp->if_softc;
9451 	struct ieee80211com *ic = &sc->sc_ic;
9452 	uint8_t rate, ridx;
9453 	int err;
9454 
9455 	err = ieee80211_media_change(ifp);
9456 	if (err != ENETRESET)
9457 		return err;
9458 
9459 	if (ic->ic_fixed_mcs != -1)
9460 		sc->sc_fixed_ridx = iwm_ht_mcs2ridx[ic->ic_fixed_mcs];
9461 	else if (ic->ic_fixed_rate != -1) {
9462 		rate = ic->ic_sup_rates[ic->ic_curmode].
9463 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
9464 		/* Map 802.11 rate to HW rate index. */
9465 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
9466 			if (iwm_rates[ridx].rate == rate)
9467 				break;
9468 		sc->sc_fixed_ridx = ridx;
9469 	}
9470 
9471 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9472 	    (IFF_UP | IFF_RUNNING)) {
9473 		iwm_stop(ifp);
9474 		err = iwm_init(ifp);
9475 	}
9476 	return err;
9477 }
9478 
9479 void
9480 iwm_newstate_task(void *psc)
9481 {
9482 	struct iwm_softc *sc = (struct iwm_softc *)psc;
9483 	struct ieee80211com *ic = &sc->sc_ic;
9484 	enum ieee80211_state nstate = sc->ns_nstate;
9485 	enum ieee80211_state ostate = ic->ic_state;
9486 	int arg = sc->ns_arg;
9487 	int err = 0, s = splnet();
9488 
9489 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9490 		/* iwm_stop() is waiting for us. */
9491 		refcnt_rele_wake(&sc->task_refs);
9492 		splx(s);
9493 		return;
9494 	}
9495 
9496 	if (ostate == IEEE80211_S_SCAN) {
9497 		if (nstate == ostate) {
9498 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
9499 				refcnt_rele_wake(&sc->task_refs);
9500 				splx(s);
9501 				return;
9502 			}
9503 			/* Firmware is no longer scanning. Do another scan. */
9504 			goto next_scan;
9505 		} else
9506 			iwm_led_blink_stop(sc);
9507 	}
9508 
9509 	if (nstate <= ostate) {
9510 		switch (ostate) {
9511 		case IEEE80211_S_RUN:
9512 			err = iwm_run_stop(sc);
9513 			if (err)
9514 				goto out;
9515 			/* FALLTHROUGH */
9516 		case IEEE80211_S_ASSOC:
9517 		case IEEE80211_S_AUTH:
9518 			if (nstate <= IEEE80211_S_AUTH) {
9519 				err = iwm_deauth(sc);
9520 				if (err)
9521 					goto out;
9522 			}
9523 			/* FALLTHROUGH */
9524 		case IEEE80211_S_SCAN:
9525 		case IEEE80211_S_INIT:
9526 			break;
9527 		}
9528 
9529 		/* Die now if iwm_stop() was called while we were sleeping. */
9530 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9531 			refcnt_rele_wake(&sc->task_refs);
9532 			splx(s);
9533 			return;
9534 		}
9535 	}
9536 
9537 	switch (nstate) {
9538 	case IEEE80211_S_INIT:
9539 		break;
9540 
9541 	case IEEE80211_S_SCAN:
9542 next_scan:
9543 		err = iwm_scan(sc);
9544 		if (err)
9545 			break;
9546 		refcnt_rele_wake(&sc->task_refs);
9547 		splx(s);
9548 		return;
9549 
9550 	case IEEE80211_S_AUTH:
9551 		err = iwm_auth(sc);
9552 		break;
9553 
9554 	case IEEE80211_S_ASSOC:
9555 		break;
9556 
9557 	case IEEE80211_S_RUN:
9558 		err = iwm_run(sc);
9559 		break;
9560 	}
9561 
9562 out:
9563 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9564 		if (err)
9565 			task_add(systq, &sc->init_task);
9566 		else
9567 			sc->sc_newstate(ic, nstate, arg);
9568 	}
9569 	refcnt_rele_wake(&sc->task_refs);
9570 	splx(s);
9571 }
9572 
9573 int
9574 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9575 {
9576 	struct ifnet *ifp = IC2IFP(ic);
9577 	struct iwm_softc *sc = ifp->if_softc;
9578 
9579 	/*
9580 	 * Prevent attempts to transition towards the same state, unless
9581 	 * we are scanning, in which case a SCAN -> SCAN transition
9582 	 * triggers another scan iteration. And AUTH -> AUTH is needed
9583 	 * to support band-steering.
9584 	 */
9585 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9586 	    nstate != IEEE80211_S_AUTH)
9587 		return 0;
9588 
9589 	if (ic->ic_state == IEEE80211_S_RUN) {
9590 		timeout_del(&sc->sc_calib_to);
9591 		iwm_del_task(sc, systq, &sc->ba_task);
9592 		iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9593 		iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9594 		iwm_del_task(sc, systq, &sc->bgscan_done_task);
9595 	}
9596 
9597 	sc->ns_nstate = nstate;
9598 	sc->ns_arg = arg;
9599 
9600 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9601 
9602 	return 0;
9603 }
9604 
9605 void
9606 iwm_endscan(struct iwm_softc *sc)
9607 {
9608 	struct ieee80211com *ic = &sc->sc_ic;
9609 
9610 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9611 		return;
9612 
9613 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9614 	ieee80211_end_scan(&ic->ic_if);
9615 }
9616 
9617 /*
9618  * Aging and idle timeouts for the different possible scenarios
9619  * in the default configuration.
9620  */
9621 static const uint32_t
9622 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9623 	{
9624 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9625 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9626 	},
9627 	{
9628 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
9629 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9630 	},
9631 	{
9632 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
9633 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
9634 	},
9635 	{
9636 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
9637 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
9638 	},
9639 	{
9640 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
9641 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
9642 	},
9643 };
9644 
9645 /*
9646  * Aging and idle timeouts for the different possible scenarios
9647  * in a single BSS MAC configuration.
9648  */
9649 static const uint32_t
9650 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9651 	{
9652 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
9653 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
9654 	},
9655 	{
9656 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
9657 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
9658 	},
9659 	{
9660 		htole32(IWM_SF_MCAST_AGING_TIMER),
9661 		htole32(IWM_SF_MCAST_IDLE_TIMER)
9662 	},
9663 	{
9664 		htole32(IWM_SF_BA_AGING_TIMER),
9665 		htole32(IWM_SF_BA_IDLE_TIMER)
9666 	},
9667 	{
9668 		htole32(IWM_SF_TX_RE_AGING_TIMER),
9669 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
9670 	},
9671 };
9672 
9673 void
9674 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9675     struct ieee80211_node *ni)
9676 {
9677 	int i, j, watermark;
9678 
9679 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
9680 
9681 	/*
9682 	 * If we are in the association flow, check the antenna configuration
9683 	 * capabilities of the AP station and choose the watermark accordingly.
9684 	 */
9685 	if (ni) {
9686 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9687 			if (ni->ni_rxmcs[1] != 0)
9688 				watermark = IWM_SF_W_MARK_MIMO2;
9689 			else
9690 				watermark = IWM_SF_W_MARK_SISO;
9691 		} else {
9692 			watermark = IWM_SF_W_MARK_LEGACY;
9693 		}
9694 	/* default watermark value for unassociated mode. */
9695 	} else {
9696 		watermark = IWM_SF_W_MARK_MIMO2;
9697 	}
9698 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
9699 
9700 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
9701 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
9702 			sf_cmd->long_delay_timeouts[i][j] =
9703 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
9704 		}
9705 	}
9706 
9707 	if (ni) {
9708 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9709 		       sizeof(iwm_sf_full_timeout));
9710 	} else {
9711 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9712 		       sizeof(iwm_sf_full_timeout_def));
9713 	}
9715 }
9716 
9717 int
9718 iwm_sf_config(struct iwm_softc *sc, int new_state)
9719 {
9720 	struct ieee80211com *ic = &sc->sc_ic;
9721 	struct iwm_sf_cfg_cmd sf_cmd = {
9722 		.state = htole32(new_state),
9723 	};
9724 	int err = 0;
9725 
9726 #if 0	/* only used for models with sdio interface, in iwlwifi */
9727 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9728 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
9729 #endif
9730 
9731 	switch (new_state) {
9732 	case IWM_SF_UNINIT:
9733 	case IWM_SF_INIT_OFF:
9734 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
9735 		break;
9736 	case IWM_SF_FULL_ON:
9737 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9738 		break;
9739 	default:
9740 		return EINVAL;
9741 	}
9742 
9743 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9744 				   sizeof(sf_cmd), &sf_cmd);
9745 	return err;
9746 }
9747 
9748 int
9749 iwm_send_bt_init_conf(struct iwm_softc *sc)
9750 {
9751 	struct iwm_bt_coex_cmd bt_cmd;
9752 
9753 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
9754 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
9755 
9756 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9757 	    &bt_cmd);
9758 }
9759 
9760 int
9761 iwm_send_soc_conf(struct iwm_softc *sc)
9762 {
9763 	struct iwm_soc_configuration_cmd cmd;
9764 	int err;
9765 	uint32_t cmd_id, flags = 0;
9766 
9767 	memset(&cmd, 0, sizeof(cmd));
9768 
9769 	/*
9770 	 * In VER_1 of this command, the discrete value is considered
9771 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
9772 	 * values in VER_1, this is backwards-compatible with VER_2,
9773 	 * as long as we don't set any other flag bits.
9774 	 */
9775 	if (!sc->sc_integrated) { /* VER_1 */
9776 		flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9777 	} else { /* VER_2 */
9778 		uint8_t scan_cmd_ver;
9779 		if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9780 			flags |= (sc->sc_ltr_delay &
9781 			    IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9782 		scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9783 		    IWM_SCAN_REQ_UMAC);
9784 		if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
9785 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9786 			flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9787 	}
9788 	cmd.flags = htole32(flags);
9789 
9790 	cmd.latency = htole32(sc->sc_xtal_latency);
9791 
9792 	cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
9793 	err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9794 	if (err)
9795 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9796 	return err;
9797 }
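
/*
 * Worked illustration of the VER_1/VER_2 compatibility argument in
 * iwm_send_soc_conf() above, assuming IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE
 * is bit 0: the only two VER_1 integer values, 0 (integrated) and
 * 1 (discrete), are identical to the VER_2 bitmasks 0x0 and 0x1, so
 * VER_2 firmware decodes a VER_1-style command correctly as long as no
 * other flag bits are set.
 */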
9798 
9799 int
9800 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9801 {
9802 	struct iwm_mcc_update_cmd mcc_cmd;
9803 	struct iwm_host_cmd hcmd = {
9804 		.id = IWM_MCC_UPDATE_CMD,
9805 		.flags = IWM_CMD_WANT_RESP,
9806 		.resp_pkt_len = IWM_CMD_RESP_MAX,
9807 		.data = { &mcc_cmd },
9808 	};
9809 	struct iwm_rx_packet *pkt;
9810 	size_t resp_len;
9811 	int err;
9812 	int resp_v3 = isset(sc->sc_enabled_capa,
9813 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);
9814 
9815 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9816 	    !sc->sc_nvm.lar_enabled) {
9817 		return 0;
9818 	}
9819 
9820 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9821 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
9822 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9823 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9824 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
9825 	else
9826 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
9827 
9828 	if (resp_v3) { /* same size as resp_v2 */
9829 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9830 	} else {
9831 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9832 	}
9833 
9834 	err = iwm_send_cmd(sc, &hcmd);
9835 	if (err)
9836 		return err;
9837 
9838 	pkt = hcmd.resp_pkt;
9839 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
9840 		err = EIO;
9841 		goto out;
9842 	}
9843 
9844 	if (resp_v3) {
9845 		struct iwm_mcc_update_resp_v3 *resp;
9846 		resp_len = iwm_rx_packet_payload_len(pkt);
9847 		if (resp_len < sizeof(*resp)) {
9848 			err = EIO;
9849 			goto out;
9850 		}
9851 
9852 		resp = (void *)pkt->data;
9853 		if (resp_len != sizeof(*resp) +
9854 		    resp->n_channels * sizeof(resp->channels[0])) {
9855 			err = EIO;
9856 			goto out;
9857 		}
9858 	} else {
9859 		struct iwm_mcc_update_resp_v1 *resp_v1;
9860 		resp_len = iwm_rx_packet_payload_len(pkt);
9861 		if (resp_len < sizeof(*resp_v1)) {
9862 			err = EIO;
9863 			goto out;
9864 		}
9865 
9866 		resp_v1 = (void *)pkt->data;
9867 		if (resp_len != sizeof(*resp_v1) +
9868 		    resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9869 			err = EIO;
9870 			goto out;
9871 		}
9872 	}
9873 out:
9874 	iwm_free_resp(sc, &hcmd);
9875 	return err;
9876 }
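
/*
 * Sketch of the variable-length response check used above: a response
 * carrying n_channels entries must be exactly the header size plus
 * n_channels array elements, which guards against truncated or padded
 * packets before the channel data is trusted.
 */
#if 0 /* example only, not compiled; resp/pkt as in the v3 branch */
	size_t expected = sizeof(*resp) +
	    resp->n_channels * sizeof(resp->channels[0]);

	if (iwm_rx_packet_payload_len(pkt) != expected)
		err = EIO;	/* reject malformed response */
#endif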
9877 
9878 int
9879 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9880 {
9881 	struct iwm_temp_report_ths_cmd cmd;
9882 	int err;
9883 
9884 	/*
9885 	 * In order to give responsibility for critical-temperature-kill
9886 	 * and TX backoff to FW, we need to send an empty temperature
9887 	 * reporting command at init time.
9888 	 */
9889 	memset(&cmd, 0, sizeof(cmd));
9890 
9891 	err = iwm_send_cmd_pdu(sc,
9892 	    IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
9893 	    0, sizeof(cmd), &cmd);
9894 	if (err)
9895 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9896 		    DEVNAME(sc), err);
9897 
9898 	return err;
9899 }
9900 
9901 void
9902 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9903 {
9904 	struct iwm_host_cmd cmd = {
9905 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
9906 		.len = { sizeof(uint32_t), },
9907 		.data = { &backoff, },
9908 	};
9909 
9910 	iwm_send_cmd(sc, &cmd);
9911 }
9912 
9913 void
9914 iwm_free_fw_paging(struct iwm_softc *sc)
9915 {
9916 	int i;
9917 
9918 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9919 		return;
9920 
9921 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
9922 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9923 	}
9924 
9925 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9926 }
9927 
9928 int
9929 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9930 {
9931 	int sec_idx, idx;
9932 	uint32_t offset = 0;
9933 
9934 	/*
9935 	 * Find the start point of the paging image. If a CPU2 image
9936 	 * exists and is in paging format, the image looks like this:
9937 	 * CPU1 sections (2 or more)
9938 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
9939 	 * CPU2 sections (not paged)
9940 	 * PAGING_SEPARATOR_SECTION delimiter - separates non-paged CPU2
9941 	 * sections from the CPU2 paging sections
9942 	 * CPU2 paging CSS
9943 	 * CPU2 paging image (including instructions and data)
9944 	 */
9945 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
9946 		if (image->fw_sect[sec_idx].fws_devoff ==
9947 		    IWM_PAGING_SEPARATOR_SECTION) {
9948 			sec_idx++;
9949 			break;
9950 		}
9951 	}
9952 
9953 	/*
9954 	 * If paging is enabled, there should be at least 2 more sections
9955 	 * left (one for the CSS and one for the paging data).
9956 	 */
9957 	if (sec_idx >= nitems(image->fw_sect) - 1) {
9958 		printf("%s: Paging: Missing CSS and/or paging sections\n",
9959 		    DEVNAME(sc));
9960 		iwm_free_fw_paging(sc);
9961 		return EINVAL;
9962 	}
9963 
9964 	/* Copy the CSS block to DRAM. */
9965 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
9966 	    DEVNAME(sc), sec_idx));
9967 
9968 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
9969 	    image->fw_sect[sec_idx].fws_data,
9970 	    sc->fw_paging_db[0].fw_paging_size);
9971 
9972 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
9973 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
9974 
9975 	sec_idx++;
9976 
9977 	/*
9978 	 * Copy the paging blocks to DRAM. The loop index starts at 1
9979 	 * because the CSS block has already been copied to DRAM and
9980 	 * occupies index 0. The loop stops before the last block, which
9981 	 * may not be full and is copied separately below.
9982 	 */
9983 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
9984 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9985 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9986 		    sc->fw_paging_db[idx].fw_paging_size);
9987 
9988 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
9989 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
9990 
9991 		offset += sc->fw_paging_db[idx].fw_paging_size;
9992 	}
9993 
9994 	/* copy the last paging block */
9995 	if (sc->num_of_pages_in_last_blk > 0) {
9996 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9997 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9998 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
9999 
10000 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
10001 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
10002 	}
10003 
10004 	return 0;
10005 }
10006 
10007 int
10008 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10009 {
10010 	int blk_idx = 0;
10011 	int error, num_of_pages;
10012 
10013 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
10014 		int i;
10015 		/* Device was reset; set up firmware paging again. */
10016 		bus_dmamap_sync(sc->sc_dmat,
10017 		    sc->fw_paging_db[0].fw_paging_block.map,
10018 		    0, IWM_FW_PAGING_SIZE,
10019 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10020 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
10021 			bus_dmamap_sync(sc->sc_dmat,
10022 			    sc->fw_paging_db[i].fw_paging_block.map,
10023 			    0, IWM_PAGING_BLOCK_SIZE,
10024 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10025 		}
10026 		return 0;
10027 	}
10028 
10029 	/* Ensure that (1 << IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE. */
10030 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
10031 #error IWM_BLOCK_2_EXP_SIZE must be the base-2 log of IWM_PAGING_BLOCK_SIZE
10032 #endif
10033 
10034 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
10035 	sc->num_of_paging_blk =
10036 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
10037 
10038 	sc->num_of_pages_in_last_blk =
10039 		num_of_pages -
10040 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
10041 
10042 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
10043 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
10044 	    sc->num_of_paging_blk,
10045 	    sc->num_of_pages_in_last_blk));
10046 
10047 	/* Allocate a 4KB block for the paging CSS. */
10048 	error = iwm_dma_contig_alloc(sc->sc_dmat,
10049 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
10050 	    4096);
10051 	if (error) {
10052 		/* free all the previous pages since we failed */
10053 		iwm_free_fw_paging(sc);
10054 		return ENOMEM;
10055 	}
10056 
10057 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
10058 
10059 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
10060 	    DEVNAME(sc)));
10061 
10062 	/*
10063 	 * Allocate the data blocks in DRAM. Since the CSS occupies
10064 	 * fw_paging_db[0], the loop starts at index 1.
10065 	 */
10066 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10067 		/* Allocate a block of IWM_PAGING_BLOCK_SIZE (32KB). */
10069 		error = iwm_dma_contig_alloc(sc->sc_dmat,
10070 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
10071 		    IWM_PAGING_BLOCK_SIZE, 4096);
10072 		if (error) {
10073 			/* free all the previous pages since we failed */
10074 			iwm_free_fw_paging(sc);
10075 			return ENOMEM;
10076 		}
10077 
10078 		sc->fw_paging_db[blk_idx].fw_paging_size =
10079 		    IWM_PAGING_BLOCK_SIZE;
10080 
10081 		DPRINTF((
10082 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
10083 		    DEVNAME(sc)));
10084 	}
10085 
10086 	return 0;
10087 }
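
/*
 * Worked example of the paging geometry computed above, using the
 * block/page sizes mentioned in the comments (4KB pages, 8 pages per
 * 32KB block) and an assumed 1MB paging image:
 *   num_of_pages             = 1048576 / 4096 = 256
 *   num_of_paging_blk        = ((256 - 1) / 8) + 1 = 32
 *   num_of_pages_in_last_blk = 256 - 8 * (32 - 1) = 8
 * i.e. 32 data blocks of 32KB each, plus the separate 4KB CSS block.
 */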
10088 
10089 int
10090 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10091 {
10092 	int ret;
10093 
10094 	ret = iwm_alloc_fw_paging_mem(sc, fw);
10095 	if (ret)
10096 		return ret;
10097 
10098 	return iwm_fill_paging_mem(sc, fw);
10099 }
10100 
10101 /* Send a paging command to the FW if the CPU2 image uses paging. */
10102 int
10103 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10104 {
10105 	int blk_idx;
10106 	uint32_t dev_phy_addr;
10107 	struct iwm_fw_paging_cmd fw_paging_cmd = {
10108 		.flags =
10109 			htole32(IWM_PAGING_CMD_IS_SECURED |
10110 				IWM_PAGING_CMD_IS_ENABLED |
10111 				(sc->num_of_pages_in_last_blk <<
10112 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
10113 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
10114 		.block_num = htole32(sc->num_of_paging_blk),
10115 	};
10116 
10117 	/* Loop over all paging blocks plus the CSS block. */
10118 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10119 		dev_phy_addr = htole32(
10120 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
10121 		    IWM_PAGE_2_EXP_SIZE);
10122 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
10123 		bus_dmamap_sync(sc->sc_dmat,
10124 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
10125 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
10126 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10127 	}
10128 
10129 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
10130 					       IWM_LONG_GROUP, 0),
10131 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
10132 }
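
/*
 * Worked example of the block address encoding above, assuming 4KB
 * pages (IWM_PAGE_2_EXP_SIZE == 12): the firmware addresses paging
 * blocks in page units, so a DMA address is right-shifted before being
 * handed over. The address value is assumed for illustration.
 */
#if 0 /* example only, not compiled */
	bus_addr_t paddr = 0x1234000;
	uint32_t encoded = htole32(paddr >> IWM_PAGE_2_EXP_SIZE); /* 0x1234 */
#endif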
10133 
10134 int
10135 iwm_init_hw(struct iwm_softc *sc)
10136 {
10137 	struct ieee80211com *ic = &sc->sc_ic;
10138 	int err, i, ac, qid, s;
10139 
10140 	err = iwm_run_init_mvm_ucode(sc, 0);
10141 	if (err)
10142 		return err;
10143 
10144 	/* Stop and restart the HW since the INIT image was just loaded. */
10145 	iwm_stop_device(sc);
10146 	err = iwm_start_hw(sc);
10147 	if (err) {
10148 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10149 		return err;
10150 	}
10151 
10152 	/* Restart, this time with the regular firmware */
10153 	s = splnet();
10154 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
10155 	if (err) {
10156 		printf("%s: could not load firmware\n", DEVNAME(sc));
10157 		splx(s);
10158 		return err;
10159 	}
10160 
10161 	if (!iwm_nic_lock(sc)) {
10162 		splx(s);
10163 		return EBUSY;
10164 	}
10165 
10166 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
10167 	if (err) {
10168 		printf("%s: could not init tx ant config (error %d)\n",
10169 		    DEVNAME(sc), err);
10170 		goto err;
10171 	}
10172 
10173 	err = iwm_send_phy_db_data(sc);
10174 	if (err) {
10175 		printf("%s: could not init phy db (error %d)\n",
10176 		    DEVNAME(sc), err);
10177 		goto err;
10178 	}
10179 
10180 	err = iwm_send_phy_cfg_cmd(sc);
10181 	if (err) {
10182 		printf("%s: could not send phy config (error %d)\n",
10183 		    DEVNAME(sc), err);
10184 		goto err;
10185 	}
10186 
10187 	err = iwm_send_bt_init_conf(sc);
10188 	if (err) {
10189 		printf("%s: could not init bt coex (error %d)\n",
10190 		    DEVNAME(sc), err);
10191 		goto err;
10192 	}
10193 
10194 	if (isset(sc->sc_enabled_capa,
10195 	    IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
10196 		err = iwm_send_soc_conf(sc);
10197 		if (err)
10198 			goto err;
10199 	}
10200 
10201 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
10202 		err = iwm_send_dqa_cmd(sc);
10203 		if (err)
10204 			goto err;
10205 	}
10206 
10207 	/* Add auxiliary station for scanning */
10208 	err = iwm_add_aux_sta(sc);
10209 	if (err) {
10210 		printf("%s: could not add aux station (error %d)\n",
10211 		    DEVNAME(sc), err);
10212 		goto err;
10213 	}
10214 
10215 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
10216 		/*
10217 		 * The channel used here isn't relevant as it's
10218 		 * going to be overwritten in the other flows.
10219 		 * For now use the first channel we have.
10220 		 */
10221 		sc->sc_phyctxt[i].id = i;
10222 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
10223 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
10224 		    IWM_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
10225 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
10226 		if (err) {
10227 			printf("%s: could not add phy context %d (error %d)\n",
10228 			    DEVNAME(sc), i, err);
10229 			goto err;
10230 		}
10231 	}
10232 
10233 	/* Initialize tx backoffs to the minimum. */
10234 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
10235 		iwm_tt_tx_backoff(sc, 0);
10236 
10238 	err = iwm_config_ltr(sc);
10239 	if (err) {
10240 		printf("%s: PCIe LTR configuration failed (error %d)\n",
10241 		    DEVNAME(sc), err);
10242 	}
10243 
10244 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
10245 		err = iwm_send_temp_report_ths_cmd(sc);
10246 		if (err)
10247 			goto err;
10248 	}
10249 
10250 	err = iwm_power_update_device(sc);
10251 	if (err) {
10252 		printf("%s: could not send power command (error %d)\n",
10253 		    DEVNAME(sc), err);
10254 		goto err;
10255 	}
10256 
10257 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
10258 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
10259 		if (err) {
10260 			printf("%s: could not init LAR (error %d)\n",
10261 			    DEVNAME(sc), err);
10262 			goto err;
10263 		}
10264 	}
10265 
10266 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
10267 		err = iwm_config_umac_scan(sc);
10268 		if (err) {
10269 			printf("%s: could not configure scan (error %d)\n",
10270 			    DEVNAME(sc), err);
10271 			goto err;
10272 		}
10273 	}
10274 
10275 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10276 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10277 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
10278 		else
10279 			qid = IWM_AUX_QUEUE;
10280 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
10281 		    iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
10282 		if (err) {
10283 			printf("%s: could not enable monitor inject Tx queue "
10284 			    "(error %d)\n", DEVNAME(sc), err);
10285 			goto err;
10286 		}
10287 	} else {
10288 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
10289 			if (isset(sc->sc_enabled_capa,
10290 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10291 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
10292 			else
10293 				qid = ac;
10294 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
10295 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
10296 			if (err) {
10297 				printf("%s: could not enable Tx queue %d "
10298 				    "(error %d)\n", DEVNAME(sc), ac, err);
10299 				goto err;
10300 			}
10301 		}
10302 	}
10303 
10304 	err = iwm_disable_beacon_filter(sc);
10305 	if (err) {
10306 		printf("%s: could not disable beacon filter (error %d)\n",
10307 		    DEVNAME(sc), err);
10308 		goto err;
10309 	}
10310 
10311 err:
10312 	iwm_nic_unlock(sc);
10313 	splx(s);
10314 	return err;
10315 }
10316 
10317 /* Allow multicast from our BSSID. */
10318 int
10319 iwm_allow_mcast(struct iwm_softc *sc)
10320 {
10321 	struct ieee80211com *ic = &sc->sc_ic;
10322 	struct iwm_node *in = (void *)ic->ic_bss;
10323 	struct iwm_mcast_filter_cmd *cmd;
10324 	size_t size;
10325 	int err;
10326 
10327 	size = roundup(sizeof(*cmd), 4);
10328 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
10329 	if (cmd == NULL)
10330 		return ENOMEM;
10331 	cmd->filter_own = 1;
10332 	cmd->port_id = 0;
10333 	cmd->count = 0;
10334 	cmd->pass_all = 1;
10335 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
10336 
10337 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
10338 	    0, size, cmd);
10339 	free(cmd, M_DEVBUF, size);
10340 	return err;
10341 }
10342 
10343 int
10344 iwm_init(struct ifnet *ifp)
10345 {
10346 	struct iwm_softc *sc = ifp->if_softc;
10347 	struct ieee80211com *ic = &sc->sc_ic;
10348 	int err, generation;
10349 
10350 	rw_assert_wrlock(&sc->ioctl_rwl);
10351 
10352 	generation = ++sc->sc_generation;
10353 
10354 	KASSERT(sc->task_refs.r_refs == 0);
10355 	refcnt_init(&sc->task_refs);
10356 
10357 	err = iwm_preinit(sc);
10358 	if (err)
10359 		return err;
10360 
10361 	err = iwm_start_hw(sc);
10362 	if (err) {
10363 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10364 		return err;
10365 	}
10366 
10367 	err = iwm_init_hw(sc);
10368 	if (err) {
10369 		if (generation == sc->sc_generation)
10370 			iwm_stop(ifp);
10371 		return err;
10372 	}
10373 
10374 	if (sc->sc_nvm.sku_cap_11n_enable)
10375 		iwm_setup_ht_rates(sc);
10376 	if (sc->sc_nvm.sku_cap_11ac_enable)
10377 		iwm_setup_vht_rates(sc);
10378 
10379 	ifq_clr_oactive(&ifp->if_snd);
10380 	ifp->if_flags |= IFF_RUNNING;
10381 
10382 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10383 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
10384 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10385 		return 0;
10386 	}
10387 
10388 	ieee80211_begin_scan(ifp);
10389 
10390 	/*
10391 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
10392 	 * Wait until the transition to SCAN state has completed.
10393 	 */
10394 	do {
10395 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
10396 		    SEC_TO_NSEC(1));
10397 		if (generation != sc->sc_generation)
10398 			return ENXIO;
10399 		if (err) {
10400 			iwm_stop(ifp);
10401 			return err;
10402 		}
10403 	} while (ic->ic_state != IEEE80211_S_SCAN);
10404 
10405 	return 0;
10406 }
10407 
10408 void
10409 iwm_start(struct ifnet *ifp)
10410 {
10411 	struct iwm_softc *sc = ifp->if_softc;
10412 	struct ieee80211com *ic = &sc->sc_ic;
10413 	struct ieee80211_node *ni;
10414 	struct ether_header *eh;
10415 	struct mbuf *m;
10416 	int ac = EDCA_AC_BE; /* XXX */
10417 
10418 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
10419 		return;
10420 
10421 	for (;;) {
10422 		/* why isn't this done per-queue? */
10423 		if (sc->qfullmsk != 0) {
10424 			ifq_set_oactive(&ifp->if_snd);
10425 			break;
10426 		}
10427 
10428 		/* Don't queue additional frames while flushing Tx queues. */
10429 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10430 			break;
10431 
10432 		/* need to send management frames even if we're not RUNning */
10433 		m = mq_dequeue(&ic->ic_mgtq);
10434 		if (m) {
10435 			ni = m->m_pkthdr.ph_cookie;
10436 			goto sendit;
10437 		}
10438 
10439 		if (ic->ic_state != IEEE80211_S_RUN ||
10440 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
10441 			break;
10442 
10443 		m = ifq_dequeue(&ifp->if_snd);
10444 		if (!m)
10445 			break;
10446 		if (m->m_len < sizeof (*eh) &&
10447 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
10448 			ifp->if_oerrors++;
10449 			continue;
10450 		}
10451 #if NBPFILTER > 0
10452 		if (ifp->if_bpf != NULL)
10453 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
10454 #endif
10455 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
10456 			ifp->if_oerrors++;
10457 			continue;
10458 		}
10459 
10460  sendit:
10461 #if NBPFILTER > 0
10462 		if (ic->ic_rawbpf != NULL)
10463 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10464 #endif
10465 		if (iwm_tx(sc, m, ni, ac) != 0) {
10466 			ieee80211_release_node(ic, ni);
10467 			ifp->if_oerrors++;
10468 			continue;
10469 		}
10470 
10471 		if (ifp->if_flags & IFF_UP)
10472 			ifp->if_timer = 1;
10473 	}
10474 
10475 	return;
10476 }
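
/*
 * Frames on ic_mgtq carry their destination node in the mbuf packet
 * header, which is why iwm_start() can recover 'ni' without parsing
 * the frame. A sketch of the producer side, simplified from net80211
 * for illustration:
 *
 *	m->m_pkthdr.ph_cookie = ni;
 *	mq_enqueue(&ic->ic_mgtq, m);
 */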
10477 
10478 void
10479 iwm_stop(struct ifnet *ifp)
10480 {
10481 	struct iwm_softc *sc = ifp->if_softc;
10482 	struct ieee80211com *ic = &sc->sc_ic;
10483 	struct iwm_node *in = (void *)ic->ic_bss;
10484 	int i, s = splnet();
10485 
10486 	rw_assert_wrlock(&sc->ioctl_rwl);
10487 
10488 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10489 
10490 	/* Cancel scheduled tasks and let any stale tasks finish up. */
10491 	task_del(systq, &sc->init_task);
10492 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10493 	iwm_del_task(sc, systq, &sc->ba_task);
10494 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10495 	iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10496 	iwm_del_task(sc, systq, &sc->bgscan_done_task);
10497 	KASSERT(sc->task_refs.r_refs >= 1);
10498 	refcnt_finalize(&sc->task_refs, "iwmstop");
10499 
10500 	iwm_stop_device(sc);
10501 
10502 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10503 	sc->bgscan_unref_arg = NULL;
10504 	sc->bgscan_unref_arg_size = 0;
10505 
10506 	/* Reset soft state. */
10507 
10508 	sc->sc_generation++;
10509 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10510 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10511 		sc->sc_cmd_resp_pkt[i] = NULL;
10512 		sc->sc_cmd_resp_len[i] = 0;
10513 	}
10514 	ifp->if_flags &= ~IFF_RUNNING;
10515 	ifq_clr_oactive(&ifp->if_snd);
10516 
10517 	in->in_phyctxt = NULL;
10518 	in->tid_disable_ampdu = 0xffff;
10519 	in->tfd_queue_msk = 0;
10520 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10521 
10522 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10523 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10524 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10525 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10526 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10527 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10528 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10529 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10530 
10531 	sc->sc_rx_ba_sessions = 0;
10532 	sc->ba_rx.start_tidmask = 0;
10533 	sc->ba_rx.stop_tidmask = 0;
10534 	sc->tx_ba_queue_mask = 0;
10535 	sc->ba_tx.start_tidmask = 0;
10536 	sc->ba_tx.stop_tidmask = 0;
10537 
10538 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10539 	sc->ns_nstate = IEEE80211_S_INIT;
10540 
10541 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10542 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10543 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10544 		iwm_clear_reorder_buffer(sc, rxba);
10545 	}
10546 	iwm_led_blink_stop(sc);
10547 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10548 	ifp->if_timer = 0;
10549 
10550 	splx(s);
10551 }
10552 
10553 void
10554 iwm_watchdog(struct ifnet *ifp)
10555 {
10556 	struct iwm_softc *sc = ifp->if_softc;
10557 	int i;
10558 
10559 	ifp->if_timer = 0;
10560 
10561 	/*
10562 	 * We maintain a separate timer for each Tx queue because
10563 	 * Tx aggregation queues can get "stuck" while other queues
10564 	 * keep working. The Linux driver uses a similar workaround.
10565 	 */
10566 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10567 		if (sc->sc_tx_timer[i] > 0) {
10568 			if (--sc->sc_tx_timer[i] == 0) {
10569 				printf("%s: device timeout\n", DEVNAME(sc));
10570 				if (ifp->if_flags & IFF_DEBUG) {
10571 					iwm_nic_error(sc);
10572 					iwm_dump_driver_status(sc);
10573 				}
10574 				if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10575 					task_add(systq, &sc->init_task);
10576 				ifp->if_oerrors++;
10577 				return;
10578 			}
10579 			ifp->if_timer = 1;
10580 		}
10581 	}
10582 
10583 	ieee80211_watchdog(ifp);
10584 }
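
/*
 * The per-queue counters decremented above are armed by the Tx path:
 * when a frame is handed to the hardware, the driver sets the queue's
 * slot in sc_tx_timer to a timeout in seconds and schedules the
 * watchdog via ifp->if_timer. A minimal sketch of the arming side
 * (the timeout value shown is illustrative):
 *
 *	sc->sc_tx_timer[ring->qid] = 15;
 *	ifp->if_timer = 1;
 */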
10585 
10586 int
10587 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10588 {
10589 	struct iwm_softc *sc = ifp->if_softc;
10590 	int s, err = 0, generation = sc->sc_generation;
10591 
10592 	/*
10593 	 * Prevent processes from entering this function while another
10594 	 * process is tsleep'ing in it.
10595 	 */
10596 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10597 	if (err == 0 && generation != sc->sc_generation) {
10598 		rw_exit(&sc->ioctl_rwl);
10599 		return ENXIO;
10600 	}
10601 	if (err)
10602 		return err;
10603 	s = splnet();
10604 
10605 	switch (cmd) {
10606 	case SIOCSIFADDR:
10607 		ifp->if_flags |= IFF_UP;
10608 		/* FALLTHROUGH */
10609 	case SIOCSIFFLAGS:
10610 		if (ifp->if_flags & IFF_UP) {
10611 			if (!(ifp->if_flags & IFF_RUNNING)) {
10612 				/* Force reload of firmware image from disk. */
10613 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10614 				err = iwm_init(ifp);
10615 			}
10616 		} else {
10617 			if (ifp->if_flags & IFF_RUNNING)
10618 				iwm_stop(ifp);
10619 		}
10620 		break;
10621 
10622 	default:
10623 		err = ieee80211_ioctl(ifp, cmd, data);
10624 	}
10625 
10626 	if (err == ENETRESET) {
10627 		err = 0;
10628 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10629 		    (IFF_UP | IFF_RUNNING)) {
10630 			iwm_stop(ifp);
10631 			err = iwm_init(ifp);
10632 		}
10633 	}
10634 
10635 	splx(s);
10636 	rw_exit(&sc->ioctl_rwl);
10637 
10638 	return err;
10639 }
10640 
10641 /*
10642  * Note: This structure is read from the device with IO accesses,
10643  * and the reading already does the endian conversion. As it is
10644  * read with uint32_t-sized accesses, any members with a different size
10645  * need to be ordered correctly though!
10646  */
10647 struct iwm_error_event_table {
10648 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10649 	uint32_t error_id;		/* type of error */
10650 	uint32_t trm_hw_status0;	/* TRM HW status */
10651 	uint32_t trm_hw_status1;	/* TRM HW status */
10652 	uint32_t blink2;		/* branch link */
10653 	uint32_t ilink1;		/* interrupt link */
10654 	uint32_t ilink2;		/* interrupt link */
10655 	uint32_t data1;		/* error-specific data */
10656 	uint32_t data2;		/* error-specific data */
10657 	uint32_t data3;		/* error-specific data */
10658 	uint32_t bcon_time;		/* beacon timer */
10659 	uint32_t tsf_low;		/* network timestamp function timer */
10660 	uint32_t tsf_hi;		/* network timestamp function timer */
10661 	uint32_t gp1;		/* GP1 timer register */
10662 	uint32_t gp2;		/* GP2 timer register */
10663 	uint32_t fw_rev_type;	/* firmware revision type */
10664 	uint32_t major;		/* uCode version major */
10665 	uint32_t minor;		/* uCode version minor */
10666 	uint32_t hw_ver;		/* HW Silicon version */
10667 	uint32_t brd_ver;		/* HW board version */
10668 	uint32_t log_pc;		/* log program counter */
10669 	uint32_t frame_ptr;		/* frame pointer */
10670 	uint32_t stack_ptr;		/* stack pointer */
10671 	uint32_t hcmd;		/* last host command header */
10672 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
10673 				 * rxtx_flag */
10674 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
10675 				 * host_flag */
10676 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
10677 				 * enc_flag */
10678 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
10679 				 * time_flag */
10680 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
10681 				 * wico interrupt */
10682 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
10683 	uint32_t wait_event;		/* wait event() caller address */
10684 	uint32_t l2p_control;	/* L2pControlField */
10685 	uint32_t l2p_duration;	/* L2pDurationField */
10686 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
10687 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
10688 	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
10689 				 * (LMPM_PMG_SEL) */
10690 	uint32_t u_timestamp;	/* date and time of the
10691 				 * compilation */
10692 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
10693 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
10694 
10695 /*
10696  * UMAC error struct - relevant starting from family 8000 chip.
10697  * Note: This structure is read from the device with IO accesses,
10698  * and the reading already does the endian conversion. As it is
10699  * read with u32-sized accesses, any members with a different size
10700  * need to be ordered correctly though!
10701  */
10702 struct iwm_umac_error_event_table {
10703 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10704 	uint32_t error_id;	/* type of error */
10705 	uint32_t blink1;	/* branch link */
10706 	uint32_t blink2;	/* branch link */
10707 	uint32_t ilink1;	/* interrupt link */
10708 	uint32_t ilink2;	/* interrupt link */
10709 	uint32_t data1;		/* error-specific data */
10710 	uint32_t data2;		/* error-specific data */
10711 	uint32_t data3;		/* error-specific data */
10712 	uint32_t umac_major;
10713 	uint32_t umac_minor;
10714 	uint32_t frame_pointer;	/* core register 27*/
10715 	uint32_t stack_pointer;	/* core register 28 */
10716 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
10717 	uint32_t nic_isr_pref;	/* ISR status register */
10718 } __packed;
10719 
10720 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
10721 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
10722 
10723 void
10724 iwm_nic_umac_error(struct iwm_softc *sc)
10725 {
10726 	struct iwm_umac_error_event_table table;
10727 	uint32_t base;
10728 
10729 	base = sc->sc_uc.uc_umac_error_event_table;
10730 
10731 	if (base < 0x800000) {
10732 		printf("%s: Invalid error log pointer 0x%08x\n",
10733 		    DEVNAME(sc), base);
10734 		return;
10735 	}
10736 
10737 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10738 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10739 		return;
10740 	}
10741 
10742 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10743 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10744 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10745 			sc->sc_flags, table.valid);
10746 	}
10747 
10748 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10749 		iwm_desc_lookup(table.error_id));
10750 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10751 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10752 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10753 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10754 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10755 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10756 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10757 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10758 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10759 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10760 	    table.frame_pointer);
10761 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10762 	    table.stack_pointer);
10763 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10764 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10765 	    table.nic_isr_pref);
10766 }
10767 
10768 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10769 static struct {
10770 	const char *name;
10771 	uint8_t num;
10772 } advanced_lookup[] = {
10773 	{ "NMI_INTERRUPT_WDG", 0x34 },
10774 	{ "SYSASSERT", 0x35 },
10775 	{ "UCODE_VERSION_MISMATCH", 0x37 },
10776 	{ "BAD_COMMAND", 0x38 },
10777 	{ "BAD_COMMAND", 0x39 },
10778 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10779 	{ "FATAL_ERROR", 0x3D },
10780 	{ "NMI_TRM_HW_ERR", 0x46 },
10781 	{ "NMI_INTERRUPT_TRM", 0x4C },
10782 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10783 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10784 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10785 	{ "NMI_INTERRUPT_HOST", 0x66 },
10786 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10787 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10788 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10789 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
10790 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
10791 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10792 	{ "ADVANCED_SYSASSERT", 0 },
10793 };
10794 
10795 const char *
10796 iwm_desc_lookup(uint32_t num)
10797 {
10798 	int i;
10799 
10800 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10801 		if (advanced_lookup[i].num ==
10802 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10803 			return advanced_lookup[i].name;
10804 
10805 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10806 	return advanced_lookup[i].name;
10807 }
10808 
10809 /*
10810  * Support for dumping the error log seemed like a good idea ...
10811  * but it's mostly hex junk and the only sensible thing is the
10812  * hw/ucode revision (which we know anyway).  Since it's here,
10813  * I'll just leave it in, just in case e.g. the Intel guys want to
10814  * help us decipher some "ADVANCED_SYSASSERT" later.
10815  */
10816 void
10817 iwm_nic_error(struct iwm_softc *sc)
10818 {
10819 	struct iwm_error_event_table table;
10820 	uint32_t base;
10821 
10822 	printf("%s: dumping device error log\n", DEVNAME(sc));
10823 	base = sc->sc_uc.uc_error_event_table;
10824 	if (base < 0x800000) {
10825 		printf("%s: Invalid error log pointer 0x%08x\n",
10826 		    DEVNAME(sc), base);
10827 		return;
10828 	}
10829 
10830 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10831 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10832 		return;
10833 	}
10834 
10835 	if (!table.valid) {
10836 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10837 		return;
10838 	}
10839 
10840 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10841 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10842 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10843 		    sc->sc_flags, table.valid);
10844 	}
10845 
10846 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10847 	    iwm_desc_lookup(table.error_id));
10848 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10849 	    table.trm_hw_status0);
10850 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10851 	    table.trm_hw_status1);
10852 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10853 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10854 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10855 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10856 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10857 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10858 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10859 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10860 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10861 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10862 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10863 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10864 	    table.fw_rev_type);
10865 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10866 	    table.major);
10867 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10868 	    table.minor);
10869 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10870 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10871 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10872 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10873 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10874 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10875 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10876 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10877 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10878 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10879 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10880 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10881 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10882 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10883 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10884 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10885 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10886 
10887 	if (sc->sc_uc.uc_umac_error_event_table)
10888 		iwm_nic_umac_error(sc);
10889 }
10890 
10891 void
10892 iwm_dump_driver_status(struct iwm_softc *sc)
10893 {
10894 	int i;
10895 
10896 	printf("driver status:\n");
10897 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
10898 		struct iwm_tx_ring *ring = &sc->txq[i];
10899 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
10900 		    "queued=%-3d\n",
10901 		    i, ring->qid, ring->cur, ring->queued);
10902 	}
10903 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
10904 	printf("  802.11 state %s\n",
10905 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10906 }
10907 
10908 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10909 do {									\
10910 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10911 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10912 	_var_ = (void *)((_pkt_)+1);					\
10913 } while (/*CONSTCOND*/0)
10914 
10915 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10916 do {									\
10917 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10918 	    (_len_), BUS_DMASYNC_POSTREAD);				\
10919 	_ptr_ = (void *)((_pkt_)+1);					\
10920 } while (/*CONSTCOND*/0)
10921 
10922 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count)
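
/*
 * Usage sketch for the helpers above, as seen throughout iwm_rx_pkt():
 * SYNC_RESP_STRUCT() syncs the response payload which follows the
 * packet header and points the given variable at it.
 *
 *	struct iwm_mcc_chub_notif *notif;
 *	SYNC_RESP_STRUCT(notif, pkt);
 *	iwm_mcc_update(sc, notif);
 *
 * Note that ADVANCE_RXQ() relies on a local variable named 'count'
 * (the RX ring size) being in scope at the call site; see
 * iwm_notif_intr() below.
 */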
10923 
10924 int
10925 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10926 {
10927 	int qid, idx, code;
10928 
10929 	qid = pkt->hdr.qid & ~0x80;
10930 	idx = pkt->hdr.idx;
10931 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10932 
10933 	return (!(qid == 0 && idx == 0 && code == 0) &&
10934 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10935 }
10936 
10937 void
10938 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10939 {
10940 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10941 	struct iwm_rx_packet *pkt, *nextpkt;
10942 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10943 	struct mbuf *m0, *m;
10944 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10945 	int qid, idx, code, handled = 1;
10946 
10947 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10948 	    BUS_DMASYNC_POSTREAD);
10949 
10950 	m0 = data->m;
10951 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
10952 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
10953 		qid = pkt->hdr.qid;
10954 		idx = pkt->hdr.idx;
10955 
10956 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10957 
10958 		if (!iwm_rx_pkt_valid(pkt))
10959 			break;
10960 
10961 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
10962 		if (len < minsz || len > (IWM_RBUF_SIZE - offset))
10963 			break;
10964 
10965 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10966 			/* Take mbuf m0 off the RX ring. */
10967 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
10968 				ifp->if_ierrors++;
10969 				break;
10970 			}
10971 			KASSERT(data->m != m0);
10972 		}
10973 
10974 		switch (code) {
10975 		case IWM_REPLY_RX_PHY_CMD:
10976 			iwm_rx_rx_phy_cmd(sc, pkt, data);
10977 			break;
10978 
10979 		case IWM_REPLY_RX_MPDU_CMD: {
10980 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
10981 			nextoff = offset +
10982 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10983 			nextpkt = (struct iwm_rx_packet *)
10984 			    (m0->m_data + nextoff);
10985 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
10986 			    !iwm_rx_pkt_valid(nextpkt)) {
10987 				/* No need to copy last frame in buffer. */
10988 				if (offset > 0)
10989 					m_adj(m0, offset);
10990 				if (sc->sc_mqrx_supported)
10991 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
10992 					    maxlen, ml);
10993 				else
10994 					iwm_rx_mpdu(sc, m0, pkt->data,
10995 					    maxlen, ml);
10996 				m0 = NULL; /* stack owns m0 now; abort loop */
10997 			} else {
10998 				/*
10999 				 * Create an mbuf which points to the current
11000 				 * packet. Always copy from offset zero to
11001 				 * preserve m_pkthdr.
11002 				 */
11003 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
11004 				if (m == NULL) {
11005 					ifp->if_ierrors++;
11006 					m_freem(m0);
11007 					m0 = NULL;
11008 					break;
11009 				}
11010 				m_adj(m, offset);
11011 				if (sc->sc_mqrx_supported)
11012 					iwm_rx_mpdu_mq(sc, m, pkt->data,
11013 					    maxlen, ml);
11014 				else
11015 					iwm_rx_mpdu(sc, m, pkt->data,
11016 					    maxlen, ml);
11017 			}
11018 			break;
11019 		}
11020 
11021 		case IWM_TX_CMD:
11022 			iwm_rx_tx_cmd(sc, pkt, data);
11023 			break;
11024 
11025 		case IWM_BA_NOTIF:
11026 			iwm_rx_compressed_ba(sc, pkt);
11027 			break;
11028 
11029 		case IWM_MISSED_BEACONS_NOTIFICATION:
11030 			iwm_rx_bmiss(sc, pkt, data);
11031 			break;
11032 
11033 		case IWM_MFUART_LOAD_NOTIFICATION:
11034 			break;
11035 
11036 		case IWM_ALIVE: {
11037 			struct iwm_alive_resp_v1 *resp1;
11038 			struct iwm_alive_resp_v2 *resp2;
11039 			struct iwm_alive_resp_v3 *resp3;
11040 
11041 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
11042 				SYNC_RESP_STRUCT(resp1, pkt);
11043 				sc->sc_uc.uc_error_event_table
11044 				    = le32toh(resp1->error_event_table_ptr);
11045 				sc->sc_uc.uc_log_event_table
11046 				    = le32toh(resp1->log_event_table_ptr);
11047 				sc->sched_base = le32toh(resp1->scd_base_ptr);
11048 				if (resp1->status == IWM_ALIVE_STATUS_OK)
11049 					sc->sc_uc.uc_ok = 1;
11050 				else
11051 					sc->sc_uc.uc_ok = 0;
11052 			}
11053 
11054 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
11055 				SYNC_RESP_STRUCT(resp2, pkt);
11056 				sc->sc_uc.uc_error_event_table
11057 				    = le32toh(resp2->error_event_table_ptr);
11058 				sc->sc_uc.uc_log_event_table
11059 				    = le32toh(resp2->log_event_table_ptr);
11060 				sc->sched_base = le32toh(resp2->scd_base_ptr);
11061 				sc->sc_uc.uc_umac_error_event_table
11062 				    = le32toh(resp2->error_info_addr);
11063 				if (resp2->status == IWM_ALIVE_STATUS_OK)
11064 					sc->sc_uc.uc_ok = 1;
11065 				else
11066 					sc->sc_uc.uc_ok = 0;
11067 			}
11068 
11069 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
11070 				SYNC_RESP_STRUCT(resp3, pkt);
11071 				sc->sc_uc.uc_error_event_table
11072 				    = le32toh(resp3->error_event_table_ptr);
11073 				sc->sc_uc.uc_log_event_table
11074 				    = le32toh(resp3->log_event_table_ptr);
11075 				sc->sched_base = le32toh(resp3->scd_base_ptr);
11076 				sc->sc_uc.uc_umac_error_event_table
11077 				    = le32toh(resp3->error_info_addr);
11078 				if (resp3->status == IWM_ALIVE_STATUS_OK)
11079 					sc->sc_uc.uc_ok = 1;
11080 				else
11081 					sc->sc_uc.uc_ok = 0;
11082 			}
11083 
11084 			sc->sc_uc.uc_intr = 1;
11085 			wakeup(&sc->sc_uc);
11086 			break;
11087 		}
11088 
11089 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
11090 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
11091 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
11092 			iwm_phy_db_set_section(sc, phy_db_notif);
11093 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
11094 			wakeup(&sc->sc_init_complete);
11095 			break;
11096 		}
11097 
11098 		case IWM_STATISTICS_NOTIFICATION: {
11099 			struct iwm_notif_statistics *stats;
11100 			SYNC_RESP_STRUCT(stats, pkt);
11101 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
11102 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
11103 			break;
11104 		}
11105 
11106 		case IWM_MCC_CHUB_UPDATE_CMD: {
11107 			struct iwm_mcc_chub_notif *notif;
11108 			SYNC_RESP_STRUCT(notif, pkt);
11109 			iwm_mcc_update(sc, notif);
11110 			break;
11111 		}
11112 
11113 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
11114 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11115 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
11116 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11117 				 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
11118 			break;
11119 
11120 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11121 		    IWM_CT_KILL_NOTIFICATION): {
11122 			struct iwm_ct_kill_notif *notif;
11123 			SYNC_RESP_STRUCT(notif, pkt);
11124 			printf("%s: device at critical temperature (%u degC), "
11125 			    "stopping device\n",
11126 			    DEVNAME(sc), le16toh(notif->temperature));
11127 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11128 			task_add(systq, &sc->init_task);
11129 			break;
11130 		}
11131 
11132 		case IWM_ADD_STA_KEY:
11133 		case IWM_PHY_CONFIGURATION_CMD:
11134 		case IWM_TX_ANT_CONFIGURATION_CMD:
11135 		case IWM_ADD_STA:
11136 		case IWM_MAC_CONTEXT_CMD:
11137 		case IWM_REPLY_SF_CFG_CMD:
11138 		case IWM_POWER_TABLE_CMD:
11139 		case IWM_LTR_CONFIG:
11140 		case IWM_PHY_CONTEXT_CMD:
11141 		case IWM_BINDING_CONTEXT_CMD:
11142 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
11143 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
11144 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
11145 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
11146 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
11147 		case IWM_REPLY_BEACON_FILTERING_CMD:
11148 		case IWM_MAC_PM_POWER_TABLE:
11149 		case IWM_TIME_QUOTA_CMD:
11150 		case IWM_REMOVE_STA:
11151 		case IWM_TXPATH_FLUSH:
11152 		case IWM_LQ_CMD:
11153 		case IWM_WIDE_ID(IWM_LONG_GROUP,
11154 				 IWM_FW_PAGING_BLOCK_CMD):
11155 		case IWM_BT_CONFIG:
11156 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
11157 		case IWM_NVM_ACCESS_CMD:
11158 		case IWM_MCC_UPDATE_CMD:
11159 		case IWM_TIME_EVENT_CMD: {
11160 			size_t pkt_len;
11161 
11162 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
11163 				break;
11164 
11165 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
11166 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11167 
11168 			pkt_len = sizeof(pkt->len_n_flags) +
11169 			    iwm_rx_packet_len(pkt);
11170 
11171 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
11172 			    pkt_len < sizeof(*pkt) ||
11173 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
11174 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11175 				    sc->sc_cmd_resp_len[idx]);
11176 				sc->sc_cmd_resp_pkt[idx] = NULL;
11177 				break;
11178 			}
11179 
11180 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
11181 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11182 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11183 			break;
11184 		}
11185 
11186 		/* ignore */
11187 		case IWM_PHY_DB_CMD:
11188 			break;
11189 
11190 		case IWM_INIT_COMPLETE_NOTIF:
11191 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
11192 			wakeup(&sc->sc_init_complete);
11193 			break;
11194 
11195 		case IWM_SCAN_OFFLOAD_COMPLETE: {
11196 			struct iwm_periodic_scan_complete *notif;
11197 			SYNC_RESP_STRUCT(notif, pkt);
11198 			break;
11199 		}
11200 
11201 		case IWM_SCAN_ITERATION_COMPLETE: {
11202 			struct iwm_lmac_scan_complete_notif *notif;
11203 			SYNC_RESP_STRUCT(notif, pkt);
11204 			iwm_endscan(sc);
11205 			break;
11206 		}
11207 
11208 		case IWM_SCAN_COMPLETE_UMAC: {
11209 			struct iwm_umac_scan_complete *notif;
11210 			SYNC_RESP_STRUCT(notif, pkt);
11211 			iwm_endscan(sc);
11212 			break;
11213 		}
11214 
11215 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
11216 			struct iwm_umac_scan_iter_complete_notif *notif;
11217 			SYNC_RESP_STRUCT(notif, pkt);
11218 			iwm_endscan(sc);
11219 			break;
11220 		}
11221 
11222 		case IWM_REPLY_ERROR: {
11223 			struct iwm_error_resp *resp;
11224 			SYNC_RESP_STRUCT(resp, pkt);
11225 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
11226 				DEVNAME(sc), le32toh(resp->error_type),
11227 				resp->cmd_id);
11228 			break;
11229 		}
11230 
11231 		case IWM_TIME_EVENT_NOTIFICATION: {
11232 			struct iwm_time_event_notif *notif;
11233 			uint32_t action;
11234 			SYNC_RESP_STRUCT(notif, pkt);
11235 
11236 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
11237 				break;
11238 			action = le32toh(notif->action);
11239 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
11240 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
11241 			break;
11242 		}
11243 
11244 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
11245 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
11246 			break;
11247 
11248 		/*
11249 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
11250 		 * messages. Just ignore them for now.
11251 		 */
11252 		case IWM_DEBUG_LOG_MSG:
11253 			break;
11254 
11255 		case IWM_MCAST_FILTER_CMD:
11256 			break;
11257 
11258 		case IWM_SCD_QUEUE_CFG: {
11259 			struct iwm_scd_txq_cfg_rsp *rsp;
11260 			SYNC_RESP_STRUCT(rsp, pkt);
11261 
11262 			break;
11263 		}
11264 
11265 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
11266 			break;
11267 
11268 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
11269 			break;
11270 
11271 		default:
11272 			handled = 0;
11273 			printf("%s: unhandled firmware response 0x%x/0x%x "
11274 			    "rx ring %d[%d]\n",
11275 			    DEVNAME(sc), code, pkt->len_n_flags,
11276 			    (qid & ~0x80), idx);
11277 			break;
11278 		}
11279 
11280 		/*
11281 		 * uCode sets bit 0x80 when it originates the notification,
11282 		 * i.e. when the notification is not a direct response to a
11283 		 * command sent by the driver.
11284 		 * For example, uCode issues IWM_REPLY_RX when it sends a
11285 		 * received frame to the driver.
11286 		 */
11287 		if (handled && !(qid & (1 << 7))) {
11288 			iwm_cmd_done(sc, qid, idx, code);
11289 		}
11290 
11291 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11292 	}
11293 
11294 	if (m0 && m0 != data->m)
11295 		m_freem(m0);
11296 }
11297 
11298 void
11299 iwm_notif_intr(struct iwm_softc *sc)
11300 {
11301 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
11302 	uint32_t wreg;
11303 	uint16_t hw;
11304 	int count;
11305 
11306 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
11307 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
11308 
11309 	if (sc->sc_mqrx_supported) {
11310 		count = IWM_RX_MQ_RING_COUNT;
11311 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
11312 	} else {
11313 		count = IWM_RX_RING_COUNT;
11314 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
11315 	}
11316 
11317 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
11318 	hw &= (count - 1);
11319 	while (sc->rxq.cur != hw) {
11320 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
11321 		iwm_rx_pkt(sc, data, &ml);
11322 		ADVANCE_RXQ(sc);
11323 	}
11324 	if_input(&sc->sc_ic.ic_if, &ml);
11325 
11326 	/*
11327 	 * Tell the firmware what we have processed.
11328 	 * Seems like the hardware gets upset unless we align the write by 8??
11329 	 */
11330 	hw = (hw == 0) ? count - 1 : hw - 1;
11331 	IWM_WRITE(sc, wreg, hw & ~7);
11332 }
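
/*
 * Worked example for the write-pointer update above, assuming the
 * legacy ring (count == IWM_RX_RING_COUNT == 256): if the closed
 * receive buffer index is 0, the driver writes (256 - 1) & ~7 == 248,
 * keeping the aligned write pointer a few entries behind the index
 * the hardware reported.
 */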
11333 
11334 int
11335 iwm_intr(void *arg)
11336 {
11337 	struct iwm_softc *sc = arg;
11338 	struct ieee80211com *ic = &sc->sc_ic;
11339 	struct ifnet *ifp = IC2IFP(ic);
11340 	int handled = 0;
11341 	int rv = 0;
11342 	uint32_t r1, r2;
11343 
11344 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
11345 
11346 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
11347 		uint32_t *ict = sc->ict_dma.vaddr;
11348 		int tmp;
11349 
11350 		tmp = htole32(ict[sc->ict_cur]);
11351 		if (!tmp)
11352 			goto out_ena;
11353 
11354 		/*
11355 		 * OK, there was something; keep reading until we have it all.
11356 		 */
11357 		r1 = r2 = 0;
11358 		while (tmp) {
11359 			r1 |= tmp;
11360 			ict[sc->ict_cur] = 0;
11361 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
11362 			tmp = htole32(ict[sc->ict_cur]);
11363 		}
11364 
11365 		/* this is where the fun begins.  don't ask */
11366 		if (r1 == 0xffffffff)
11367 			r1 = 0;
11368 
11369 		/*
11370 		 * Workaround for hardware bug where bits are falsely cleared
11371 		 * when using interrupt coalescing.  Bit 15 should be set if
11372 		 * bits 18 and 19 are set.
11373 		 */
11374 		if (r1 & 0xc0000)
11375 			r1 |= 0x8000;
11376 
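		/*
		 * Each ICT entry holds a compacted interrupt cause: the
		 * low byte of the accumulated value corresponds to INTA
		 * bits 0-7 and the next byte to bits 24-31, so expand
		 * the value back into CSR_INT layout.
		 */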
11377 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
11378 	} else {
11379 		r1 = IWM_READ(sc, IWM_CSR_INT);
11380 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11381 	}
11382 	if (r1 == 0 && r2 == 0) {
11383 		goto out_ena;
11384 	}
11385 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
11386 		goto out;
11387 
11388 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11389 
11390 	/* ignored */
11391 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
11392 
11393 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
11394 		handled |= IWM_CSR_INT_BIT_RF_KILL;
11395 		iwm_check_rfkill(sc);
11396 		task_add(systq, &sc->init_task);
11397 		rv = 1;
11398 		goto out_ena;
11399 	}
11400 
11401 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
11402 		if (ifp->if_flags & IFF_DEBUG) {
11403 			iwm_nic_error(sc);
11404 			iwm_dump_driver_status(sc);
11405 		}
11406 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11407 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11408 			task_add(systq, &sc->init_task);
11409 		rv = 1;
11410 		goto out;
11411 	}
11413 
11414 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
11415 		handled |= IWM_CSR_INT_BIT_HW_ERR;
11416 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11417 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11418 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11419 			task_add(systq, &sc->init_task);
11420 		}
11421 		rv = 1;
11422 		goto out;
11423 	}
11424 
11425 	/* firmware chunk loaded */
11426 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
11427 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11428 		handled |= IWM_CSR_INT_BIT_FH_TX;
11429 
11430 		sc->sc_fw_chunk_done = 1;
11431 		wakeup(&sc->sc_fw);
11432 	}
11433 
11434 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
11435 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
11436 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
11437 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
11438 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11439 		}
11440 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
11441 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
11442 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11443 		}
11444 
11445 		/* Disable periodic interrupt; we use it as just a one-shot. */
11446 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11447 
11448 		/*
11449 		 * Re-enable the periodic interrupt (8 msec) only if we
11450 		 * received a real RX interrupt (instead of just the periodic
11451 		 * one), to catch any dangling Rx interrupt.  If it was just
11452 		 * the periodic interrupt, there was no dangling Rx activity
11453 		 * and no need to extend it; one-shot is enough.
11454 		 */
11455 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
11456 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11457 			    IWM_CSR_INT_PERIODIC_ENA);
11458 
11459 		iwm_notif_intr(sc);
11460 	}
11461 
11462 	rv = 1;
11463 
11464  out_ena:
11465 	iwm_restore_interrupts(sc);
11466  out:
11467 	return rv;
11468 }
11469 
11470 int
11471 iwm_intr_msix(void *arg)
11472 {
11473 	struct iwm_softc *sc = arg;
11474 	struct ieee80211com *ic = &sc->sc_ic;
11475 	struct ifnet *ifp = IC2IFP(ic);
11476 	uint32_t inta_fh, inta_hw;
11477 	int vector = 0;
11478 
11479 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11480 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11481 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11482 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11483 	inta_fh &= sc->sc_fh_mask;
11484 	inta_hw &= sc->sc_hw_mask;
11485 
11486 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11487 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11488 		iwm_notif_intr(sc);
11489 	}
11490 
11491 	/* firmware chunk loaded */
11492 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11493 		sc->sc_fw_chunk_done = 1;
11494 		wakeup(&sc->sc_fw);
11495 	}
11496 
11497 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11498 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11499 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11500 		if (ifp->if_flags & IFF_DEBUG) {
11501 			iwm_nic_error(sc);
11502 			iwm_dump_driver_status(sc);
11503 		}
11504 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11505 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11506 			task_add(systq, &sc->init_task);
11507 		return 1;
11508 	}
11509 
11510 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11511 		iwm_check_rfkill(sc);
11512 		task_add(systq, &sc->init_task);
11513 	}
11514 
11515 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11516 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11517 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11518 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11519 			task_add(systq, &sc->init_task);
11520 		}
11521 		return 1;
11522 	}
11523 
11524 	/*
11525 	 * Before delivering the interrupt the HW disables it to prevent
11526 	 * a nested interrupt. This is done by setting the corresponding
11527 	 * bit in the mask register. After the interrupt has been handled
11528 	 * it should be re-enabled by clearing this bit. The register is
11529 	 * defined as write-1-clear (W1C), meaning that the bit is
11530 	 * cleared by writing 1 to it.
11531 	 */
11532 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11533 	return 1;
11534 }
11535 
11536 typedef void *iwm_match_t;
11537 
11538 static const struct pci_matchid iwm_devices[] = {
11539 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11540 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11541 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11542 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11543 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11544 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11545 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11546 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11547 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11548 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11549 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11550 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11551 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11552 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11553 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11554 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_3 },
11555 };
11556 
11557 int
11558 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11559 {
11560 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11561 	    nitems(iwm_devices));
11562 }
11563 
11564 int
11565 iwm_preinit(struct iwm_softc *sc)
11566 {
11567 	struct ieee80211com *ic = &sc->sc_ic;
11568 	struct ifnet *ifp = IC2IFP(ic);
11569 	int err;
11570 
11571 	err = iwm_prepare_card_hw(sc);
11572 	if (err) {
11573 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11574 		return err;
11575 	}
11576 
11577 	if (sc->attached) {
11578 		/* Update MAC in case the upper layers changed it. */
11579 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11580 		    ((struct arpcom *)ifp)->ac_enaddr);
11581 		return 0;
11582 	}
11583 
11584 	err = iwm_start_hw(sc);
11585 	if (err) {
11586 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11587 		return err;
11588 	}
11589 
11590 	err = iwm_run_init_mvm_ucode(sc, 1);
11591 	iwm_stop_device(sc);
11592 	if (err)
11593 		return err;
11594 
11595 	/* Print version info and MAC address on first successful fw load. */
11596 	sc->attached = 1;
11597 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11598 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11599 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11600 
11601 	if (sc->sc_nvm.sku_cap_11n_enable)
11602 		iwm_setup_ht_rates(sc);
11603 
11604 	/* not all hardware can do 5GHz band */
11605 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11606 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11607 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11608 
11609 	/* Configure channel information obtained from firmware. */
11610 	ieee80211_channel_init(ifp);
11611 
11612 	/* Configure MAC address. */
11613 	err = if_setlladdr(ifp, ic->ic_myaddr);
11614 	if (err)
11615 		printf("%s: could not set MAC address (error %d)\n",
11616 		    DEVNAME(sc), err);
11617 
11618 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11619 
11620 	return 0;
11621 }
11622 
11623 void
11624 iwm_attach_hook(struct device *self)
11625 {
11626 	struct iwm_softc *sc = (void *)self;
11627 
11628 	KASSERT(!cold);
11629 
11630 	iwm_preinit(sc);
11631 }
11632 
11633 void
11634 iwm_attach(struct device *parent, struct device *self, void *aux)
11635 {
11636 	struct iwm_softc *sc = (void *)self;
11637 	struct pci_attach_args *pa = aux;
11638 	pci_intr_handle_t ih;
11639 	pcireg_t reg, memtype;
11640 	struct ieee80211com *ic = &sc->sc_ic;
11641 	struct ifnet *ifp = &ic->ic_if;
11642 	const char *intrstr;
11643 	int err;
11644 	int txq_i, i, j;
11645 
11646 	sc->sc_pct = pa->pa_pc;
11647 	sc->sc_pcitag = pa->pa_tag;
11648 	sc->sc_dmat = pa->pa_dmat;
11649 
11650 	rw_init(&sc->ioctl_rwl, "iwmioctl");
11651 
11652 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11653 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11654 	if (err == 0) {
11655 		printf("%s: PCIe capability structure not found!\n",
11656 		    DEVNAME(sc));
11657 		return;
11658 	}
11659 
11660 	/*
11661 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11662 	 * PCI Tx retries from interfering with C3 CPU state.
11663 	 */
11664 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11665 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11666 
11667 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11668 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11669 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11670 	if (err) {
11671 		printf("%s: can't map mem space\n", DEVNAME(sc));
11672 		return;
11673 	}
11674 
11675 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11676 		sc->sc_msix = 1;
11677 	} else if (pci_intr_map_msi(pa, &ih)) {
11678 		if (pci_intr_map(pa, &ih)) {
11679 			printf("%s: can't map interrupt\n", DEVNAME(sc));
11680 			return;
11681 		}
11682 		/* Hardware bug workaround. */
11683 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11684 		    PCI_COMMAND_STATUS_REG);
11685 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11686 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11687 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11688 		    PCI_COMMAND_STATUS_REG, reg);
11689 	}
11690 
11691 	intrstr = pci_intr_string(sc->sc_pct, ih);
11692 	if (sc->sc_msix)
11693 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11694 		    iwm_intr_msix, sc, DEVNAME(sc));
11695 	else
11696 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11697 		    iwm_intr, sc, DEVNAME(sc));
11698 
11699 	if (sc->sc_ih == NULL) {
11700 		printf("\n");
11701 		printf("%s: can't establish interrupt", DEVNAME(sc));
11702 		if (intrstr != NULL)
11703 			printf(" at %s", intrstr);
11704 		printf("\n");
11705 		return;
11706 	}
11707 	printf(", %s\n", intrstr);
11708 
11709 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11710 	switch (PCI_PRODUCT(pa->pa_id)) {
11711 	case PCI_PRODUCT_INTEL_WL_3160_1:
11712 	case PCI_PRODUCT_INTEL_WL_3160_2:
11713 		sc->sc_fwname = "iwm-3160-17";
11714 		sc->host_interrupt_operation_mode = 1;
11715 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11716 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11717 		sc->sc_nvm_max_section_size = 16384;
11718 		sc->nvm_type = IWM_NVM;
11719 		break;
11720 	case PCI_PRODUCT_INTEL_WL_3165_1:
11721 	case PCI_PRODUCT_INTEL_WL_3165_2:
11722 		sc->sc_fwname = "iwm-7265D-29";
11723 		sc->host_interrupt_operation_mode = 0;
11724 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11725 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11726 		sc->sc_nvm_max_section_size = 16384;
11727 		sc->nvm_type = IWM_NVM;
11728 		break;
11729 	case PCI_PRODUCT_INTEL_WL_3168_1:
11730 		sc->sc_fwname = "iwm-3168-29";
11731 		sc->host_interrupt_operation_mode = 0;
11732 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11733 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11734 		sc->sc_nvm_max_section_size = 16384;
11735 		sc->nvm_type = IWM_NVM_SDP;
11736 		break;
11737 	case PCI_PRODUCT_INTEL_WL_7260_1:
11738 	case PCI_PRODUCT_INTEL_WL_7260_2:
11739 		sc->sc_fwname = "iwm-7260-17";
11740 		sc->host_interrupt_operation_mode = 1;
11741 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11742 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11743 		sc->sc_nvm_max_section_size = 16384;
11744 		sc->nvm_type = IWM_NVM;
11745 		break;
11746 	case PCI_PRODUCT_INTEL_WL_7265_1:
11747 	case PCI_PRODUCT_INTEL_WL_7265_2:
11748 		sc->sc_fwname = "iwm-7265-17";
11749 		sc->host_interrupt_operation_mode = 0;
11750 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11751 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11752 		sc->sc_nvm_max_section_size = 16384;
11753 		sc->nvm_type = IWM_NVM;
11754 		break;
11755 	case PCI_PRODUCT_INTEL_WL_8260_1:
11756 	case PCI_PRODUCT_INTEL_WL_8260_2:
11757 		sc->sc_fwname = "iwm-8000C-36";
11758 		sc->host_interrupt_operation_mode = 0;
11759 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11760 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11761 		sc->sc_nvm_max_section_size = 32768;
11762 		sc->nvm_type = IWM_NVM_EXT;
11763 		break;
11764 	case PCI_PRODUCT_INTEL_WL_8265_1:
11765 		sc->sc_fwname = "iwm-8265-36";
11766 		sc->host_interrupt_operation_mode = 0;
11767 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11768 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11769 		sc->sc_nvm_max_section_size = 32768;
11770 		sc->nvm_type = IWM_NVM_EXT;
11771 		break;
11772 	case PCI_PRODUCT_INTEL_WL_9260_1:
11773 		sc->sc_fwname = "iwm-9260-46";
11774 		sc->host_interrupt_operation_mode = 0;
11775 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11776 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11777 		sc->sc_nvm_max_section_size = 32768;
11778 		sc->sc_mqrx_supported = 1;
11779 		break;
11780 	case PCI_PRODUCT_INTEL_WL_9560_1:
11781 	case PCI_PRODUCT_INTEL_WL_9560_2:
11782 	case PCI_PRODUCT_INTEL_WL_9560_3:
11783 		sc->sc_fwname = "iwm-9000-46";
11784 		sc->host_interrupt_operation_mode = 0;
11785 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11786 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11787 		sc->sc_nvm_max_section_size = 32768;
11788 		sc->sc_mqrx_supported = 1;
11789 		sc->sc_integrated = 1;
11790 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
11791 			sc->sc_xtal_latency = 670;
11792 			sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
11793 		} else
11794 			sc->sc_xtal_latency = 650;
11795 		break;
11796 	default:
11797 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11798 		return;
11799 	}
11800 
11801 	/*
11802 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
11803 	 * changed: the revision step now also includes bits 0-1 (there is no
11804 	 * more "dash" value). To keep hw_rev backwards compatible, we store
11805 	 * it in the old format.
11806 	 */
11807 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11808 		uint32_t hw_step;
11809 
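		/*
		 * IWM_CSR_HW_REV_STEP() extracts bits 2-3, so shifting
		 * hw_rev left by two first moves the new-format step
		 * bits 0-1 into position; the result keeps the step in
		 * bits 2-3 of the old format, dropping the "dash".
		 */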
11810 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11811 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11812 
11813 		if (iwm_prepare_card_hw(sc) != 0) {
11814 			printf("%s: could not initialize hardware\n",
11815 			    DEVNAME(sc));
11816 			return;
11817 		}
11818 
11819 		/*
11820 		 * In order to recognize C step the driver should read the
11821 		 * chip version id located at the AUX bus MISC address.
11822 		 */
11823 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11824 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
11825 		DELAY(2);
11826 
11827 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11828 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11829 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11830 				   25000);
11831 		if (!err) {
11832 			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
11833 			return;
11834 		}
11835 
11836 		if (iwm_nic_lock(sc)) {
11837 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11838 			hw_step |= IWM_ENABLE_WFPM;
11839 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11840 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11841 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
11842 			if (hw_step == 0x3)
11843 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11844 						(IWM_SILICON_C_STEP << 2);
11845 			iwm_nic_unlock(sc);
11846 		} else {
11847 			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
11848 			return;
11849 		}
11850 	}
11851 
11852 	/*
11853 	 * Allocate DMA memory for firmware transfers.
11854 	 * Must be aligned on a 16-byte boundary.
11855 	 */
11856 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11857 	    sc->sc_fwdmasegsz, 16);
11858 	if (err) {
11859 		printf("%s: could not allocate memory for firmware\n",
11860 		    DEVNAME(sc));
11861 		return;
11862 	}
11863 
11864 	/* Allocate "Keep Warm" page, used internally by the card. */
11865 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11866 	if (err) {
11867 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
11868 		goto fail1;
11869 	}
11870 
11871 	/* Allocate interrupt cause table (ICT).*/
11872 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11873 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
11874 	if (err) {
11875 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11876 		goto fail2;
11877 	}
11878 
11879 	/* TX scheduler rings must be aligned on a 1KB boundary. */
11880 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
11881 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
11882 	if (err) {
11883 		printf("%s: could not allocate TX scheduler rings\n",
11884 		    DEVNAME(sc));
11885 		goto fail3;
11886 	}
11887 
11888 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11889 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11890 		if (err) {
11891 			printf("%s: could not allocate TX ring %d\n",
11892 			    DEVNAME(sc), txq_i);
11893 			goto fail4;
11894 		}
11895 	}
11896 
11897 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
11898 	if (err) {
11899 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11900 		goto fail4;
11901 	}
11902 
11903 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
11904 	if (sc->sc_nswq == NULL)
11905 		goto fail4;
11906 
11907 	/* Clear pending interrupts. */
11908 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
11909 
11910 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11911 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11912 	ic->ic_state = IEEE80211_S_INIT;
11913 
11914 	/* Set device capabilities. */
11915 	ic->ic_caps =
11916 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11917 	    IEEE80211_C_WEP |		/* WEP */
11918 	    IEEE80211_C_RSN |		/* WPA/RSN */
11919 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11920 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11921 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11922 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11923 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11924 
11925 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11926 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11927 	ic->ic_htcaps |=
11928 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11929 	ic->ic_htxcaps = 0;
11930 	ic->ic_txbfcaps = 0;
11931 	ic->ic_aselcaps = 0;
11932 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11933 
11934 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11935 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11936 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11937 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11938 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11939 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11940 
11941 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11942 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11943 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11944 
11945 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11946 		sc->sc_phyctxt[i].id = i;
11947 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11948 		sc->sc_phyctxt[i].vht_chan_width =
11949 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11950 	}
11951 
11952 	sc->sc_amrr.amrr_min_success_threshold =  1;
11953 	sc->sc_amrr.amrr_max_success_threshold = 15;
11954 
11955 	/* IBSS channel undefined for now. */
11956 	ic->ic_ibss_chan = &ic->ic_channels[1];
11957 
11958 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
11959 
11960 	ifp->if_softc = sc;
11961 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11962 	ifp->if_ioctl = iwm_ioctl;
11963 	ifp->if_start = iwm_start;
11964 	ifp->if_watchdog = iwm_watchdog;
11965 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11966 
11967 	if_attach(ifp);
11968 	ieee80211_ifattach(ifp);
11969 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11970 
11971 #if NBPFILTER > 0
11972 	iwm_radiotap_attach(sc);
11973 #endif
11974 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
11975 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
11976 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
11977 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
11978 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
11979 		rxba->sc = sc;
11980 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
11981 		    rxba);
11982 		timeout_set(&rxba->reorder_buf.reorder_timer,
11983 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
11984 		for (j = 0; j < nitems(rxba->entries); j++)
11985 			ml_init(&rxba->entries[j].frames);
11986 	}
11987 	task_set(&sc->init_task, iwm_init_task, sc);
11988 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
11989 	task_set(&sc->ba_task, iwm_ba_task, sc);
11990 	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
11991 	task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
11992 	task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
11993 
11994 	ic->ic_node_alloc = iwm_node_alloc;
11995 	ic->ic_bgscan_start = iwm_bgscan;
11996 	ic->ic_bgscan_done = iwm_bgscan_done;
11997 	ic->ic_set_key = iwm_set_key;
11998 	ic->ic_delete_key = iwm_delete_key;
11999 
12000 	/* Override 802.11 state transition machine. */
12001 	sc->sc_newstate = ic->ic_newstate;
12002 	ic->ic_newstate = iwm_newstate;
12003 	ic->ic_updateprot = iwm_updateprot;
12004 	ic->ic_updateslot = iwm_updateslot;
12005 	ic->ic_updateedca = iwm_updateedca;
12006 	ic->ic_updatedtim = iwm_updatedtim;
12007 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
12008 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
12009 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
12010 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
12011 	/*
12012 	 * We cannot read the MAC address without loading the
12013 	 * firmware from disk. Postpone until mountroot is done.
12014 	 */
12015 	config_mountroot(self, iwm_attach_hook);
12016 
12017 	return;
12018 
12019 fail4:	while (--txq_i >= 0)
12020 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
12021 	iwm_free_rx_ring(sc, &sc->rxq);
12022 	iwm_dma_contig_free(&sc->sched_dma);
12023 fail3:	if (sc->ict_dma.vaddr != NULL)
12024 		iwm_dma_contig_free(&sc->ict_dma);
12025 
12026 fail2:	iwm_dma_contig_free(&sc->kw_dma);
12027 fail1:	iwm_dma_contig_free(&sc->fw_dma);
12028 	return;
12029 }
12030 
12031 #if NBPFILTER > 0
12032 void
12033 iwm_radiotap_attach(struct iwm_softc *sc)
12034 {
12035 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
12036 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
12037 
12038 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
12039 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
12040 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
12041 
12042 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
12043 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
12044 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
12045 }
12046 #endif
12047 
12048 void
12049 iwm_init_task(void *arg1)
12050 {
12051 	struct iwm_softc *sc = arg1;
12052 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12053 	int s = splnet();
12054 	int generation = sc->sc_generation;
12055 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
12056 
12057 	rw_enter_write(&sc->ioctl_rwl);
12058 	if (generation != sc->sc_generation) {
12059 		rw_exit(&sc->ioctl_rwl);
12060 		splx(s);
12061 		return;
12062 	}
12063 
12064 	if (ifp->if_flags & IFF_RUNNING)
12065 		iwm_stop(ifp);
12066 	else
12067 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
12068 
12069 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
12070 		iwm_init(ifp);
12071 
12072 	rw_exit(&sc->ioctl_rwl);
12073 	splx(s);
12074 }
12075 
12076 void
12077 iwm_resume(struct iwm_softc *sc)
12078 {
12079 	pcireg_t reg;
12080 
12081 	/*
12082 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
12083 	 * PCI Tx retries from interfering with C3 CPU state.
12084 	 */
12085 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
12086 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
12087 
12088 	if (!sc->sc_msix) {
12089 		/* Hardware bug workaround. */
12090 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
12091 		    PCI_COMMAND_STATUS_REG);
12092 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
12093 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
12094 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
12095 		    PCI_COMMAND_STATUS_REG, reg);
12096 	}
12097 
12098 	iwm_disable_interrupts(sc);
12099 }
12100 
12101 int
12102 iwm_wakeup(struct iwm_softc *sc)
12103 {
12104 	struct ieee80211com *ic = &sc->sc_ic;
12105 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12106 	int err;
12107 
12108 	err = iwm_start_hw(sc);
12109 	if (err)
12110 		return err;
12111 
12112 	err = iwm_init_hw(sc);
12113 	if (err)
12114 		return err;
12115 
12116 	refcnt_init(&sc->task_refs);
12117 	ifq_clr_oactive(&ifp->if_snd);
12118 	ifp->if_flags |= IFF_RUNNING;
12119 
12120 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
12121 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
12122 	else
12123 		ieee80211_begin_scan(ifp);
12124 
12125 	return 0;
12126 }
12127 
12128 int
12129 iwm_activate(struct device *self, int act)
12130 {
12131 	struct iwm_softc *sc = (struct iwm_softc *)self;
12132 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12133 	int err = 0;
12134 
12135 	switch (act) {
12136 	case DVACT_QUIESCE:
12137 		if (ifp->if_flags & IFF_RUNNING) {
12138 			rw_enter_write(&sc->ioctl_rwl);
12139 			iwm_stop(ifp);
12140 			rw_exit(&sc->ioctl_rwl);
12141 		}
12142 		break;
12143 	case DVACT_RESUME:
12144 		iwm_resume(sc);
12145 		break;
12146 	case DVACT_WAKEUP:
12147 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
12148 			err = iwm_wakeup(sc);
12149 			if (err)
12150 				printf("%s: could not initialize hardware\n",
12151 				    DEVNAME(sc));
12152 		}
12153 		break;
12154 	}
12155 
12156 	return 0;
12157 }
12158 
12159 struct cfdriver iwm_cd = {
12160 	NULL, "iwm", DV_IFNET
12161 };
12162 
12163 const struct cfattach iwm_ca = {
12164 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
12165 	NULL, iwm_activate
12166 };
12167