xref: /openbsd-src/sys/dev/pci/if_iwm.c (revision fc405d53b73a2d73393cb97f684863d17b583e38)
1 /*	$OpenBSD: if_iwm.c,v 1.407 2023/04/14 12:45:10 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include "bpfilter.h"
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/malloc.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/systm.h>
122 #include <sys/endian.h>
123 
124 #include <sys/refcnt.h>
125 #include <sys/task.h>
126 #include <machine/bus.h>
127 #include <machine/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/if_ether.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_ra.h>
146 #include <net80211/ieee80211_ra_vht.h>
147 #include <net80211/ieee80211_radiotap.h>
148 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
149 #undef DPRINTF /* defined in ieee80211_priv.h */
150 
151 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
152 
153 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
154 
155 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
156 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
157 
158 #ifdef IWM_DEBUG
159 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
160 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
161 int iwm_debug = 1;
162 #else
163 #define DPRINTF(x)	do { ; } while (0)
164 #define DPRINTFN(n, x)	do { ; } while (0)
165 #endif
166 
167 #include <dev/pci/if_iwmreg.h>
168 #include <dev/pci/if_iwmvar.h>
169 
170 const uint8_t iwm_nvm_channels[] = {
171 	/* 2.4 GHz */
172 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
173 	/* 5 GHz */
174 	36, 40, 44 , 48, 52, 56, 60, 64,
175 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
176 	149, 153, 157, 161, 165
177 };
178 
179 const uint8_t iwm_nvm_channels_8000[] = {
180 	/* 2.4 GHz */
181 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182 	/* 5 GHz */
183 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185 	149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 
188 #define IWM_NUM_2GHZ_CHANNELS	14
189 
190 const struct iwm_rate {
191 	uint16_t rate;
192 	uint8_t plcp;
193 	uint8_t ht_plcp;
194 } iwm_rates[] = {
195 		/* Legacy */		/* HT */
196 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
197 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
198 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
199 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
200 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
201 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
202 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
203 	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
204 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
205 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
206 	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
207 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
208 	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
209 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
210 	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
211 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
212 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
213 	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
214 	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
215 	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
216 	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
217 };
218 #define IWM_RIDX_CCK	0
219 #define IWM_RIDX_OFDM	4
220 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
221 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
222 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
223 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
224 
225 /* Convert an MCS index into an iwm_rates[] index. */
226 const int iwm_ht_mcs2ridx[] = {
227 	IWM_RATE_MCS_0_INDEX,
228 	IWM_RATE_MCS_1_INDEX,
229 	IWM_RATE_MCS_2_INDEX,
230 	IWM_RATE_MCS_3_INDEX,
231 	IWM_RATE_MCS_4_INDEX,
232 	IWM_RATE_MCS_5_INDEX,
233 	IWM_RATE_MCS_6_INDEX,
234 	IWM_RATE_MCS_7_INDEX,
235 	IWM_RATE_MCS_8_INDEX,
236 	IWM_RATE_MCS_9_INDEX,
237 	IWM_RATE_MCS_10_INDEX,
238 	IWM_RATE_MCS_11_INDEX,
239 	IWM_RATE_MCS_12_INDEX,
240 	IWM_RATE_MCS_13_INDEX,
241 	IWM_RATE_MCS_14_INDEX,
242 	IWM_RATE_MCS_15_INDEX,
243 };
244 
245 struct iwm_nvm_section {
246 	uint16_t length;
247 	uint8_t *data;
248 };
249 
250 int	iwm_is_mimo_ht_plcp(uint8_t);
251 int	iwm_is_mimo_ht_mcs(int);
252 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
253 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
254 	    uint8_t *, size_t);
255 int	iwm_set_default_calib(struct iwm_softc *, const void *);
256 void	iwm_fw_info_free(struct iwm_fw_info *);
257 void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
258 int	iwm_read_firmware(struct iwm_softc *);
259 uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
260 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
261 void	iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
262 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
263 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
264 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
265 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
266 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
267 int	iwm_nic_lock(struct iwm_softc *);
268 void	iwm_nic_assert_locked(struct iwm_softc *);
269 void	iwm_nic_unlock(struct iwm_softc *);
270 int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
271 	    uint32_t);
272 int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
273 int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
274 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
275 	    bus_size_t);
276 void	iwm_dma_contig_free(struct iwm_dma_info *);
277 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
278 void	iwm_disable_rx_dma(struct iwm_softc *);
279 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
280 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
281 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
282 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
283 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
284 void	iwm_enable_rfkill_int(struct iwm_softc *);
285 int	iwm_check_rfkill(struct iwm_softc *);
286 void	iwm_enable_interrupts(struct iwm_softc *);
287 void	iwm_enable_fwload_interrupt(struct iwm_softc *);
288 void	iwm_restore_interrupts(struct iwm_softc *);
289 void	iwm_disable_interrupts(struct iwm_softc *);
290 void	iwm_ict_reset(struct iwm_softc *);
291 int	iwm_set_hw_ready(struct iwm_softc *);
292 int	iwm_prepare_card_hw(struct iwm_softc *);
293 void	iwm_apm_config(struct iwm_softc *);
294 int	iwm_apm_init(struct iwm_softc *);
295 void	iwm_apm_stop(struct iwm_softc *);
296 int	iwm_allow_mcast(struct iwm_softc *);
297 void	iwm_init_msix_hw(struct iwm_softc *);
298 void	iwm_conf_msix_hw(struct iwm_softc *, int);
299 int	iwm_clear_persistence_bit(struct iwm_softc *);
300 int	iwm_start_hw(struct iwm_softc *);
301 void	iwm_stop_device(struct iwm_softc *);
302 void	iwm_nic_config(struct iwm_softc *);
303 int	iwm_nic_rx_init(struct iwm_softc *);
304 int	iwm_nic_rx_legacy_init(struct iwm_softc *);
305 int	iwm_nic_rx_mq_init(struct iwm_softc *);
306 int	iwm_nic_tx_init(struct iwm_softc *);
307 int	iwm_nic_init(struct iwm_softc *);
308 int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
309 int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
310 	    uint16_t);
311 int	iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
312 int	iwm_post_alive(struct iwm_softc *);
313 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
314 	    uint16_t);
315 int	iwm_phy_db_set_section(struct iwm_softc *,
316 	    struct iwm_calib_res_notif_phy_db *);
317 int	iwm_is_valid_channel(uint16_t);
318 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
319 uint16_t iwm_channel_id_to_papd(uint16_t);
320 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
321 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
322 	    uint16_t *, uint16_t);
323 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
324 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
325 	    uint8_t);
326 int	iwm_send_phy_db_data(struct iwm_softc *);
327 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
328 	    uint32_t);
329 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
330 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
331 	    uint8_t *, uint16_t *);
332 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
333 	    uint16_t *, size_t);
334 uint8_t	iwm_fw_valid_tx_ant(struct iwm_softc *);
335 uint8_t	iwm_fw_valid_rx_ant(struct iwm_softc *);
336 int	iwm_valid_siso_ant_rate_mask(struct iwm_softc *);
337 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
338 	    const uint8_t *nvm_channels, int nchan);
339 int	iwm_mimo_enabled(struct iwm_softc *);
340 void	iwm_setup_ht_rates(struct iwm_softc *);
341 void	iwm_setup_vht_rates(struct iwm_softc *);
342 void	iwm_mac_ctxt_task(void *);
343 void	iwm_phy_ctxt_task(void *);
344 void	iwm_updateprot(struct ieee80211com *);
345 void	iwm_updateslot(struct ieee80211com *);
346 void	iwm_updateedca(struct ieee80211com *);
347 void	iwm_updatechan(struct ieee80211com *);
348 void	iwm_updatedtim(struct ieee80211com *);
349 void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
350 	    uint16_t);
351 void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
352 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
353 	    uint8_t);
354 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
355 	    uint8_t);
356 void	iwm_rx_ba_session_expired(void *);
357 void	iwm_reorder_timer_expired(void *);
358 int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
359 	    uint16_t, uint16_t, int, int);
360 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
361 	    uint8_t);
362 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
363 	    uint8_t);
364 void	iwm_ba_task(void *);
365 
366 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
367 	    const uint16_t *, const uint16_t *,
368 	    const uint16_t *, const uint16_t *,
369 	    const uint16_t *, int);
370 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
371 	    const uint16_t *, const uint16_t *);
372 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
373 int	iwm_nvm_init(struct iwm_softc *);
374 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
375 	    uint32_t);
376 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
377 	    uint32_t);
378 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
379 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
380 	    int , int *);
381 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
382 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
383 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
384 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
385 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
386 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
387 int	iwm_send_dqa_cmd(struct iwm_softc *);
388 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
389 int	iwm_config_ltr(struct iwm_softc *);
390 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
391 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
392 int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
393 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
394 	    struct iwm_rx_data *);
395 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
396 int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
397 	    struct ieee80211_rxinfo *);
398 int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
399 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
400 void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
401 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
402 void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
403 	    int, uint8_t, int);
404 void	iwm_vht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
405 	    int, int, uint8_t, int);
406 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
407 	    struct iwm_node *, int, int);
408 void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
409 void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
410 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
411 	    struct iwm_rx_data *);
412 void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
413 void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
414 	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
415 void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
416 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
417 	    struct iwm_rx_data *);
418 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
419 uint8_t	iwm_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
420 int	iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
421 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
422 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
423 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
424 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
425 	    struct ieee80211_channel *, uint8_t, uint8_t, uint8_t, uint8_t);
426 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
427 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
428 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
429 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
430 	    const void *);
431 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
432 	    uint32_t *);
433 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
434 	    const void *, uint32_t *);
435 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
436 void	iwm_cmd_done(struct iwm_softc *, int, int, int);
437 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
438 void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
439 uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
440 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
441 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
442 int	iwm_flush_tx_path(struct iwm_softc *, int);
443 int	iwm_wait_tx_queues_empty(struct iwm_softc *);
444 void	iwm_led_enable(struct iwm_softc *);
445 void	iwm_led_disable(struct iwm_softc *);
446 int	iwm_led_is_enabled(struct iwm_softc *);
447 void	iwm_led_blink_timeout(void *);
448 void	iwm_led_blink_start(struct iwm_softc *);
449 void	iwm_led_blink_stop(struct iwm_softc *);
450 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
451 	    struct iwm_beacon_filter_cmd *);
452 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
453 	    struct iwm_beacon_filter_cmd *);
454 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
455 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
456 	    struct iwm_mac_power_cmd *);
457 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
458 int	iwm_power_update_device(struct iwm_softc *);
459 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
460 int	iwm_disable_beacon_filter(struct iwm_softc *);
461 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
462 int	iwm_add_aux_sta(struct iwm_softc *);
463 int	iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
464 int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
465 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
466 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
467 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
468 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
469 	    struct iwm_scan_channel_cfg_lmac *, int, int);
470 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
471 int	iwm_lmac_scan(struct iwm_softc *, int);
472 int	iwm_config_umac_scan(struct iwm_softc *);
473 int	iwm_umac_scan(struct iwm_softc *, int);
474 void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
475 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
476 int	iwm_rval2ridx(int);
477 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
478 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
479 	    struct iwm_mac_ctx_cmd *, uint32_t);
480 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
481 	    struct iwm_mac_data_sta *, int);
482 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
483 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
484 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
485 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
486 int	iwm_scan(struct iwm_softc *);
487 int	iwm_bgscan(struct ieee80211com *);
488 void	iwm_bgscan_done(struct ieee80211com *,
489 	    struct ieee80211_node_switch_bss_arg *, size_t);
490 void	iwm_bgscan_done_task(void *);
491 int	iwm_umac_scan_abort(struct iwm_softc *);
492 int	iwm_lmac_scan_abort(struct iwm_softc *);
493 int	iwm_scan_abort(struct iwm_softc *);
494 int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
495 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
496 	    uint8_t);
497 int	iwm_auth(struct iwm_softc *);
498 int	iwm_deauth(struct iwm_softc *);
499 int	iwm_run(struct iwm_softc *);
500 int	iwm_run_stop(struct iwm_softc *);
501 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
502 int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
503 	    struct ieee80211_key *);
504 int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
505 	    struct ieee80211_key *);
506 void	iwm_delete_key_v1(struct ieee80211com *,
507 	    struct ieee80211_node *, struct ieee80211_key *);
508 void	iwm_delete_key(struct ieee80211com *,
509 	    struct ieee80211_node *, struct ieee80211_key *);
510 void	iwm_calib_timeout(void *);
511 void	iwm_set_rate_table_vht(struct iwm_node *, struct iwm_lq_cmd *);
512 void	iwm_set_rate_table(struct iwm_node *, struct iwm_lq_cmd *);
513 void	iwm_setrates(struct iwm_node *, int);
514 int	iwm_media_change(struct ifnet *);
515 void	iwm_newstate_task(void *);
516 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
517 void	iwm_endscan(struct iwm_softc *);
518 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
519 	    struct ieee80211_node *);
520 int	iwm_sf_config(struct iwm_softc *, int);
521 int	iwm_send_bt_init_conf(struct iwm_softc *);
522 int	iwm_send_soc_conf(struct iwm_softc *);
523 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
524 int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
525 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
526 void	iwm_free_fw_paging(struct iwm_softc *);
527 int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
528 int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
529 int	iwm_init_hw(struct iwm_softc *);
530 int	iwm_init(struct ifnet *);
531 void	iwm_start(struct ifnet *);
532 void	iwm_stop(struct ifnet *);
533 void	iwm_watchdog(struct ifnet *);
534 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
535 const char *iwm_desc_lookup(uint32_t);
536 void	iwm_nic_error(struct iwm_softc *);
537 void	iwm_dump_driver_status(struct iwm_softc *);
538 void	iwm_nic_umac_error(struct iwm_softc *);
539 void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
540 	    struct mbuf_list *);
541 void	iwm_flip_address(uint8_t *);
542 int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
543 	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
544 int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
545 void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
546 	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
547 	    struct mbuf_list *);
548 int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
549 	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
550 int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
551 	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
552 	    struct ieee80211_rxinfo *, struct mbuf_list *);
553 void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
554 	    struct mbuf_list *);
555 int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
556 void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
557 	    struct mbuf_list *);
558 void	iwm_notif_intr(struct iwm_softc *);
559 int	iwm_intr(void *);
560 int	iwm_intr_msix(void *);
561 int	iwm_match(struct device *, void *, void *);
562 int	iwm_preinit(struct iwm_softc *);
563 void	iwm_attach_hook(struct device *);
564 void	iwm_attach(struct device *, struct device *, void *);
565 void	iwm_init_task(void *);
566 int	iwm_activate(struct device *, int);
567 void	iwm_resume(struct iwm_softc *);
568 int	iwm_wakeup(struct iwm_softc *);
569 
570 #if NBPFILTER > 0
571 void	iwm_radiotap_attach(struct iwm_softc *);
572 #endif
573 
574 uint8_t
575 iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
576 {
577 	const struct iwm_fw_cmd_version *entry;
578 	int i;
579 
580 	for (i = 0; i < sc->n_cmd_versions; i++) {
581 		entry = &sc->cmd_versions[i];
582 		if (entry->group == grp && entry->cmd == cmd)
583 			return entry->cmd_ver;
584 	}
585 
586 	return IWM_FW_CMD_VER_UNKNOWN;
587 }
588 
589 int
590 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
591 {
592 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
593 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
594 }
595 
596 int
597 iwm_is_mimo_ht_mcs(int mcs)
598 {
599 	int ridx = iwm_ht_mcs2ridx[mcs];
600 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
601 
602 }
603 
604 int
605 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
606 {
607 	struct iwm_fw_cscheme_list *l = (void *)data;
608 
609 	if (dlen < sizeof(*l) ||
610 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
611 		return EINVAL;
612 
613 	/* we don't actually store anything for now, always use s/w crypto */
614 
615 	return 0;
616 }
617 
618 int
619 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
620     uint8_t *data, size_t dlen)
621 {
622 	struct iwm_fw_sects *fws;
623 	struct iwm_fw_onesect *fwone;
624 
625 	if (type >= IWM_UCODE_TYPE_MAX)
626 		return EINVAL;
627 	if (dlen < sizeof(uint32_t))
628 		return EINVAL;
629 
630 	fws = &sc->sc_fw.fw_sects[type];
631 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
632 		return EINVAL;
633 
634 	fwone = &fws->fw_sect[fws->fw_count];
635 
636 	/* first 32bit are device load offset */
637 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
638 
639 	/* rest is data */
640 	fwone->fws_data = data + sizeof(uint32_t);
641 	fwone->fws_len = dlen - sizeof(uint32_t);
642 
643 	fws->fw_count++;
644 	fws->fw_totlen += fwone->fws_len;
645 
646 	return 0;
647 }
648 
649 #define IWM_DEFAULT_SCAN_CHANNELS	40
650 /* Newer firmware might support more channels. Raise this value if needed. */
651 #define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
652 
653 struct iwm_tlv_calib_data {
654 	uint32_t ucode_type;
655 	struct iwm_tlv_calib_ctrl calib;
656 } __packed;
657 
658 int
659 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
660 {
661 	const struct iwm_tlv_calib_data *def_calib = data;
662 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
663 
664 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
665 		return EINVAL;
666 
667 	sc->sc_default_calib[ucode_type].flow_trigger =
668 	    def_calib->calib.flow_trigger;
669 	sc->sc_default_calib[ucode_type].event_trigger =
670 	    def_calib->calib.event_trigger;
671 
672 	return 0;
673 }
674 
675 void
676 iwm_fw_info_free(struct iwm_fw_info *fw)
677 {
678 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
679 	fw->fw_rawdata = NULL;
680 	fw->fw_rawsize = 0;
681 	/* don't touch fw->fw_status */
682 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
683 }
684 
685 void
686 iwm_fw_version_str(char *buf, size_t bufsize,
687     uint32_t major, uint32_t minor, uint32_t api)
688 {
689 	/*
690 	 * Starting with major version 35 the Linux driver prints the minor
691 	 * version in hexadecimal.
692 	 */
693 	if (major >= 35)
694 		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
695 	else
696 		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
697 }
698 
699 int
700 iwm_read_firmware(struct iwm_softc *sc)
701 {
702 	struct iwm_fw_info *fw = &sc->sc_fw;
703 	struct iwm_tlv_ucode_header *uhdr;
704 	struct iwm_ucode_tlv tlv;
705 	uint32_t tlv_type;
706 	uint8_t *data;
707 	uint32_t usniffer_img;
708 	uint32_t paging_mem_size;
709 	int err;
710 	size_t len;
711 
712 	if (fw->fw_status == IWM_FW_STATUS_DONE)
713 		return 0;
714 
715 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
716 		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
717 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
718 
719 	if (fw->fw_rawdata != NULL)
720 		iwm_fw_info_free(fw);
721 
722 	err = loadfirmware(sc->sc_fwname,
723 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
724 	if (err) {
725 		printf("%s: could not read firmware %s (error %d)\n",
726 		    DEVNAME(sc), sc->sc_fwname, err);
727 		goto out;
728 	}
729 
730 	sc->sc_capaflags = 0;
731 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
732 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
733 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
734 	sc->n_cmd_versions = 0;
735 
736 	uhdr = (void *)fw->fw_rawdata;
737 	if (*(uint32_t *)fw->fw_rawdata != 0
738 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
739 		printf("%s: invalid firmware %s\n",
740 		    DEVNAME(sc), sc->sc_fwname);
741 		err = EINVAL;
742 		goto out;
743 	}
744 
745 	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
746 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
747 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
748 	    IWM_UCODE_API(le32toh(uhdr->ver)));
749 
750 	data = uhdr->data;
751 	len = fw->fw_rawsize - sizeof(*uhdr);
752 
753 	while (len >= sizeof(tlv)) {
754 		size_t tlv_len;
755 		void *tlv_data;
756 
757 		memcpy(&tlv, data, sizeof(tlv));
758 		tlv_len = le32toh(tlv.length);
759 		tlv_type = le32toh(tlv.type);
760 
761 		len -= sizeof(tlv);
762 		data += sizeof(tlv);
763 		tlv_data = data;
764 
765 		if (len < tlv_len) {
766 			printf("%s: firmware too short: %zu bytes\n",
767 			    DEVNAME(sc), len);
768 			err = EINVAL;
769 			goto parse_out;
770 		}
771 
772 		switch (tlv_type) {
773 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
774 			if (tlv_len < sizeof(uint32_t)) {
775 				err = EINVAL;
776 				goto parse_out;
777 			}
778 			sc->sc_capa_max_probe_len
779 			    = le32toh(*(uint32_t *)tlv_data);
780 			if (sc->sc_capa_max_probe_len >
781 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
782 				err = EINVAL;
783 				goto parse_out;
784 			}
785 			break;
786 		case IWM_UCODE_TLV_PAN:
787 			if (tlv_len) {
788 				err = EINVAL;
789 				goto parse_out;
790 			}
791 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
792 			break;
793 		case IWM_UCODE_TLV_FLAGS:
794 			if (tlv_len < sizeof(uint32_t)) {
795 				err = EINVAL;
796 				goto parse_out;
797 			}
798 			/*
799 			 * Apparently there can be many flags, but Linux driver
800 			 * parses only the first one, and so do we.
801 			 *
802 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
803 			 * Intentional or a bug?  Observations from
804 			 * current firmware file:
805 			 *  1) TLV_PAN is parsed first
806 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
807 			 * ==> this resets TLV_PAN to itself... hnnnk
808 			 */
809 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
810 			break;
811 		case IWM_UCODE_TLV_CSCHEME:
812 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
813 			if (err)
814 				goto parse_out;
815 			break;
816 		case IWM_UCODE_TLV_NUM_OF_CPU: {
817 			uint32_t num_cpu;
818 			if (tlv_len != sizeof(uint32_t)) {
819 				err = EINVAL;
820 				goto parse_out;
821 			}
822 			num_cpu = le32toh(*(uint32_t *)tlv_data);
823 			if (num_cpu < 1 || num_cpu > 2) {
824 				err = EINVAL;
825 				goto parse_out;
826 			}
827 			break;
828 		}
829 		case IWM_UCODE_TLV_SEC_RT:
830 			err = iwm_firmware_store_section(sc,
831 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
832 			if (err)
833 				goto parse_out;
834 			break;
835 		case IWM_UCODE_TLV_SEC_INIT:
836 			err = iwm_firmware_store_section(sc,
837 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
838 			if (err)
839 				goto parse_out;
840 			break;
841 		case IWM_UCODE_TLV_SEC_WOWLAN:
842 			err = iwm_firmware_store_section(sc,
843 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
844 			if (err)
845 				goto parse_out;
846 			break;
847 		case IWM_UCODE_TLV_DEF_CALIB:
848 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
849 				err = EINVAL;
850 				goto parse_out;
851 			}
852 			err = iwm_set_default_calib(sc, tlv_data);
853 			if (err)
854 				goto parse_out;
855 			break;
856 		case IWM_UCODE_TLV_PHY_SKU:
857 			if (tlv_len != sizeof(uint32_t)) {
858 				err = EINVAL;
859 				goto parse_out;
860 			}
861 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
862 			break;
863 
864 		case IWM_UCODE_TLV_API_CHANGES_SET: {
865 			struct iwm_ucode_api *api;
866 			int idx, i;
867 			if (tlv_len != sizeof(*api)) {
868 				err = EINVAL;
869 				goto parse_out;
870 			}
871 			api = (struct iwm_ucode_api *)tlv_data;
872 			idx = le32toh(api->api_index);
873 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
874 				err = EINVAL;
875 				goto parse_out;
876 			}
877 			for (i = 0; i < 32; i++) {
878 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
879 					continue;
880 				setbit(sc->sc_ucode_api, i + (32 * idx));
881 			}
882 			break;
883 		}
884 
885 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
886 			struct iwm_ucode_capa *capa;
887 			int idx, i;
888 			if (tlv_len != sizeof(*capa)) {
889 				err = EINVAL;
890 				goto parse_out;
891 			}
892 			capa = (struct iwm_ucode_capa *)tlv_data;
893 			idx = le32toh(capa->api_index);
894 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
895 				goto parse_out;
896 			}
897 			for (i = 0; i < 32; i++) {
898 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
899 					continue;
900 				setbit(sc->sc_enabled_capa, i + (32 * idx));
901 			}
902 			break;
903 		}
904 
905 		case IWM_UCODE_TLV_CMD_VERSIONS:
906 			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
907 				tlv_len /= sizeof(struct iwm_fw_cmd_version);
908 				tlv_len *= sizeof(struct iwm_fw_cmd_version);
909 			}
910 			if (sc->n_cmd_versions != 0) {
911 				err = EINVAL;
912 				goto parse_out;
913 			}
914 			if (tlv_len > sizeof(sc->cmd_versions)) {
915 				err = EINVAL;
916 				goto parse_out;
917 			}
918 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
919 			sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
920 			break;
921 
922 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
923 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
924 			/* ignore, not used by current driver */
925 			break;
926 
927 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
928 			err = iwm_firmware_store_section(sc,
929 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
930 			    tlv_len);
931 			if (err)
932 				goto parse_out;
933 			break;
934 
935 		case IWM_UCODE_TLV_PAGING:
936 			if (tlv_len != sizeof(uint32_t)) {
937 				err = EINVAL;
938 				goto parse_out;
939 			}
940 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
941 
942 			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
943 			    DEVNAME(sc), paging_mem_size));
944 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
945 				printf("%s: Driver only supports up to %u"
946 				    " bytes for paging image (%u requested)\n",
947 				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
948 				    paging_mem_size);
949 				err = EINVAL;
950 				goto out;
951 			}
952 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
953 				printf("%s: Paging: image isn't multiple of %u\n",
954 				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
955 				err = EINVAL;
956 				goto out;
957 			}
958 
959 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
960 			    paging_mem_size;
961 			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
962 			fw->fw_sects[usniffer_img].paging_mem_size =
963 			    paging_mem_size;
964 			break;
965 
966 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
967 			if (tlv_len != sizeof(uint32_t)) {
968 				err = EINVAL;
969 				goto parse_out;
970 			}
971 			sc->sc_capa_n_scan_channels =
972 			  le32toh(*(uint32_t *)tlv_data);
973 			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
974 				err = ERANGE;
975 				goto parse_out;
976 			}
977 			break;
978 
979 		case IWM_UCODE_TLV_FW_VERSION:
980 			if (tlv_len != sizeof(uint32_t) * 3) {
981 				err = EINVAL;
982 				goto parse_out;
983 			}
984 
985 			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
986 			    le32toh(((uint32_t *)tlv_data)[0]),
987 			    le32toh(((uint32_t *)tlv_data)[1]),
988 			    le32toh(((uint32_t *)tlv_data)[2]));
989 			break;
990 
991 		case IWM_UCODE_TLV_FW_DBG_DEST:
992 		case IWM_UCODE_TLV_FW_DBG_CONF:
993 		case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
994 		case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
995 		case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
996 		case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
997 		case IWM_UCODE_TLV_TYPE_HCMD:
998 		case IWM_UCODE_TLV_TYPE_REGIONS:
999 		case IWM_UCODE_TLV_TYPE_TRIGGERS:
1000 			break;
1001 
1002 		case IWM_UCODE_TLV_HW_TYPE:
1003 			break;
1004 
1005 		case IWM_UCODE_TLV_FW_MEM_SEG:
1006 			break;
1007 
1008 		/* undocumented TLVs found in iwm-9000-43 image */
1009 		case 0x1000003:
1010 		case 0x1000004:
1011 			break;
1012 
1013 		default:
1014 			err = EINVAL;
1015 			goto parse_out;
1016 		}
1017 
1018 		/*
1019 		 * Check for size_t overflow and ignore missing padding at
1020 		 * end of firmware file.
1021 		 */
1022 		if (roundup(tlv_len, 4) > len)
1023 			break;
1024 
1025 		len -= roundup(tlv_len, 4);
1026 		data += roundup(tlv_len, 4);
1027 	}
1028 
1029 	KASSERT(err == 0);
1030 
1031  parse_out:
1032 	if (err) {
1033 		printf("%s: firmware parse error %d, "
1034 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1035 	}
1036 
1037  out:
1038 	if (err) {
1039 		fw->fw_status = IWM_FW_STATUS_NONE;
1040 		if (fw->fw_rawdata != NULL)
1041 			iwm_fw_info_free(fw);
1042 	} else
1043 		fw->fw_status = IWM_FW_STATUS_DONE;
1044 	wakeup(&sc->sc_fw);
1045 
1046 	return err;
1047 }
1048 
1049 uint32_t
1050 iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
1051 {
1052 	IWM_WRITE(sc,
1053 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1054 	IWM_BARRIER_READ_WRITE(sc);
1055 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
1056 }
1057 
1058 uint32_t
1059 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
1060 {
1061 	iwm_nic_assert_locked(sc);
1062 	return iwm_read_prph_unlocked(sc, addr);
1063 }
1064 
1065 void
1066 iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1067 {
1068 	IWM_WRITE(sc,
1069 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1070 	IWM_BARRIER_WRITE(sc);
1071 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
1072 }
1073 
1074 void
1075 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1076 {
1077 	iwm_nic_assert_locked(sc);
1078 	iwm_write_prph_unlocked(sc, addr, val);
1079 }
1080 
1081 void
1082 iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
1083 {
1084 	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1085 	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1086 }
1087 
1088 int
1089 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
1090 {
1091 	int offs, err = 0;
1092 	uint32_t *vals = buf;
1093 
1094 	if (iwm_nic_lock(sc)) {
1095 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1096 		for (offs = 0; offs < dwords; offs++)
1097 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1098 		iwm_nic_unlock(sc);
1099 	} else {
1100 		err = EBUSY;
1101 	}
1102 	return err;
1103 }
1104 
1105 int
1106 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1107 {
1108 	int offs;
1109 	const uint32_t *vals = buf;
1110 
1111 	if (iwm_nic_lock(sc)) {
1112 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1113 		/* WADDR auto-increments */
1114 		for (offs = 0; offs < dwords; offs++) {
1115 			uint32_t val = vals ? vals[offs] : 0;
1116 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1117 		}
1118 		iwm_nic_unlock(sc);
1119 	} else {
1120 		return EBUSY;
1121 	}
1122 	return 0;
1123 }
1124 
1125 int
1126 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1127 {
1128 	return iwm_write_mem(sc, addr, &val, 1);
1129 }
1130 
1131 int
1132 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1133     int timo)
1134 {
1135 	for (;;) {
1136 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1137 			return 1;
1138 		}
1139 		if (timo < 10) {
1140 			return 0;
1141 		}
1142 		timo -= 10;
1143 		DELAY(10);
1144 	}
1145 }
1146 
1147 int
1148 iwm_nic_lock(struct iwm_softc *sc)
1149 {
1150 	if (sc->sc_nic_locks > 0) {
1151 		iwm_nic_assert_locked(sc);
1152 		sc->sc_nic_locks++;
1153 		return 1; /* already locked */
1154 	}
1155 
1156 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1157 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1158 
1159 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1160 		DELAY(2);
1161 
1162 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1163 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1164 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1165 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1166 		sc->sc_nic_locks++;
1167 		return 1;
1168 	}
1169 
1170 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1171 	return 0;
1172 }
1173 
1174 void
1175 iwm_nic_assert_locked(struct iwm_softc *sc)
1176 {
1177 	if (sc->sc_nic_locks <= 0)
1178 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1179 }
1180 
1181 void
1182 iwm_nic_unlock(struct iwm_softc *sc)
1183 {
1184 	if (sc->sc_nic_locks > 0) {
1185 		if (--sc->sc_nic_locks == 0)
1186 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1187 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1188 	} else
1189 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1190 }
1191 
1192 int
1193 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1194     uint32_t mask)
1195 {
1196 	uint32_t val;
1197 
1198 	if (iwm_nic_lock(sc)) {
1199 		val = iwm_read_prph(sc, reg) & mask;
1200 		val |= bits;
1201 		iwm_write_prph(sc, reg, val);
1202 		iwm_nic_unlock(sc);
1203 		return 0;
1204 	}
1205 	return EBUSY;
1206 }
1207 
1208 int
1209 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1210 {
1211 	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1212 }
1213 
1214 int
1215 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1216 {
1217 	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1218 }
1219 
1220 int
1221 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1222     bus_size_t size, bus_size_t alignment)
1223 {
1224 	int nsegs, err;
1225 	caddr_t va;
1226 
1227 	dma->tag = tag;
1228 	dma->size = size;
1229 
1230 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1231 	    &dma->map);
1232 	if (err)
1233 		goto fail;
1234 
1235 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1236 	    BUS_DMA_NOWAIT);
1237 	if (err)
1238 		goto fail;
1239 
1240 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1241 	    BUS_DMA_NOWAIT);
1242 	if (err)
1243 		goto fail;
1244 	dma->vaddr = va;
1245 
1246 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1247 	    BUS_DMA_NOWAIT);
1248 	if (err)
1249 		goto fail;
1250 
1251 	memset(dma->vaddr, 0, size);
1252 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1253 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1254 
1255 	return 0;
1256 
1257 fail:	iwm_dma_contig_free(dma);
1258 	return err;
1259 }
1260 
1261 void
1262 iwm_dma_contig_free(struct iwm_dma_info *dma)
1263 {
1264 	if (dma->map != NULL) {
1265 		if (dma->vaddr != NULL) {
1266 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1267 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1268 			bus_dmamap_unload(dma->tag, dma->map);
1269 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1270 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1271 			dma->vaddr = NULL;
1272 		}
1273 		bus_dmamap_destroy(dma->tag, dma->map);
1274 		dma->map = NULL;
1275 	}
1276 }
1277 
1278 int
1279 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1280 {
1281 	bus_size_t size;
1282 	size_t descsz;
1283 	int count, i, err;
1284 
1285 	ring->cur = 0;
1286 
1287 	if (sc->sc_mqrx_supported) {
1288 		count = IWM_RX_MQ_RING_COUNT;
1289 		descsz = sizeof(uint64_t);
1290 	} else {
1291 		count = IWM_RX_RING_COUNT;
1292 		descsz = sizeof(uint32_t);
1293 	}
1294 
1295 	/* Allocate RX descriptors (256-byte aligned). */
1296 	size = count * descsz;
1297 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1298 	if (err) {
1299 		printf("%s: could not allocate RX ring DMA memory\n",
1300 		    DEVNAME(sc));
1301 		goto fail;
1302 	}
1303 	ring->desc = ring->free_desc_dma.vaddr;
1304 
1305 	/* Allocate RX status area (16-byte aligned). */
1306 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1307 	    sizeof(*ring->stat), 16);
1308 	if (err) {
1309 		printf("%s: could not allocate RX status DMA memory\n",
1310 		    DEVNAME(sc));
1311 		goto fail;
1312 	}
1313 	ring->stat = ring->stat_dma.vaddr;
1314 
1315 	if (sc->sc_mqrx_supported) {
1316 		size = count * sizeof(uint32_t);
1317 		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1318 		    size, 256);
1319 		if (err) {
1320 			printf("%s: could not allocate RX ring DMA memory\n",
1321 			    DEVNAME(sc));
1322 			goto fail;
1323 		}
1324 	}
1325 
1326 	for (i = 0; i < count; i++) {
1327 		struct iwm_rx_data *data = &ring->data[i];
1328 
1329 		memset(data, 0, sizeof(*data));
1330 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1331 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1332 		    &data->map);
1333 		if (err) {
1334 			printf("%s: could not create RX buf DMA map\n",
1335 			    DEVNAME(sc));
1336 			goto fail;
1337 		}
1338 
1339 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1340 		if (err)
1341 			goto fail;
1342 	}
1343 	return 0;
1344 
1345 fail:	iwm_free_rx_ring(sc, ring);
1346 	return err;
1347 }
1348 
1349 void
1350 iwm_disable_rx_dma(struct iwm_softc *sc)
1351 {
1352 	int ntries;
1353 
1354 	if (iwm_nic_lock(sc)) {
1355 		if (sc->sc_mqrx_supported) {
1356 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1357 			for (ntries = 0; ntries < 1000; ntries++) {
1358 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1359 				    IWM_RXF_DMA_IDLE)
1360 					break;
1361 				DELAY(10);
1362 			}
1363 		} else {
1364 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1365 			for (ntries = 0; ntries < 1000; ntries++) {
1366 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
1367 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1368 					break;
1369 				DELAY(10);
1370 			}
1371 		}
1372 		iwm_nic_unlock(sc);
1373 	}
1374 }
1375 
1376 void
1377 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1378 {
1379 	ring->cur = 0;
1380 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1381 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1382 	memset(ring->stat, 0, sizeof(*ring->stat));
1383 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1384 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1385 
1386 }
1387 
1388 void
1389 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1390 {
1391 	int count, i;
1392 
1393 	iwm_dma_contig_free(&ring->free_desc_dma);
1394 	iwm_dma_contig_free(&ring->stat_dma);
1395 	iwm_dma_contig_free(&ring->used_desc_dma);
1396 
1397 	if (sc->sc_mqrx_supported)
1398 		count = IWM_RX_MQ_RING_COUNT;
1399 	else
1400 		count = IWM_RX_RING_COUNT;
1401 
1402 	for (i = 0; i < count; i++) {
1403 		struct iwm_rx_data *data = &ring->data[i];
1404 
1405 		if (data->m != NULL) {
1406 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1407 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1408 			bus_dmamap_unload(sc->sc_dmat, data->map);
1409 			m_freem(data->m);
1410 			data->m = NULL;
1411 		}
1412 		if (data->map != NULL)
1413 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1414 	}
1415 }
1416 
1417 int
1418 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1419 {
1420 	bus_addr_t paddr;
1421 	bus_size_t size;
1422 	int i, err;
1423 
1424 	ring->qid = qid;
1425 	ring->queued = 0;
1426 	ring->cur = 0;
1427 	ring->tail = 0;
1428 
1429 	/* Allocate TX descriptors (256-byte aligned). */
1430 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1431 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1432 	if (err) {
1433 		printf("%s: could not allocate TX ring DMA memory\n",
1434 		    DEVNAME(sc));
1435 		goto fail;
1436 	}
1437 	ring->desc = ring->desc_dma.vaddr;
1438 
1439 	/*
1440 	 * There is no need to allocate DMA buffers for unused rings.
1441 	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1442 	 * than we currently need.
1443 	 *
1444 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1445 	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
1446 	 * are sc->tqx[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1447 	 * in order to provide one queue per EDCA category.
1448 	 * Tx aggregation requires additional queues, one queue per TID for
1449 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1450 	 *
1451 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1452 	 * and Tx aggregation is not supported.
1453 	 *
1454 	 * Unfortunately, we cannot tell if DQA will be used until the
1455 	 * firmware gets loaded later, so just allocate sufficient rings
1456 	 * in order to satisfy both cases.
1457 	 */
1458 	if (qid > IWM_LAST_AGG_TX_QUEUE)
1459 		return 0;
1460 
1461 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1462 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1463 	if (err) {
1464 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1465 		goto fail;
1466 	}
1467 	ring->cmd = ring->cmd_dma.vaddr;
1468 
1469 	paddr = ring->cmd_dma.paddr;
1470 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1471 		struct iwm_tx_data *data = &ring->data[i];
1472 		size_t mapsize;
1473 
1474 		data->cmd_paddr = paddr;
1475 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1476 		    + offsetof(struct iwm_tx_cmd, scratch);
1477 		paddr += sizeof(struct iwm_device_cmd);
1478 
1479 		/* FW commands may require more mapped space than packets. */
1480 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1481 			mapsize = (sizeof(struct iwm_cmd_header) +
1482 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1483 		else
1484 			mapsize = MCLBYTES;
1485 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1486 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1487 		    &data->map);
1488 		if (err) {
1489 			printf("%s: could not create TX buf DMA map\n",
1490 			    DEVNAME(sc));
1491 			goto fail;
1492 		}
1493 	}
1494 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1495 	return 0;
1496 
1497 fail:	iwm_free_tx_ring(sc, ring);
1498 	return err;
1499 }
1500 
1501 void
1502 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1503 {
1504 	int i;
1505 
1506 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1507 		struct iwm_tx_data *data = &ring->data[i];
1508 
1509 		if (data->m != NULL) {
1510 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1511 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1512 			bus_dmamap_unload(sc->sc_dmat, data->map);
1513 			m_freem(data->m);
1514 			data->m = NULL;
1515 		}
1516 	}
1517 	/* Clear TX descriptors. */
1518 	memset(ring->desc, 0, ring->desc_dma.size);
1519 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1520 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1521 	sc->qfullmsk &= ~(1 << ring->qid);
1522 	sc->qenablemsk &= ~(1 << ring->qid);
1523 	/* 7000 family NICs are locked while commands are in progress. */
1524 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1525 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1526 			iwm_nic_unlock(sc);
1527 	}
1528 	ring->queued = 0;
1529 	ring->cur = 0;
1530 	ring->tail = 0;
1531 }
1532 
1533 void
1534 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1535 {
1536 	int i;
1537 
1538 	iwm_dma_contig_free(&ring->desc_dma);
1539 	iwm_dma_contig_free(&ring->cmd_dma);
1540 
1541 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1542 		struct iwm_tx_data *data = &ring->data[i];
1543 
1544 		if (data->m != NULL) {
1545 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1546 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1547 			bus_dmamap_unload(sc->sc_dmat, data->map);
1548 			m_freem(data->m);
1549 			data->m = NULL;
1550 		}
1551 		if (data->map != NULL)
1552 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1553 	}
1554 }
1555 
1556 void
1557 iwm_enable_rfkill_int(struct iwm_softc *sc)
1558 {
1559 	if (!sc->sc_msix) {
1560 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1561 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1562 	} else {
1563 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1564 		    sc->sc_fh_init_mask);
1565 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1566 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1567 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1568 	}
1569 
1570 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1571 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1572 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1573 }
1574 
1575 int
1576 iwm_check_rfkill(struct iwm_softc *sc)
1577 {
1578 	uint32_t v;
1579 	int rv;
1580 
1581 	/*
1582 	 * "documentation" is not really helpful here:
1583 	 *  27:	HW_RF_KILL_SW
1584 	 *	Indicates state of (platform's) hardware RF-Kill switch
1585 	 *
1586 	 * But apparently when it's off, it's on ...
1587 	 */
1588 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1589 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1590 	if (rv) {
1591 		sc->sc_flags |= IWM_FLAG_RFKILL;
1592 	} else {
1593 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1594 	}
1595 
1596 	return rv;
1597 }
1598 
1599 void
1600 iwm_enable_interrupts(struct iwm_softc *sc)
1601 {
1602 	if (!sc->sc_msix) {
1603 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1604 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1605 	} else {
1606 		/*
1607 		 * fh/hw_mask keeps all the unmasked causes.
1608 		 * Unlike msi, in msix cause is enabled when it is unset.
1609 		 */
1610 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1611 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1612 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1613 		    ~sc->sc_fh_mask);
1614 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1615 		    ~sc->sc_hw_mask);
1616 	}
1617 }
1618 
1619 void
1620 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1621 {
1622 	if (!sc->sc_msix) {
1623 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1624 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1625 	} else {
1626 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1627 		    sc->sc_hw_init_mask);
1628 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1629 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1630 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1631 	}
1632 }
1633 
1634 void
1635 iwm_restore_interrupts(struct iwm_softc *sc)
1636 {
1637 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1638 }
1639 
1640 void
1641 iwm_disable_interrupts(struct iwm_softc *sc)
1642 {
1643 	if (!sc->sc_msix) {
1644 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1645 
1646 		/* acknowledge all interrupts */
1647 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1648 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1649 	} else {
1650 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1651 		    sc->sc_fh_init_mask);
1652 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1653 		    sc->sc_hw_init_mask);
1654 	}
1655 }
1656 
1657 void
1658 iwm_ict_reset(struct iwm_softc *sc)
1659 {
1660 	iwm_disable_interrupts(sc);
1661 
1662 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1663 	sc->ict_cur = 0;
1664 
1665 	/* Set physical address of ICT (4KB aligned). */
1666 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1667 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1668 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1669 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1670 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1671 
1672 	/* Switch to ICT interrupt mode in driver. */
1673 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1674 
1675 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1676 	iwm_enable_interrupts(sc);
1677 }
1678 
1679 #define IWM_HW_READY_TIMEOUT 50
1680 int
1681 iwm_set_hw_ready(struct iwm_softc *sc)
1682 {
1683 	int ready;
1684 
1685 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1686 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1687 
1688 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1689 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1690 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1691 	    IWM_HW_READY_TIMEOUT);
1692 	if (ready)
1693 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1694 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1695 
1696 	return ready;
1697 }
1698 #undef IWM_HW_READY_TIMEOUT
1699 
1700 int
1701 iwm_prepare_card_hw(struct iwm_softc *sc)
1702 {
1703 	int t = 0;
1704 	int ntries;
1705 
1706 	if (iwm_set_hw_ready(sc))
1707 		return 0;
1708 
1709 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1710 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1711 	DELAY(1000);
1712 
1713 	for (ntries = 0; ntries < 10; ntries++) {
1714 		/* If HW is not ready, prepare the conditions to check again */
1715 		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1716 		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1717 
1718 		do {
1719 			if (iwm_set_hw_ready(sc))
1720 				return 0;
1721 			DELAY(200);
1722 			t += 200;
1723 		} while (t < 150000);
1724 		DELAY(25000);
1725 	}
1726 
1727 	return ETIMEDOUT;
1728 }
1729 
1730 void
1731 iwm_apm_config(struct iwm_softc *sc)
1732 {
1733 	pcireg_t lctl, cap;
1734 
1735 	/*
1736 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1737 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1738 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1739 	 *    costs negligible amount of power savings.
1740 	 * If not (unlikely), enable L0S, so there is at least some
1741 	 *    power savings, even without L1.
1742 	 */
1743 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1744 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1745 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1746 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1747 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1748 	} else {
1749 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1750 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1751 	}
1752 
1753 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1754 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1755 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1756 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1757 	    DEVNAME(sc),
1758 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1759 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1760 }
1761 
1762 /*
1763  * Start up NIC's basic functionality after it has been reset
1764  * e.g. after platform boot or shutdown.
1765  * NOTE:  This does not load uCode nor start the embedded processor
1766  */
1767 int
1768 iwm_apm_init(struct iwm_softc *sc)
1769 {
1770 	int err = 0;
1771 
1772 	/* Disable L0S exit timer (platform NMI workaround) */
1773 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1774 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1775 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1776 
1777 	/*
1778 	 * Disable L0s without affecting L1;
1779 	 *  don't wait for ICH L0s (ICH bug W/A)
1780 	 */
1781 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1782 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1783 
1784 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1785 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1786 
1787 	/*
1788 	 * Enable HAP INTA (interrupt from management bus) to
1789 	 * wake device's PCI Express link L1a -> L0s
1790 	 */
1791 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1792 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1793 
1794 	iwm_apm_config(sc);
1795 
1796 #if 0 /* not for 7k/8k */
1797 	/* Configure analog phase-lock-loop before activating to D0A */
1798 	if (trans->cfg->base_params->pll_cfg_val)
1799 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1800 		    trans->cfg->base_params->pll_cfg_val);
1801 #endif
1802 
1803 	/*
1804 	 * Set "initialization complete" bit to move adapter from
1805 	 * D0U* --> D0A* (powered-up active) state.
1806 	 */
1807 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1808 
1809 	/*
1810 	 * Wait for clock stabilization; once stabilized, access to
1811 	 * device-internal resources is supported, e.g. iwm_write_prph()
1812 	 * and accesses to uCode SRAM.
1813 	 */
1814 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1815 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1816 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1817 		printf("%s: timeout waiting for clock stabilization\n",
1818 		    DEVNAME(sc));
1819 		err = ETIMEDOUT;
1820 		goto out;
1821 	}
1822 
1823 	if (sc->host_interrupt_operation_mode) {
1824 		/*
1825 		 * This is a bit of an abuse - This is needed for 7260 / 3160
1826 		 * only check host_interrupt_operation_mode even if this is
1827 		 * not related to host_interrupt_operation_mode.
1828 		 *
1829 		 * Enable the oscillator to count wake up time for L1 exit. This
1830 		 * consumes slightly more power (100uA) - but allows to be sure
1831 		 * that we wake up from L1 on time.
1832 		 *
1833 		 * This looks weird: read twice the same register, discard the
1834 		 * value, set a bit, and yet again, read that same register
1835 		 * just to discard the value. But that's the way the hardware
1836 		 * seems to like it.
1837 		 */
1838 		if (iwm_nic_lock(sc)) {
1839 			iwm_read_prph(sc, IWM_OSC_CLK);
1840 			iwm_read_prph(sc, IWM_OSC_CLK);
1841 			iwm_nic_unlock(sc);
1842 		}
1843 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1844 		    IWM_OSC_CLK_FORCE_CONTROL);
1845 		if (err)
1846 			goto out;
1847 		if (iwm_nic_lock(sc)) {
1848 			iwm_read_prph(sc, IWM_OSC_CLK);
1849 			iwm_read_prph(sc, IWM_OSC_CLK);
1850 			iwm_nic_unlock(sc);
1851 		}
1852 	}
1853 
1854 	/*
1855 	 * Enable DMA clock and wait for it to stabilize.
1856 	 *
1857 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1858 	 * do not disable clocks.  This preserves any hardware bits already
1859 	 * set by default in "CLK_CTRL_REG" after reset.
1860 	 */
1861 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1862 		if (iwm_nic_lock(sc)) {
1863 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1864 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1865 			iwm_nic_unlock(sc);
1866 		}
1867 		DELAY(20);
1868 
1869 		/* Disable L1-Active */
1870 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1871 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1872 		if (err)
1873 			goto out;
1874 
1875 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1876 		if (iwm_nic_lock(sc)) {
1877 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1878 			    IWM_APMG_RTC_INT_STT_RFKILL);
1879 			iwm_nic_unlock(sc);
1880 		}
1881 	}
1882  out:
1883 	if (err)
1884 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1885 	return err;
1886 }
1887 
1888 void
1889 iwm_apm_stop(struct iwm_softc *sc)
1890 {
1891 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1892 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1893 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1894 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1895 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1896 	DELAY(1000);
1897 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1898 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1899 	DELAY(5000);
1900 
1901 	/* stop device's busmaster DMA activity */
1902 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1903 
1904 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1905 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1906 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1907 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1908 
1909 	/*
1910 	 * Clear "initialization complete" bit to move adapter from
1911 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1912 	 */
1913 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1914 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1915 }
1916 
1917 void
1918 iwm_init_msix_hw(struct iwm_softc *sc)
1919 {
1920 	iwm_conf_msix_hw(sc, 0);
1921 
1922 	if (!sc->sc_msix)
1923 		return;
1924 
1925 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1926 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1927 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1928 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1929 }
1930 
1931 void
1932 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1933 {
1934 	int vector = 0;
1935 
1936 	if (!sc->sc_msix) {
1937 		/* Newer chips default to MSIX. */
1938 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1939 			iwm_write_prph(sc, IWM_UREG_CHICK,
1940 			    IWM_UREG_CHICK_MSI_ENABLE);
1941 			iwm_nic_unlock(sc);
1942 		}
1943 		return;
1944 	}
1945 
1946 	if (!stopped && iwm_nic_lock(sc)) {
1947 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1948 		iwm_nic_unlock(sc);
1949 	}
1950 
1951 	/* Disable all interrupts */
1952 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1953 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1954 
1955 	/* Map fallback-queue (command/mgmt) to a single vector */
1956 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1957 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1958 	/* Map RSS queue (data) to the same vector */
1959 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1960 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1961 
1962 	/* Enable the RX queues cause interrupts */
1963 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1964 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1965 
1966 	/* Map non-RX causes to the same vector */
1967 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1968 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1969 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1970 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1971 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1972 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1973 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1974 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1975 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1976 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1977 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1978 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1979 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1980 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1981 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1982 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1983 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1984 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1985 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1986 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1987 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1988 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1989 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1990 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1991 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1992 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1993 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1994 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1995 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1996 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1997 
1998 	/* Enable non-RX causes interrupts */
1999 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
2000 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2001 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2002 	    IWM_MSIX_FH_INT_CAUSES_S2D |
2003 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
2004 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
2005 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
2006 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2007 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
2008 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2009 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2010 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2011 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2012 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
2013 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2014 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2015 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2016 }
2017 
2018 int
2019 iwm_clear_persistence_bit(struct iwm_softc *sc)
2020 {
2021 	uint32_t hpm, wprot;
2022 
2023 	hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2024 	if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2025 		wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2026 		if (wprot & IWM_PREG_WFPM_ACCESS) {
2027 			printf("%s: cannot clear persistence bit\n",
2028 			    DEVNAME(sc));
2029 			return EPERM;
2030 		}
2031 		iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2032 		    hpm & ~IWM_HPM_PERSISTENCE_BIT);
2033 	}
2034 
2035 	return 0;
2036 }
2037 
2038 int
2039 iwm_start_hw(struct iwm_softc *sc)
2040 {
2041 	int err;
2042 
2043 	err = iwm_prepare_card_hw(sc);
2044 	if (err)
2045 		return err;
2046 
2047 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2048 		err = iwm_clear_persistence_bit(sc);
2049 		if (err)
2050 			return err;
2051 	}
2052 
2053 	/* Reset the entire device */
2054 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2055 	DELAY(5000);
2056 
2057 	err = iwm_apm_init(sc);
2058 	if (err)
2059 		return err;
2060 
2061 	iwm_init_msix_hw(sc);
2062 
2063 	iwm_enable_rfkill_int(sc);
2064 	iwm_check_rfkill(sc);
2065 
2066 	return 0;
2067 }
2068 
2069 
2070 void
2071 iwm_stop_device(struct iwm_softc *sc)
2072 {
2073 	int chnl, ntries;
2074 	int qid;
2075 
2076 	iwm_disable_interrupts(sc);
2077 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2078 
2079 	/* Stop all DMA channels. */
2080 	if (iwm_nic_lock(sc)) {
2081 		/* Deactivate TX scheduler. */
2082 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2083 
2084 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2085 			IWM_WRITE(sc,
2086 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2087 			for (ntries = 0; ntries < 200; ntries++) {
2088 				uint32_t r;
2089 
2090 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2091 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2092 				    chnl))
2093 					break;
2094 				DELAY(20);
2095 			}
2096 		}
2097 		iwm_nic_unlock(sc);
2098 	}
2099 	iwm_disable_rx_dma(sc);
2100 
2101 	iwm_reset_rx_ring(sc, &sc->rxq);
2102 
2103 	for (qid = 0; qid < nitems(sc->txq); qid++)
2104 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
2105 
2106 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2107 		if (iwm_nic_lock(sc)) {
2108 			/* Power-down device's busmaster DMA clocks */
2109 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2110 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2111 			iwm_nic_unlock(sc);
2112 		}
2113 		DELAY(5);
2114 	}
2115 
2116 	/* Make sure (redundant) we've released our request to stay awake */
2117 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2118 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2119 	if (sc->sc_nic_locks > 0)
2120 		printf("%s: %d active NIC locks forcefully cleared\n",
2121 		    DEVNAME(sc), sc->sc_nic_locks);
2122 	sc->sc_nic_locks = 0;
2123 
2124 	/* Stop the device, and put it in low power state */
2125 	iwm_apm_stop(sc);
2126 
2127 	/* Reset the on-board processor. */
2128 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2129 	DELAY(5000);
2130 
2131 	/*
2132 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2133 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2134 	 * that enables radio won't fire on the correct irq, and the
2135 	 * driver won't be able to handle the interrupt.
2136 	 * Configure the IVAR table again after reset.
2137 	 */
2138 	iwm_conf_msix_hw(sc, 1);
2139 
2140 	/*
2141 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2142 	 * Clear the interrupt again.
2143 	 */
2144 	iwm_disable_interrupts(sc);
2145 
2146 	/* Even though we stop the HW we still want the RF kill interrupt. */
2147 	iwm_enable_rfkill_int(sc);
2148 	iwm_check_rfkill(sc);
2149 
2150 	iwm_prepare_card_hw(sc);
2151 }
2152 
2153 void
2154 iwm_nic_config(struct iwm_softc *sc)
2155 {
2156 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2157 	uint32_t mask, val, reg_val = 0;
2158 
2159 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2160 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2161 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2162 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2163 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2164 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2165 
2166 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2167 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2168 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2169 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2170 
2171 	/* radio configuration */
2172 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2173 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2174 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2175 
2176 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2177 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2178 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2179 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2180 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2181 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2182 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2183 
2184 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2185 	val &= ~mask;
2186 	val |= reg_val;
2187 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2188 
2189 	/*
2190 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
2191 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
2192 	 * to lose ownership and not being able to obtain it back.
2193 	 */
2194 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2195 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2196 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2197 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2198 }
2199 
2200 int
2201 iwm_nic_rx_init(struct iwm_softc *sc)
2202 {
2203 	if (sc->sc_mqrx_supported)
2204 		return iwm_nic_rx_mq_init(sc);
2205 	else
2206 		return iwm_nic_rx_legacy_init(sc);
2207 }
2208 
2209 int
2210 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2211 {
2212 	int enabled;
2213 
2214 	if (!iwm_nic_lock(sc))
2215 		return EBUSY;
2216 
2217 	/* Stop RX DMA. */
2218 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2219 	/* Disable RX used and free queue operation. */
2220 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2221 
2222 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2223 	    sc->rxq.free_desc_dma.paddr);
2224 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2225 	    sc->rxq.used_desc_dma.paddr);
2226 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2227 	    sc->rxq.stat_dma.paddr);
2228 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2229 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2230 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2231 
2232 	/* We configure only queue 0 for now. */
2233 	enabled = ((1 << 0) << 16) | (1 << 0);
2234 
2235 	/* Enable RX DMA, 4KB buffer size. */
2236 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2237 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2238 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2239 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2240 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2241 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2242 
2243 	/* Enable RX DMA snooping. */
2244 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2245 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2246 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2247 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2248 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2249 
2250 	/* Enable the configured queue(s). */
2251 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2252 
2253 	iwm_nic_unlock(sc);
2254 
2255 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2256 
2257 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2258 
2259 	return 0;
2260 }
2261 
2262 int
2263 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2264 {
2265 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2266 
2267 	iwm_disable_rx_dma(sc);
2268 
2269 	if (!iwm_nic_lock(sc))
2270 		return EBUSY;
2271 
2272 	/* reset and flush pointers */
2273 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2274 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2275 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2276 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2277 
2278 	/* Set physical address of RX ring (256-byte aligned). */
2279 	IWM_WRITE(sc,
2280 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2281 
2282 	/* Set physical address of RX status (16-byte aligned). */
2283 	IWM_WRITE(sc,
2284 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2285 
2286 	/* Enable RX. */
2287 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2288 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2289 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2290 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2291 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2292 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2293 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2294 
2295 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2296 
2297 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2298 	if (sc->host_interrupt_operation_mode)
2299 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2300 
2301 	iwm_nic_unlock(sc);
2302 
2303 	/*
2304 	 * This value should initially be 0 (before preparing any RBs),
2305 	 * and should be 8 after preparing the first 8 RBs (for example).
2306 	 */
2307 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2308 
2309 	return 0;
2310 }
2311 
2312 int
2313 iwm_nic_tx_init(struct iwm_softc *sc)
2314 {
2315 	int qid, err;
2316 
2317 	if (!iwm_nic_lock(sc))
2318 		return EBUSY;
2319 
2320 	/* Deactivate TX scheduler. */
2321 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2322 
2323 	/* Set physical address of "keep warm" page (16-byte aligned). */
2324 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2325 
2326 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2327 		struct iwm_tx_ring *txq = &sc->txq[qid];
2328 
2329 		/* Set physical address of TX ring (256-byte aligned). */
2330 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2331 		    txq->desc_dma.paddr >> 8);
2332 	}
2333 
2334 	err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2335 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2336 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2337 
2338 	iwm_nic_unlock(sc);
2339 
2340 	return err;
2341 }
2342 
2343 int
2344 iwm_nic_init(struct iwm_softc *sc)
2345 {
2346 	int err;
2347 
2348 	iwm_apm_init(sc);
2349 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2350 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2351 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2352 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2353 
2354 	iwm_nic_config(sc);
2355 
2356 	err = iwm_nic_rx_init(sc);
2357 	if (err)
2358 		return err;
2359 
2360 	err = iwm_nic_tx_init(sc);
2361 	if (err)
2362 		return err;
2363 
2364 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2365 
2366 	return 0;
2367 }
2368 
2369 /* Map a TID to an ieee80211_edca_ac category. */
2370 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2371 	EDCA_AC_BE,
2372 	EDCA_AC_BK,
2373 	EDCA_AC_BK,
2374 	EDCA_AC_BE,
2375 	EDCA_AC_VI,
2376 	EDCA_AC_VI,
2377 	EDCA_AC_VO,
2378 	EDCA_AC_VO,
2379 };
2380 
2381 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2382 const uint8_t iwm_ac_to_tx_fifo[] = {
2383 	IWM_TX_FIFO_BE,
2384 	IWM_TX_FIFO_BK,
2385 	IWM_TX_FIFO_VI,
2386 	IWM_TX_FIFO_VO,
2387 };
2388 
2389 int
2390 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2391 {
2392 	int err;
2393 	iwm_nic_assert_locked(sc);
2394 
2395 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2396 
2397 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2398 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2399 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2400 
2401 	err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2402 	if (err) {
2403 		return err;
2404 	}
2405 
2406 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2407 
2408 	iwm_write_mem32(sc,
2409 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2410 
2411 	/* Set scheduler window size and frame limit. */
2412 	iwm_write_mem32(sc,
2413 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2414 	    sizeof(uint32_t),
2415 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2416 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2417 	    ((IWM_FRAME_LIMIT
2418 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2419 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2420 
2421 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2422 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2423 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2424 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2425 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2426 
2427 	if (qid == sc->cmdqid)
2428 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2429 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2430 
2431 	return 0;
2432 }
2433 
2434 int
2435 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2436     int aggregate, uint8_t tid, uint16_t ssn)
2437 {
2438 	struct iwm_tx_ring *ring = &sc->txq[qid];
2439 	struct iwm_scd_txq_cfg_cmd cmd;
2440 	int err, idx, scd_bug;
2441 
2442 	iwm_nic_assert_locked(sc);
2443 
2444 	/*
2445 	 * If we need to move the SCD write pointer by steps of
2446 	 * 0x40, 0x80 or 0xc0, it gets stuck.
2447 	 * This is really ugly, but this is the easiest way out for
2448 	 * this sad hardware issue.
2449 	 * This bug has been fixed on devices 9000 and up.
2450 	 */
2451 	scd_bug = !sc->sc_mqrx_supported &&
2452 		!((ssn - ring->cur) & 0x3f) &&
2453 		(ssn != ring->cur);
2454 	if (scd_bug)
2455 		ssn = (ssn + 1) & 0xfff;
2456 
2457 	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2458 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2459 	ring->cur = idx;
2460 	ring->tail = idx;
2461 
2462 	memset(&cmd, 0, sizeof(cmd));
2463 	cmd.tid = tid;
2464 	cmd.scd_queue = qid;
2465 	cmd.enable = 1;
2466 	cmd.sta_id = sta_id;
2467 	cmd.tx_fifo = fifo;
2468 	cmd.aggregate = aggregate;
2469 	cmd.ssn = htole16(ssn);
2470 	cmd.window = IWM_FRAME_LIMIT;
2471 
2472 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2473 	    sizeof(cmd), &cmd);
2474 	if (err)
2475 		return err;
2476 
2477 	sc->qenablemsk |= (1 << qid);
2478 	return 0;
2479 }
2480 
2481 int
2482 iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2483 {
2484 	struct iwm_scd_txq_cfg_cmd cmd;
2485 	int err;
2486 
2487 	memset(&cmd, 0, sizeof(cmd));
2488 	cmd.tid = tid;
2489 	cmd.scd_queue = qid;
2490 	cmd.enable = 0;
2491 	cmd.sta_id = sta_id;
2492 
2493 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2494 	if (err)
2495 		return err;
2496 
2497 	sc->qenablemsk &= ~(1 << qid);
2498 	return 0;
2499 }
2500 
2501 int
2502 iwm_post_alive(struct iwm_softc *sc)
2503 {
2504 	int nwords;
2505 	int err, chnl;
2506 	uint32_t base;
2507 
2508 	if (!iwm_nic_lock(sc))
2509 		return EBUSY;
2510 
2511 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2512 
2513 	iwm_ict_reset(sc);
2514 
2515 	iwm_nic_unlock(sc);
2516 
2517 	/* Clear TX scheduler state in SRAM. */
2518 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2519 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2520 	    / sizeof(uint32_t);
2521 	err = iwm_write_mem(sc,
2522 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2523 	    NULL, nwords);
2524 	if (err)
2525 		return err;
2526 
2527 	if (!iwm_nic_lock(sc))
2528 		return EBUSY;
2529 
2530 	/* Set physical address of TX scheduler rings (1KB aligned). */
2531 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2532 
2533 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2534 
2535 	/* enable command channel */
2536 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2537 	if (err) {
2538 		iwm_nic_unlock(sc);
2539 		return err;
2540 	}
2541 
2542 	/* Activate TX scheduler. */
2543 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2544 
2545 	/* Enable DMA channels. */
2546 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2547 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2548 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2549 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2550 	}
2551 
2552 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2553 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2554 
2555 	iwm_nic_unlock(sc);
2556 
2557 	/* Enable L1-Active */
2558 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2559 		err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2560 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2561 	}
2562 
2563 	return err;
2564 }
2565 
2566 struct iwm_phy_db_entry *
2567 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2568 {
2569 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2570 
2571 	if (type >= IWM_PHY_DB_MAX)
2572 		return NULL;
2573 
2574 	switch (type) {
2575 	case IWM_PHY_DB_CFG:
2576 		return &phy_db->cfg;
2577 	case IWM_PHY_DB_CALIB_NCH:
2578 		return &phy_db->calib_nch;
2579 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2580 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2581 			return NULL;
2582 		return &phy_db->calib_ch_group_papd[chg_id];
2583 	case IWM_PHY_DB_CALIB_CHG_TXP:
2584 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2585 			return NULL;
2586 		return &phy_db->calib_ch_group_txp[chg_id];
2587 	default:
2588 		return NULL;
2589 	}
2590 	return NULL;
2591 }
2592 
2593 int
2594 iwm_phy_db_set_section(struct iwm_softc *sc,
2595     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2596 {
2597 	uint16_t type = le16toh(phy_db_notif->type);
2598 	uint16_t size  = le16toh(phy_db_notif->length);
2599 	struct iwm_phy_db_entry *entry;
2600 	uint16_t chg_id = 0;
2601 
2602 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2603 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2604 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2605 
2606 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2607 	if (!entry)
2608 		return EINVAL;
2609 
2610 	if (entry->data)
2611 		free(entry->data, M_DEVBUF, entry->size);
2612 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2613 	if (!entry->data) {
2614 		entry->size = 0;
2615 		return ENOMEM;
2616 	}
2617 	memcpy(entry->data, phy_db_notif->data, size);
2618 	entry->size = size;
2619 
2620 	return 0;
2621 }
2622 
2623 int
2624 iwm_is_valid_channel(uint16_t ch_id)
2625 {
2626 	if (ch_id <= 14 ||
2627 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2628 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2629 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2630 		return 1;
2631 	return 0;
2632 }
2633 
2634 uint8_t
2635 iwm_ch_id_to_ch_index(uint16_t ch_id)
2636 {
2637 	if (!iwm_is_valid_channel(ch_id))
2638 		return 0xff;
2639 
2640 	if (ch_id <= 14)
2641 		return ch_id - 1;
2642 	if (ch_id <= 64)
2643 		return (ch_id + 20) / 4;
2644 	if (ch_id <= 140)
2645 		return (ch_id - 12) / 4;
2646 	return (ch_id - 13) / 4;
2647 }
2648 
2649 
2650 uint16_t
2651 iwm_channel_id_to_papd(uint16_t ch_id)
2652 {
2653 	if (!iwm_is_valid_channel(ch_id))
2654 		return 0xff;
2655 
2656 	if (1 <= ch_id && ch_id <= 14)
2657 		return 0;
2658 	if (36 <= ch_id && ch_id <= 64)
2659 		return 1;
2660 	if (100 <= ch_id && ch_id <= 140)
2661 		return 2;
2662 	return 3;
2663 }
2664 
2665 uint16_t
2666 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2667 {
2668 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2669 	struct iwm_phy_db_chg_txp *txp_chg;
2670 	int i;
2671 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2672 
2673 	if (ch_index == 0xff)
2674 		return 0xff;
2675 
2676 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2677 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2678 		if (!txp_chg)
2679 			return 0xff;
2680 		/*
2681 		 * Looking for the first channel group the max channel
2682 		 * of which is higher than the requested channel.
2683 		 */
2684 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2685 			return i;
2686 	}
2687 	return 0xff;
2688 }
2689 
2690 int
2691 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2692     uint16_t *size, uint16_t ch_id)
2693 {
2694 	struct iwm_phy_db_entry *entry;
2695 	uint16_t ch_group_id = 0;
2696 
2697 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2698 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2699 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2700 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2701 
2702 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2703 	if (!entry)
2704 		return EINVAL;
2705 
2706 	*data = entry->data;
2707 	*size = entry->size;
2708 
2709 	return 0;
2710 }
2711 
2712 int
2713 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2714     void *data)
2715 {
2716 	struct iwm_phy_db_cmd phy_db_cmd;
2717 	struct iwm_host_cmd cmd = {
2718 		.id = IWM_PHY_DB_CMD,
2719 		.flags = IWM_CMD_ASYNC,
2720 	};
2721 
2722 	phy_db_cmd.type = le16toh(type);
2723 	phy_db_cmd.length = le16toh(length);
2724 
2725 	cmd.data[0] = &phy_db_cmd;
2726 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2727 	cmd.data[1] = data;
2728 	cmd.len[1] = length;
2729 
2730 	return iwm_send_cmd(sc, &cmd);
2731 }
2732 
2733 int
2734 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2735     uint8_t max_ch_groups)
2736 {
2737 	uint16_t i;
2738 	int err;
2739 	struct iwm_phy_db_entry *entry;
2740 
2741 	for (i = 0; i < max_ch_groups; i++) {
2742 		entry = iwm_phy_db_get_section(sc, type, i);
2743 		if (!entry)
2744 			return EINVAL;
2745 
2746 		if (!entry->size)
2747 			continue;
2748 
2749 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2750 		if (err)
2751 			return err;
2752 
2753 		DELAY(1000);
2754 	}
2755 
2756 	return 0;
2757 }
2758 
2759 int
2760 iwm_send_phy_db_data(struct iwm_softc *sc)
2761 {
2762 	uint8_t *data = NULL;
2763 	uint16_t size = 0;
2764 	int err;
2765 
2766 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2767 	if (err)
2768 		return err;
2769 
2770 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2771 	if (err)
2772 		return err;
2773 
2774 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2775 	    &data, &size, 0);
2776 	if (err)
2777 		return err;
2778 
2779 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2780 	if (err)
2781 		return err;
2782 
2783 	err = iwm_phy_db_send_all_channel_groups(sc,
2784 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2785 	if (err)
2786 		return err;
2787 
2788 	err = iwm_phy_db_send_all_channel_groups(sc,
2789 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2790 	if (err)
2791 		return err;
2792 
2793 	return 0;
2794 }
2795 
2796 /*
2797  * For the high priority TE use a time event type that has similar priority to
2798  * the FW's action scan priority.
2799  */
2800 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2801 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2802 
2803 int
2804 iwm_send_time_event_cmd(struct iwm_softc *sc,
2805     const struct iwm_time_event_cmd *cmd)
2806 {
2807 	struct iwm_rx_packet *pkt;
2808 	struct iwm_time_event_resp *resp;
2809 	struct iwm_host_cmd hcmd = {
2810 		.id = IWM_TIME_EVENT_CMD,
2811 		.flags = IWM_CMD_WANT_RESP,
2812 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2813 	};
2814 	uint32_t resp_len;
2815 	int err;
2816 
2817 	hcmd.data[0] = cmd;
2818 	hcmd.len[0] = sizeof(*cmd);
2819 	err = iwm_send_cmd(sc, &hcmd);
2820 	if (err)
2821 		return err;
2822 
2823 	pkt = hcmd.resp_pkt;
2824 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2825 		err = EIO;
2826 		goto out;
2827 	}
2828 
2829 	resp_len = iwm_rx_packet_payload_len(pkt);
2830 	if (resp_len != sizeof(*resp)) {
2831 		err = EIO;
2832 		goto out;
2833 	}
2834 
2835 	resp = (void *)pkt->data;
2836 	if (le32toh(resp->status) == 0)
2837 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2838 	else
2839 		err = EIO;
2840 out:
2841 	iwm_free_resp(sc, &hcmd);
2842 	return err;
2843 }
2844 
2845 void
2846 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2847     uint32_t duration, uint32_t max_delay)
2848 {
2849 	struct iwm_time_event_cmd time_cmd;
2850 
2851 	/* Do nothing if a time event is already scheduled. */
2852 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2853 		return;
2854 
2855 	memset(&time_cmd, 0, sizeof(time_cmd));
2856 
2857 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2858 	time_cmd.id_and_color =
2859 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2860 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2861 
2862 	time_cmd.apply_time = htole32(0);
2863 
2864 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2865 	time_cmd.max_delay = htole32(max_delay);
2866 	/* TODO: why do we need to interval = bi if it is not periodic? */
2867 	time_cmd.interval = htole32(1);
2868 	time_cmd.duration = htole32(duration);
2869 	time_cmd.repeat = 1;
2870 	time_cmd.policy
2871 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2872 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2873 		IWM_T2_V2_START_IMMEDIATELY);
2874 
2875 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2876 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2877 
2878 	DELAY(100);
2879 }
2880 
2881 void
2882 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2883 {
2884 	struct iwm_time_event_cmd time_cmd;
2885 
2886 	/* Do nothing if the time event has already ended. */
2887 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2888 		return;
2889 
2890 	memset(&time_cmd, 0, sizeof(time_cmd));
2891 
2892 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2893 	time_cmd.id_and_color =
2894 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2895 	time_cmd.id = htole32(sc->sc_time_event_uid);
2896 
2897 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2898 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2899 
2900 	DELAY(100);
2901 }
2902 
2903 /*
2904  * NVM read access and content parsing.  We do not support
2905  * external NVM or writing NVM.
2906  */
2907 
2908 /* list of NVM sections we are allowed/need to read */
2909 const int iwm_nvm_to_read[] = {
2910 	IWM_NVM_SECTION_TYPE_HW,
2911 	IWM_NVM_SECTION_TYPE_SW,
2912 	IWM_NVM_SECTION_TYPE_REGULATORY,
2913 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2914 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2915 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2916 	IWM_NVM_SECTION_TYPE_HW_8000,
2917 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2918 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2919 };
2920 
2921 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2922 
2923 #define IWM_NVM_WRITE_OPCODE 1
2924 #define IWM_NVM_READ_OPCODE 0
2925 
2926 int
2927 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2928     uint16_t length, uint8_t *data, uint16_t *len)
2929 {
2930 	offset = 0;
2931 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2932 		.offset = htole16(offset),
2933 		.length = htole16(length),
2934 		.type = htole16(section),
2935 		.op_code = IWM_NVM_READ_OPCODE,
2936 	};
2937 	struct iwm_nvm_access_resp *nvm_resp;
2938 	struct iwm_rx_packet *pkt;
2939 	struct iwm_host_cmd cmd = {
2940 		.id = IWM_NVM_ACCESS_CMD,
2941 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2942 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2943 		.data = { &nvm_access_cmd, },
2944 	};
2945 	int err, offset_read;
2946 	size_t bytes_read;
2947 	uint8_t *resp_data;
2948 
2949 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2950 
2951 	err = iwm_send_cmd(sc, &cmd);
2952 	if (err)
2953 		return err;
2954 
2955 	pkt = cmd.resp_pkt;
2956 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2957 		err = EIO;
2958 		goto exit;
2959 	}
2960 
2961 	/* Extract NVM response */
2962 	nvm_resp = (void *)pkt->data;
2963 	if (nvm_resp == NULL)
2964 		return EIO;
2965 
2966 	err = le16toh(nvm_resp->status);
2967 	bytes_read = le16toh(nvm_resp->length);
2968 	offset_read = le16toh(nvm_resp->offset);
2969 	resp_data = nvm_resp->data;
2970 	if (err) {
2971 		err = EINVAL;
2972 		goto exit;
2973 	}
2974 
2975 	if (offset_read != offset) {
2976 		err = EINVAL;
2977 		goto exit;
2978 	}
2979 
2980 	if (bytes_read > length) {
2981 		err = EINVAL;
2982 		goto exit;
2983 	}
2984 
2985 	memcpy(data + offset, resp_data, bytes_read);
2986 	*len = bytes_read;
2987 
2988  exit:
2989 	iwm_free_resp(sc, &cmd);
2990 	return err;
2991 }
2992 
2993 /*
2994  * Reads an NVM section completely.
2995  * NICs prior to 7000 family doesn't have a real NVM, but just read
2996  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2997  * by uCode, we need to manually check in this case that we don't
2998  * overflow and try to read more than the EEPROM size.
2999  */
3000 int
3001 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
3002     uint16_t *len, size_t max_len)
3003 {
3004 	uint16_t chunklen, seglen;
3005 	int err = 0;
3006 
3007 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
3008 	*len = 0;
3009 
3010 	/* Read NVM chunks until exhausted (reading less than requested) */
3011 	while (seglen == chunklen && *len < max_len) {
3012 		err = iwm_nvm_read_chunk(sc,
3013 		    section, *len, chunklen, data, &seglen);
3014 		if (err)
3015 			return err;
3016 
3017 		*len += seglen;
3018 	}
3019 
3020 	return err;
3021 }
3022 
3023 uint8_t
3024 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3025 {
3026 	uint8_t tx_ant;
3027 
3028 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3029 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3030 
3031 	if (sc->sc_nvm.valid_tx_ant)
3032 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3033 
3034 	return tx_ant;
3035 }
3036 
3037 uint8_t
3038 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3039 {
3040 	uint8_t rx_ant;
3041 
3042 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3043 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3044 
3045 	if (sc->sc_nvm.valid_rx_ant)
3046 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3047 
3048 	return rx_ant;
3049 }
3050 
3051 int
3052 iwm_valid_siso_ant_rate_mask(struct iwm_softc *sc)
3053 {
3054 	uint8_t valid_tx_ant = iwm_fw_valid_tx_ant(sc);
3055 
3056 	/*
3057 	 * According to the Linux driver, antenna B should be preferred
3058 	 * on 9k devices since it is not shared with bluetooth. However,
3059 	 * there are 9k devices which do not support antenna B at all.
3060 	 */
3061 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
3062 	    (valid_tx_ant & IWM_ANT_B))
3063 		return IWM_RATE_MCS_ANT_B_MSK;
3064 
3065 	return IWM_RATE_MCS_ANT_A_MSK;
3066 }
3067 
3068 void
3069 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3070     const uint8_t *nvm_channels, int nchan)
3071 {
3072 	struct ieee80211com *ic = &sc->sc_ic;
3073 	struct iwm_nvm_data *data = &sc->sc_nvm;
3074 	int ch_idx;
3075 	struct ieee80211_channel *channel;
3076 	uint16_t ch_flags;
3077 	int is_5ghz;
3078 	int flags, hw_value;
3079 
3080 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3081 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3082 
3083 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3084 		    !data->sku_cap_band_52GHz_enable)
3085 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3086 
3087 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3088 			continue;
3089 
3090 		hw_value = nvm_channels[ch_idx];
3091 		channel = &ic->ic_channels[hw_value];
3092 
3093 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3094 		if (!is_5ghz) {
3095 			flags = IEEE80211_CHAN_2GHZ;
3096 			channel->ic_flags
3097 			    = IEEE80211_CHAN_CCK
3098 			    | IEEE80211_CHAN_OFDM
3099 			    | IEEE80211_CHAN_DYN
3100 			    | IEEE80211_CHAN_2GHZ;
3101 		} else {
3102 			flags = IEEE80211_CHAN_5GHZ;
3103 			channel->ic_flags =
3104 			    IEEE80211_CHAN_A;
3105 		}
3106 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3107 
3108 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3109 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3110 
3111 		if (data->sku_cap_11n_enable) {
3112 			channel->ic_flags |= IEEE80211_CHAN_HT;
3113 			if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3114 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3115 		}
3116 
3117 		if (is_5ghz && data->sku_cap_11ac_enable) {
3118 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3119 			if (ch_flags & IWM_NVM_CHANNEL_80MHZ)
3120 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3121 		}
3122 	}
3123 }
3124 
3125 int
3126 iwm_mimo_enabled(struct iwm_softc *sc)
3127 {
3128 	struct ieee80211com *ic = &sc->sc_ic;
3129 
3130 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3131 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3132 }
3133 
3134 void
3135 iwm_setup_ht_rates(struct iwm_softc *sc)
3136 {
3137 	struct ieee80211com *ic = &sc->sc_ic;
3138 	uint8_t rx_ant;
3139 
3140 	/* TX is supported with the same MCS as RX. */
3141 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3142 
3143 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3144 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3145 
3146 	if (!iwm_mimo_enabled(sc))
3147 		return;
3148 
3149 	rx_ant = iwm_fw_valid_rx_ant(sc);
3150 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3151 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3152 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3153 }
3154 
3155 void
3156 iwm_setup_vht_rates(struct iwm_softc *sc)
3157 {
3158 	struct ieee80211com *ic = &sc->sc_ic;
3159 	uint8_t rx_ant = iwm_fw_valid_rx_ant(sc);
3160 	int n;
3161 
3162 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3163 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3164 
3165 	if (iwm_mimo_enabled(sc) &&
3166 	    ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3167 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)) {
3168 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3169 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3170 	} else {
3171 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3172 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3173 	}
3174 
3175 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3176 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3177 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3178 	}
3179 
3180 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3181 }
3182 
3183 void
3184 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3185     uint16_t ssn, uint16_t buf_size)
3186 {
3187 	reorder_buf->head_sn = ssn;
3188 	reorder_buf->num_stored = 0;
3189 	reorder_buf->buf_size = buf_size;
3190 	reorder_buf->last_amsdu = 0;
3191 	reorder_buf->last_sub_index = 0;
3192 	reorder_buf->removed = 0;
3193 	reorder_buf->valid = 0;
3194 	reorder_buf->consec_oldsn_drops = 0;
3195 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3196 	reorder_buf->consec_oldsn_prev_drop = 0;
3197 }
3198 
3199 void
3200 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3201 {
3202 	int i;
3203 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3204 	struct iwm_reorder_buf_entry *entry;
3205 
3206 	for (i = 0; i < reorder_buf->buf_size; i++) {
3207 		entry = &rxba->entries[i];
3208 		ml_purge(&entry->frames);
3209 		timerclear(&entry->reorder_time);
3210 	}
3211 
3212 	reorder_buf->removed = 1;
3213 	timeout_del(&reorder_buf->reorder_timer);
3214 	timerclear(&rxba->last_rx);
3215 	timeout_del(&rxba->session_timer);
3216 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3217 }
3218 
3219 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3220 
3221 void
3222 iwm_rx_ba_session_expired(void *arg)
3223 {
3224 	struct iwm_rxba_data *rxba = arg;
3225 	struct iwm_softc *sc = rxba->sc;
3226 	struct ieee80211com *ic = &sc->sc_ic;
3227 	struct ieee80211_node *ni = ic->ic_bss;
3228 	struct timeval now, timeout, expiry;
3229 	int s;
3230 
3231 	s = splnet();
3232 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3233 	    ic->ic_state == IEEE80211_S_RUN &&
3234 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3235 		getmicrouptime(&now);
3236 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3237 		timeradd(&rxba->last_rx, &timeout, &expiry);
3238 		if (timercmp(&now, &expiry, <)) {
3239 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3240 		} else {
3241 			ic->ic_stats.is_ht_rx_ba_timeout++;
3242 			ieee80211_delba_request(ic, ni,
3243 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3244 		}
3245 	}
3246 	splx(s);
3247 }
3248 
3249 void
3250 iwm_reorder_timer_expired(void *arg)
3251 {
3252 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3253 	struct iwm_reorder_buffer *buf = arg;
3254 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3255 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3256 	struct iwm_softc *sc = rxba->sc;
3257 	struct ieee80211com *ic = &sc->sc_ic;
3258 	struct ieee80211_node *ni = ic->ic_bss;
3259 	int i, s;
3260 	uint16_t sn = 0, index = 0;
3261 	int expired = 0;
3262 	int cont = 0;
3263 	struct timeval now, timeout, expiry;
3264 
3265 	if (!buf->num_stored || buf->removed)
3266 		return;
3267 
3268 	s = splnet();
3269 	getmicrouptime(&now);
3270 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3271 
3272 	for (i = 0; i < buf->buf_size ; i++) {
3273 		index = (buf->head_sn + i) % buf->buf_size;
3274 
3275 		if (ml_empty(&entries[index].frames)) {
3276 			/*
3277 			 * If there is a hole and the next frame didn't expire
3278 			 * we want to break and not advance SN.
3279 			 */
3280 			cont = 0;
3281 			continue;
3282 		}
3283 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3284 		if (!cont && timercmp(&now, &expiry, <))
3285 			break;
3286 
3287 		expired = 1;
3288 		/* continue until next hole after this expired frame */
3289 		cont = 1;
3290 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3291 	}
3292 
3293 	if (expired) {
3294 		/* SN is set to the last expired frame + 1 */
3295 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3296 		if_input(&sc->sc_ic.ic_if, &ml);
3297 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3298 	} else {
3299 		/*
3300 		 * If no frame expired and there are stored frames, index is now
3301 		 * pointing to the first unexpired frame - modify reorder timeout
3302 		 * accordingly.
3303 		 */
3304 		timeout_add_usec(&buf->reorder_timer,
3305 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3306 	}
3307 
3308 	splx(s);
3309 }
3310 
3311 #define IWM_MAX_RX_BA_SESSIONS 16
3312 
3313 int
3314 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3315     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3316 {
3317 	struct ieee80211com *ic = &sc->sc_ic;
3318 	struct iwm_add_sta_cmd cmd;
3319 	struct iwm_node *in = (void *)ni;
3320 	int err, s;
3321 	uint32_t status;
3322 	size_t cmdsize;
3323 	struct iwm_rxba_data *rxba = NULL;
3324 	uint8_t baid = 0;
3325 
3326 	s = splnet();
3327 
3328 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3329 		ieee80211_addba_req_refuse(ic, ni, tid);
3330 		splx(s);
3331 		return 0;
3332 	}
3333 
3334 	memset(&cmd, 0, sizeof(cmd));
3335 
3336 	cmd.sta_id = IWM_STATION_ID;
3337 	cmd.mac_id_n_color
3338 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3339 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3340 
3341 	if (start) {
3342 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3343 		cmd.add_immediate_ba_ssn = ssn;
3344 		cmd.rx_ba_window = winsize;
3345 	} else {
3346 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3347 	}
3348 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3349 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3350 
3351 	status = IWM_ADD_STA_SUCCESS;
3352 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3353 		cmdsize = sizeof(cmd);
3354 	else
3355 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3356 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3357 	    &status);
3358 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3359 		err = EIO;
3360 	if (err) {
3361 		if (start)
3362 			ieee80211_addba_req_refuse(ic, ni, tid);
3363 		splx(s);
3364 		return err;
3365 	}
3366 
3367 	if (sc->sc_mqrx_supported) {
3368 		/* Deaggregation is done in hardware. */
3369 		if (start) {
3370 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3371 				ieee80211_addba_req_refuse(ic, ni, tid);
3372 				splx(s);
3373 				return EIO;
3374 			}
3375 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3376 			    IWM_ADD_STA_BAID_SHIFT;
3377 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3378 			    baid >= nitems(sc->sc_rxba_data)) {
3379 				ieee80211_addba_req_refuse(ic, ni, tid);
3380 				splx(s);
3381 				return EIO;
3382 			}
3383 			rxba = &sc->sc_rxba_data[baid];
3384 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3385 				ieee80211_addba_req_refuse(ic, ni, tid);
3386 				splx(s);
3387 				return 0;
3388 			}
3389 			rxba->sta_id = IWM_STATION_ID;
3390 			rxba->tid = tid;
3391 			rxba->baid = baid;
3392 			rxba->timeout = timeout_val;
3393 			getmicrouptime(&rxba->last_rx);
3394 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3395 			    winsize);
3396 			if (timeout_val != 0) {
3397 				struct ieee80211_rx_ba *ba;
3398 				timeout_add_usec(&rxba->session_timer,
3399 				    timeout_val);
3400 				/* XXX disable net80211's BA timeout handler */
3401 				ba = &ni->ni_rx_ba[tid];
3402 				ba->ba_timeout_val = 0;
3403 			}
3404 		} else {
3405 			int i;
3406 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3407 				rxba = &sc->sc_rxba_data[i];
3408 				if (rxba->baid ==
3409 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3410 					continue;
3411 				if (rxba->tid != tid)
3412 					continue;
3413 				iwm_clear_reorder_buffer(sc, rxba);
3414 				break;
3415 			}
3416 		}
3417 	}
3418 
3419 	if (start) {
3420 		sc->sc_rx_ba_sessions++;
3421 		ieee80211_addba_req_accept(ic, ni, tid);
3422 	} else if (sc->sc_rx_ba_sessions > 0)
3423 		sc->sc_rx_ba_sessions--;
3424 
3425 	splx(s);
3426 	return 0;
3427 }
3428 
3429 void
3430 iwm_mac_ctxt_task(void *arg)
3431 {
3432 	struct iwm_softc *sc = arg;
3433 	struct ieee80211com *ic = &sc->sc_ic;
3434 	struct iwm_node *in = (void *)ic->ic_bss;
3435 	int err, s = splnet();
3436 
3437 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3438 	    ic->ic_state != IEEE80211_S_RUN) {
3439 		refcnt_rele_wake(&sc->task_refs);
3440 		splx(s);
3441 		return;
3442 	}
3443 
3444 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3445 	if (err)
3446 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3447 
3448 	iwm_unprotect_session(sc, in);
3449 
3450 	refcnt_rele_wake(&sc->task_refs);
3451 	splx(s);
3452 }
3453 
3454 void
3455 iwm_updateprot(struct ieee80211com *ic)
3456 {
3457 	struct iwm_softc *sc = ic->ic_softc;
3458 
3459 	if (ic->ic_state == IEEE80211_S_RUN &&
3460 	    !task_pending(&sc->newstate_task))
3461 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3462 }
3463 
3464 void
3465 iwm_updateslot(struct ieee80211com *ic)
3466 {
3467 	struct iwm_softc *sc = ic->ic_softc;
3468 
3469 	if (ic->ic_state == IEEE80211_S_RUN &&
3470 	    !task_pending(&sc->newstate_task))
3471 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3472 }
3473 
3474 void
3475 iwm_updateedca(struct ieee80211com *ic)
3476 {
3477 	struct iwm_softc *sc = ic->ic_softc;
3478 
3479 	if (ic->ic_state == IEEE80211_S_RUN &&
3480 	    !task_pending(&sc->newstate_task))
3481 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3482 }
3483 
3484 void
3485 iwm_phy_ctxt_task(void *arg)
3486 {
3487 	struct iwm_softc *sc = arg;
3488 	struct ieee80211com *ic = &sc->sc_ic;
3489 	struct iwm_node *in = (void *)ic->ic_bss;
3490 	struct ieee80211_node *ni = &in->in_ni;
3491 	uint8_t chains, sco, vht_chan_width;
3492 	int err, s = splnet();
3493 
3494 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3495 	    ic->ic_state != IEEE80211_S_RUN ||
3496 	    in->in_phyctxt == NULL) {
3497 		refcnt_rele_wake(&sc->task_refs);
3498 		splx(s);
3499 		return;
3500 	}
3501 
3502 	chains = iwm_mimo_enabled(sc) ? 2 : 1;
3503 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3504 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3505 	    ieee80211_node_supports_ht_chan40(ni))
3506 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3507 	else
3508 		sco = IEEE80211_HTOP0_SCO_SCN;
3509 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3510 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3511 	    ieee80211_node_supports_vht_chan80(ni))
3512 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3513 	else
3514 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3515 	if (in->in_phyctxt->sco != sco ||
3516 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3517 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3518 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3519 		    vht_chan_width);
3520 		if (err)
3521 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3522 		iwm_setrates(in, 0);
3523 	}
3524 
3525 	refcnt_rele_wake(&sc->task_refs);
3526 	splx(s);
3527 }
3528 
3529 void
3530 iwm_updatechan(struct ieee80211com *ic)
3531 {
3532 	struct iwm_softc *sc = ic->ic_softc;
3533 
3534 	if (ic->ic_state == IEEE80211_S_RUN &&
3535 	    !task_pending(&sc->newstate_task))
3536 		iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3537 }
3538 
3539 void
3540 iwm_updatedtim(struct ieee80211com *ic)
3541 {
3542 	struct iwm_softc *sc = ic->ic_softc;
3543 
3544 	if (ic->ic_state == IEEE80211_S_RUN &&
3545 	    !task_pending(&sc->newstate_task))
3546 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3547 }
3548 
3549 int
3550 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3551     uint16_t ssn, uint16_t winsize, int start)
3552 {
3553 	struct iwm_add_sta_cmd cmd;
3554 	struct ieee80211com *ic = &sc->sc_ic;
3555 	struct iwm_node *in = (void *)ni;
3556 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3557 	struct iwm_tx_ring *ring;
3558 	enum ieee80211_edca_ac ac;
3559 	int fifo;
3560 	uint32_t status;
3561 	int err;
3562 	size_t cmdsize;
3563 
3564 	/* Ensure we can map this TID to an aggregation queue. */
3565 	if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3566 		return ENOSPC;
3567 
3568 	if (start) {
3569 		if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3570 			return 0;
3571 	} else {
3572 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3573 			return 0;
3574 	}
3575 
3576 	ring = &sc->txq[qid];
3577 	ac = iwm_tid_to_ac[tid];
3578 	fifo = iwm_ac_to_tx_fifo[ac];
3579 
3580 	memset(&cmd, 0, sizeof(cmd));
3581 
3582 	cmd.sta_id = IWM_STATION_ID;
3583 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3584 	    in->in_color));
3585 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3586 
3587 	if (start) {
3588 		/* Enable Tx aggregation for this queue. */
3589 		in->tid_disable_ampdu &= ~(1 << tid);
3590 		in->tfd_queue_msk |= (1 << qid);
3591 	} else {
3592 		in->tid_disable_ampdu |= (1 << tid);
3593 		/*
3594 		 * Queue remains enabled in the TFD queue mask
3595 		 * until we leave RUN state.
3596 		 */
3597 		err = iwm_flush_sta(sc, in);
3598 		if (err)
3599 			return err;
3600 	}
3601 
3602 	cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3603 	cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3604 	cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3605 	    IWM_STA_MODIFY_TID_DISABLE_TX);
3606 
3607 	if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3608 		if (!iwm_nic_lock(sc)) {
3609 			if (start)
3610 				ieee80211_addba_resp_refuse(ic, ni, tid,
3611 				    IEEE80211_STATUS_UNSPECIFIED);
3612 			return EBUSY;
3613 		}
3614 		err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3615 		    ssn);
3616 		iwm_nic_unlock(sc);
3617 		if (err) {
3618 			printf("%s: could not enable Tx queue %d (error %d)\n",
3619 			    DEVNAME(sc), qid, err);
3620 			if (start)
3621 				ieee80211_addba_resp_refuse(ic, ni, tid,
3622 				    IEEE80211_STATUS_UNSPECIFIED);
3623 			return err;
3624 		}
3625 		/*
3626 		 * If iwm_enable_txq() employed the SCD hardware bug
3627 		 * workaround we must skip the frame with seqnum SSN.
3628 		 */
3629 		if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3630 			ssn = (ssn + 1) & 0xfff;
3631 			KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3632 			ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3633 			ni->ni_qos_txseqs[tid] = ssn;
3634 		}
3635 	}
3636 
3637 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3638 		cmdsize = sizeof(cmd);
3639 	else
3640 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3641 
3642 	status = 0;
3643 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3644 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3645 		err = EIO;
3646 	if (err) {
3647 		printf("%s: could not update sta (error %d)\n",
3648 		    DEVNAME(sc), err);
3649 		if (start)
3650 			ieee80211_addba_resp_refuse(ic, ni, tid,
3651 			    IEEE80211_STATUS_UNSPECIFIED);
3652 		return err;
3653 	}
3654 
3655 	if (start) {
3656 		sc->tx_ba_queue_mask |= (1 << qid);
3657 		ieee80211_addba_resp_accept(ic, ni, tid);
3658 	} else {
3659 		sc->tx_ba_queue_mask &= ~(1 << qid);
3660 
3661 		/*
3662 		 * Clear pending frames but keep the queue enabled.
3663 		 * Firmware panics if we disable the queue here.
3664 		 */
3665 		iwm_txq_advance(sc, ring, ring->cur);
3666 		iwm_clear_oactive(sc, ring);
3667 	}
3668 
3669 	return 0;
3670 }
3671 
3672 void
3673 iwm_ba_task(void *arg)
3674 {
3675 	struct iwm_softc *sc = arg;
3676 	struct ieee80211com *ic = &sc->sc_ic;
3677 	struct ieee80211_node *ni = ic->ic_bss;
3678 	int s = splnet();
3679 	int tid, err = 0;
3680 
3681 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3682 	    ic->ic_state != IEEE80211_S_RUN) {
3683 		refcnt_rele_wake(&sc->task_refs);
3684 		splx(s);
3685 		return;
3686 	}
3687 
3688 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3689 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3690 			break;
3691 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3692 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3693 			err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3694 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3695 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3696 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3697 			err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3698 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3699 		}
3700 	}
3701 
3702 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3703 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3704 			break;
3705 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3706 			struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3707 			err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3708 			    ba->ba_winsize, 1);
3709 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3710 		} else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3711 			err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3712 			sc->ba_tx.stop_tidmask &= ~(1 << tid);
3713 		}
3714 	}
3715 
3716 	/*
3717 	 * We "recover" from failure to start or stop a BA session
3718 	 * by resetting the device.
3719 	 */
3720 	if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3721 		task_add(systq, &sc->init_task);
3722 
3723 	refcnt_rele_wake(&sc->task_refs);
3724 	splx(s);
3725 }
3726 
3727 /*
3728  * This function is called by upper layer when an ADDBA request is received
3729  * from another STA and before the ADDBA response is sent.
3730  */
3731 int
3732 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3733     uint8_t tid)
3734 {
3735 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3736 
3737 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3738 	    tid > IWM_MAX_TID_COUNT)
3739 		return ENOSPC;
3740 
3741 	if (sc->ba_rx.start_tidmask & (1 << tid))
3742 		return EBUSY;
3743 
3744 	sc->ba_rx.start_tidmask |= (1 << tid);
3745 	iwm_add_task(sc, systq, &sc->ba_task);
3746 
3747 	return EBUSY;
3748 }
3749 
3750 /*
3751  * This function is called by upper layer on teardown of an HT-immediate
3752  * Block Ack agreement (eg. upon receipt of a DELBA frame).
3753  */
3754 void
3755 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3756     uint8_t tid)
3757 {
3758 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3759 
3760 	if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3761 		return;
3762 
3763 	sc->ba_rx.stop_tidmask |= (1 << tid);
3764 	iwm_add_task(sc, systq, &sc->ba_task);
3765 }
3766 
3767 int
3768 iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3769     uint8_t tid)
3770 {
3771 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3772 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3773 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3774 
3775 	/* We only implement Tx aggregation with DQA-capable firmware. */
3776 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3777 		return ENOTSUP;
3778 
3779 	/* Ensure we can map this TID to an aggregation queue. */
3780 	if (tid >= IWM_MAX_TID_COUNT)
3781 		return EINVAL;
3782 
3783 	/* We only support a fixed Tx aggregation window size, for now. */
3784 	if (ba->ba_winsize != IWM_FRAME_LIMIT)
3785 		return ENOTSUP;
3786 
3787 	/* Is firmware already using Tx aggregation on this queue? */
3788 	if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3789 		return ENOSPC;
3790 
3791 	/* Are we already processing an ADDBA request? */
3792 	if (sc->ba_tx.start_tidmask & (1 << tid))
3793 		return EBUSY;
3794 
3795 	sc->ba_tx.start_tidmask |= (1 << tid);
3796 	iwm_add_task(sc, systq, &sc->ba_task);
3797 
3798 	return EBUSY;
3799 }
3800 
3801 void
3802 iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3803     uint8_t tid)
3804 {
3805 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3806 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3807 
3808 	if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3809 		return;
3810 
3811 	/* Is firmware currently using Tx aggregation on this queue? */
3812 	if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3813 		return;
3814 
3815 	sc->ba_tx.stop_tidmask |= (1 << tid);
3816 	iwm_add_task(sc, systq, &sc->ba_task);
3817 }
3818 
3819 void
3820 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3821     const uint16_t *mac_override, const uint16_t *nvm_hw)
3822 {
3823 	const uint8_t *hw_addr;
3824 
3825 	if (mac_override) {
3826 		static const uint8_t reserved_mac[] = {
3827 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3828 		};
3829 
3830 		hw_addr = (const uint8_t *)(mac_override +
3831 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3832 
3833 		/*
3834 		 * Store the MAC address from MAO section.
3835 		 * No byte swapping is required in MAO section
3836 		 */
3837 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3838 
3839 		/*
3840 		 * Force the use of the OTP MAC address in case of reserved MAC
3841 		 * address in the NVM, or if address is given but invalid.
3842 		 */
3843 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3844 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3845 		    sizeof(etherbroadcastaddr)) != 0) &&
3846 		    (memcmp(etheranyaddr, data->hw_addr,
3847 		    sizeof(etheranyaddr)) != 0) &&
3848 		    !ETHER_IS_MULTICAST(data->hw_addr))
3849 			return;
3850 	}
3851 
3852 	if (nvm_hw) {
3853 		/* Read the mac address from WFMP registers. */
3854 		uint32_t mac_addr0, mac_addr1;
3855 
3856 		if (!iwm_nic_lock(sc))
3857 			goto out;
3858 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3859 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3860 		iwm_nic_unlock(sc);
3861 
3862 		hw_addr = (const uint8_t *)&mac_addr0;
3863 		data->hw_addr[0] = hw_addr[3];
3864 		data->hw_addr[1] = hw_addr[2];
3865 		data->hw_addr[2] = hw_addr[1];
3866 		data->hw_addr[3] = hw_addr[0];
3867 
3868 		hw_addr = (const uint8_t *)&mac_addr1;
3869 		data->hw_addr[4] = hw_addr[1];
3870 		data->hw_addr[5] = hw_addr[0];
3871 
3872 		return;
3873 	}
3874 out:
3875 	printf("%s: mac address not found\n", DEVNAME(sc));
3876 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3877 }
3878 
3879 int
3880 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3881     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3882     const uint16_t *mac_override, const uint16_t *phy_sku,
3883     const uint16_t *regulatory, int n_regulatory)
3884 {
3885 	struct iwm_nvm_data *data = &sc->sc_nvm;
3886 	uint8_t hw_addr[ETHER_ADDR_LEN];
3887 	uint32_t sku;
3888 	uint16_t lar_config;
3889 
3890 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3891 
3892 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3893 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3894 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3895 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3896 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3897 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3898 
3899 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3900 	} else {
3901 		uint32_t radio_cfg =
3902 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3903 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3904 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3905 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3906 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3907 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3908 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3909 
3910 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3911 	}
3912 
3913 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3914 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3915 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3916 	data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE;
3917 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3918 
3919 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3920 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3921 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3922 				       IWM_NVM_LAR_OFFSET_8000;
3923 
3924 		lar_config = le16_to_cpup(regulatory + lar_offset);
3925 		data->lar_enabled = !!(lar_config &
3926 				       IWM_NVM_LAR_ENABLED_8000);
3927 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3928 	} else
3929 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3930 
3931 
3932 	/* The byte order is little endian 16 bit, meaning 214365 */
3933 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3934 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3935 		data->hw_addr[0] = hw_addr[1];
3936 		data->hw_addr[1] = hw_addr[0];
3937 		data->hw_addr[2] = hw_addr[3];
3938 		data->hw_addr[3] = hw_addr[2];
3939 		data->hw_addr[4] = hw_addr[5];
3940 		data->hw_addr[5] = hw_addr[4];
3941 	} else
3942 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3943 
3944 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3945 		if (sc->nvm_type == IWM_NVM_SDP) {
3946 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3947 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3948 		} else {
3949 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3950 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3951 		}
3952 	} else
3953 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3954 		    iwm_nvm_channels_8000,
3955 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3956 
3957 	data->calib_version = 255;   /* TODO:
3958 					this value will prevent some checks from
3959 					failing, we need to check if this
3960 					field is still needed, and if it does,
3961 					where is it in the NVM */
3962 
3963 	return 0;
3964 }
3965 
3966 int
3967 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3968 {
3969 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3970 	const uint16_t *regulatory = NULL;
3971 	int n_regulatory = 0;
3972 
3973 	/* Checking for required sections */
3974 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3975 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3976 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3977 			return ENOENT;
3978 		}
3979 
3980 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3981 
3982 		if (sc->nvm_type == IWM_NVM_SDP) {
3983 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3984 				return ENOENT;
3985 			regulatory = (const uint16_t *)
3986 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3987 			n_regulatory =
3988 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3989 		}
3990 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3991 		/* SW and REGULATORY sections are mandatory */
3992 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3993 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3994 			return ENOENT;
3995 		}
3996 		/* MAC_OVERRIDE or at least HW section must exist */
3997 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3998 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3999 			return ENOENT;
4000 		}
4001 
4002 		/* PHY_SKU section is mandatory in B0 */
4003 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
4004 			return ENOENT;
4005 		}
4006 
4007 		regulatory = (const uint16_t *)
4008 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
4009 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
4010 		hw = (const uint16_t *)
4011 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
4012 		mac_override =
4013 			(const uint16_t *)
4014 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
4015 		phy_sku = (const uint16_t *)
4016 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
4017 	} else {
4018 		panic("unknown device family %d", sc->sc_device_family);
4019 	}
4020 
4021 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
4022 	calib = (const uint16_t *)
4023 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
4024 
4025 	/* XXX should pass in the length of every section */
4026 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
4027 	    phy_sku, regulatory, n_regulatory);
4028 }
4029 
4030 int
4031 iwm_nvm_init(struct iwm_softc *sc)
4032 {
4033 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
4034 	int i, section, err;
4035 	uint16_t len;
4036 	uint8_t *buf;
4037 	const size_t bufsz = sc->sc_nvm_max_section_size;
4038 
4039 	memset(nvm_sections, 0, sizeof(nvm_sections));
4040 
4041 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
4042 	if (buf == NULL)
4043 		return ENOMEM;
4044 
4045 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
4046 		section = iwm_nvm_to_read[i];
4047 		KASSERT(section <= nitems(nvm_sections));
4048 
4049 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
4050 		if (err) {
4051 			err = 0;
4052 			continue;
4053 		}
4054 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
4055 		if (nvm_sections[section].data == NULL) {
4056 			err = ENOMEM;
4057 			break;
4058 		}
4059 		memcpy(nvm_sections[section].data, buf, len);
4060 		nvm_sections[section].length = len;
4061 	}
4062 	free(buf, M_DEVBUF, bufsz);
4063 	if (err == 0)
4064 		err = iwm_parse_nvm_sections(sc, nvm_sections);
4065 
4066 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
4067 		if (nvm_sections[i].data != NULL)
4068 			free(nvm_sections[i].data, M_DEVBUF,
4069 			    nvm_sections[i].length);
4070 	}
4071 
4072 	return err;
4073 }
4074 
4075 int
4076 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
4077     const uint8_t *section, uint32_t byte_cnt)
4078 {
4079 	int err = EINVAL;
4080 	uint32_t chunk_sz, offset;
4081 
4082 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
4083 
4084 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
4085 		uint32_t addr, len;
4086 		const uint8_t *data;
4087 
4088 		addr = dst_addr + offset;
4089 		len = MIN(chunk_sz, byte_cnt - offset);
4090 		data = section + offset;
4091 
4092 		err = iwm_firmware_load_chunk(sc, addr, data, len);
4093 		if (err)
4094 			break;
4095 	}
4096 
4097 	return err;
4098 }
4099 
4100 int
4101 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4102     const uint8_t *chunk, uint32_t byte_cnt)
4103 {
4104 	struct iwm_dma_info *dma = &sc->fw_dma;
4105 	int err;
4106 
4107 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
4108 	memcpy(dma->vaddr, chunk, byte_cnt);
4109 	bus_dmamap_sync(sc->sc_dmat,
4110 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4111 
4112 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4113 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4114 		err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4115 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4116 		if (err)
4117 			return err;
4118 	}
4119 
4120 	sc->sc_fw_chunk_done = 0;
4121 
4122 	if (!iwm_nic_lock(sc))
4123 		return EBUSY;
4124 
4125 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4126 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4127 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4128 	    dst_addr);
4129 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4130 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4131 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4132 	    (iwm_get_dma_hi_addr(dma->paddr)
4133 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4134 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4135 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4136 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4137 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4138 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4139 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
4140 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4141 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4142 
4143 	iwm_nic_unlock(sc);
4144 
4145 	/* Wait for this segment to load. */
4146 	err = 0;
4147 	while (!sc->sc_fw_chunk_done) {
4148 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4149 		if (err)
4150 			break;
4151 	}
4152 
4153 	if (!sc->sc_fw_chunk_done)
4154 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4155 		    DEVNAME(sc), dst_addr, byte_cnt);
4156 
4157 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4158 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4159 		int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4160 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4161 		if (!err)
4162 			err = err2;
4163 	}
4164 
4165 	return err;
4166 }
4167 
4168 int
4169 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4170 {
4171 	struct iwm_fw_sects *fws;
4172 	int err, i;
4173 	void *data;
4174 	uint32_t dlen;
4175 	uint32_t offset;
4176 
4177 	fws = &sc->sc_fw.fw_sects[ucode_type];
4178 	for (i = 0; i < fws->fw_count; i++) {
4179 		data = fws->fw_sect[i].fws_data;
4180 		dlen = fws->fw_sect[i].fws_len;
4181 		offset = fws->fw_sect[i].fws_devoff;
4182 		if (dlen > sc->sc_fwdmasegsz) {
4183 			err = EFBIG;
4184 		} else
4185 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4186 		if (err) {
4187 			printf("%s: could not load firmware chunk %u of %u\n",
4188 			    DEVNAME(sc), i, fws->fw_count);
4189 			return err;
4190 		}
4191 	}
4192 
4193 	iwm_enable_interrupts(sc);
4194 
4195 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
4196 
4197 	return 0;
4198 }
4199 
4200 int
4201 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4202     int cpu, int *first_ucode_section)
4203 {
4204 	int shift_param;
4205 	int i, err = 0, sec_num = 0x1;
4206 	uint32_t val, last_read_idx = 0;
4207 	void *data;
4208 	uint32_t dlen;
4209 	uint32_t offset;
4210 
4211 	if (cpu == 1) {
4212 		shift_param = 0;
4213 		*first_ucode_section = 0;
4214 	} else {
4215 		shift_param = 16;
4216 		(*first_ucode_section)++;
4217 	}
4218 
4219 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4220 		last_read_idx = i;
4221 		data = fws->fw_sect[i].fws_data;
4222 		dlen = fws->fw_sect[i].fws_len;
4223 		offset = fws->fw_sect[i].fws_devoff;
4224 
4225 		/*
4226 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
4227 		 * CPU1 to CPU2.
4228 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
4229 		 * CPU2 non paged to CPU2 paging sec.
4230 		 */
4231 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4232 		    offset == IWM_PAGING_SEPARATOR_SECTION)
4233 			break;
4234 
4235 		if (dlen > sc->sc_fwdmasegsz) {
4236 			err = EFBIG;
4237 		} else
4238 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4239 		if (err) {
4240 			printf("%s: could not load firmware chunk %d "
4241 			    "(error %d)\n", DEVNAME(sc), i, err);
4242 			return err;
4243 		}
4244 
4245 		/* Notify the ucode of the loaded section number and status */
4246 		if (iwm_nic_lock(sc)) {
4247 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4248 			val = val | (sec_num << shift_param);
4249 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4250 			sec_num = (sec_num << 1) | 0x1;
4251 			iwm_nic_unlock(sc);
4252 		} else {
4253 			err = EBUSY;
4254 			printf("%s: could not load firmware chunk %d "
4255 			    "(error %d)\n", DEVNAME(sc), i, err);
4256 			return err;
4257 		}
4258 	}
4259 
4260 	*first_ucode_section = last_read_idx;
4261 
4262 	if (iwm_nic_lock(sc)) {
4263 		if (cpu == 1)
4264 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4265 		else
4266 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4267 		iwm_nic_unlock(sc);
4268 	} else {
4269 		err = EBUSY;
4270 		printf("%s: could not finalize firmware loading (error %d)\n",
4271 		    DEVNAME(sc), err);
4272 		return err;
4273 	}
4274 
4275 	return 0;
4276 }
4277 
4278 int
4279 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4280 {
4281 	struct iwm_fw_sects *fws;
4282 	int err = 0;
4283 	int first_ucode_section;
4284 
4285 	fws = &sc->sc_fw.fw_sects[ucode_type];
4286 
4287 	/* configure the ucode to be ready to get the secured image */
4288 	/* release CPU reset */
4289 	if (iwm_nic_lock(sc)) {
4290 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4291 		    IWM_RELEASE_CPU_RESET_BIT);
4292 		iwm_nic_unlock(sc);
4293 	}
4294 
4295 	/* load to FW the binary Secured sections of CPU1 */
4296 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4297 	if (err)
4298 		return err;
4299 
4300 	/* load to FW the binary sections of CPU2 */
4301 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4302 	if (err)
4303 		return err;
4304 
4305 	iwm_enable_interrupts(sc);
4306 	return 0;
4307 }
4308 
4309 int
4310 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4311 {
4312 	int err;
4313 
4314 	splassert(IPL_NET);
4315 
4316 	sc->sc_uc.uc_intr = 0;
4317 	sc->sc_uc.uc_ok = 0;
4318 
4319 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4320 		err = iwm_load_firmware_8000(sc, ucode_type);
4321 	else
4322 		err = iwm_load_firmware_7000(sc, ucode_type);
4323 
4324 	if (err)
4325 		return err;
4326 
4327 	/* wait for the firmware to load */
4328 	err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4329 	if (err || !sc->sc_uc.uc_ok)
4330 		printf("%s: could not load firmware\n", DEVNAME(sc));
4331 
4332 	return err;
4333 }
4334 
4335 int
4336 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4337 {
4338 	int err;
4339 
4340 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4341 
4342 	err = iwm_nic_init(sc);
4343 	if (err) {
4344 		printf("%s: unable to init nic\n", DEVNAME(sc));
4345 		return err;
4346 	}
4347 
4348 	/* make sure rfkill handshake bits are cleared */
4349 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4350 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4351 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4352 
4353 	/* clear (again), then enable firmware load interrupt */
4354 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4355 	iwm_enable_fwload_interrupt(sc);
4356 
4357 	/* really make sure rfkill handshake bits are cleared */
4358 	/* maybe we should write a few times more?  just to make sure */
4359 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4360 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4361 
4362 	return iwm_load_firmware(sc, ucode_type);
4363 }
4364 
4365 int
4366 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4367 {
4368 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4369 		.valid = htole32(valid_tx_ant),
4370 	};
4371 
4372 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4373 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4374 }
4375 
4376 int
4377 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4378 {
4379 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
4380 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4381 
4382 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4383 	    sc->sc_extra_phy_config);
4384 	phy_cfg_cmd.calib_control.event_trigger =
4385 	    sc->sc_default_calib[ucode_type].event_trigger;
4386 	phy_cfg_cmd.calib_control.flow_trigger =
4387 	    sc->sc_default_calib[ucode_type].flow_trigger;
4388 
4389 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4390 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4391 }
4392 
4393 int
4394 iwm_send_dqa_cmd(struct iwm_softc *sc)
4395 {
4396 	struct iwm_dqa_enable_cmd dqa_cmd = {
4397 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4398 	};
4399 	uint32_t cmd_id;
4400 
4401 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4402 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4403 }
4404 
4405 int
4406 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4407 	enum iwm_ucode_type ucode_type)
4408 {
4409 	enum iwm_ucode_type old_type = sc->sc_uc_current;
4410 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4411 	int err;
4412 
4413 	err = iwm_read_firmware(sc);
4414 	if (err)
4415 		return err;
4416 
4417 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4418 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
4419 	else
4420 		sc->cmdqid = IWM_CMD_QUEUE;
4421 
4422 	sc->sc_uc_current = ucode_type;
4423 	err = iwm_start_fw(sc, ucode_type);
4424 	if (err) {
4425 		sc->sc_uc_current = old_type;
4426 		return err;
4427 	}
4428 
4429 	err = iwm_post_alive(sc);
4430 	if (err)
4431 		return err;
4432 
4433 	/*
4434 	 * configure and operate fw paging mechanism.
4435 	 * driver configures the paging flow only once, CPU2 paging image
4436 	 * included in the IWM_UCODE_INIT image.
4437 	 */
4438 	if (fw->paging_mem_size) {
4439 		err = iwm_save_fw_paging(sc, fw);
4440 		if (err) {
4441 			printf("%s: failed to save the FW paging image\n",
4442 			    DEVNAME(sc));
4443 			return err;
4444 		}
4445 
4446 		err = iwm_send_paging_cmd(sc, fw);
4447 		if (err) {
4448 			printf("%s: failed to send the paging cmd\n",
4449 			    DEVNAME(sc));
4450 			iwm_free_fw_paging(sc);
4451 			return err;
4452 		}
4453 	}
4454 
4455 	return 0;
4456 }
4457 
4458 int
4459 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4460 {
4461 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4462 	int err, s;
4463 
4464 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4465 		printf("%s: radio is disabled by hardware switch\n",
4466 		    DEVNAME(sc));
4467 		return EPERM;
4468 	}
4469 
4470 	s = splnet();
4471 	sc->sc_init_complete = 0;
4472 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4473 	if (err) {
4474 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4475 		splx(s);
4476 		return err;
4477 	}
4478 
4479 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4480 		err = iwm_send_bt_init_conf(sc);
4481 		if (err) {
4482 			printf("%s: could not init bt coex (error %d)\n",
4483 			    DEVNAME(sc), err);
4484 			splx(s);
4485 			return err;
4486 		}
4487 	}
4488 
4489 	if (justnvm) {
4490 		err = iwm_nvm_init(sc);
4491 		if (err) {
4492 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4493 			splx(s);
4494 			return err;
4495 		}
4496 
4497 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4498 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4499 			    sc->sc_nvm.hw_addr);
4500 
4501 		splx(s);
4502 		return 0;
4503 	}
4504 
4505 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4506 	if (err) {
4507 		splx(s);
4508 		return err;
4509 	}
4510 
4511 	/* Send TX valid antennas before triggering calibrations */
4512 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4513 	if (err) {
4514 		splx(s);
4515 		return err;
4516 	}
4517 
4518 	/*
4519 	 * Send phy configurations command to init uCode
4520 	 * to start the 16.0 uCode init image internal calibrations.
4521 	 */
4522 	err = iwm_send_phy_cfg_cmd(sc);
4523 	if (err) {
4524 		splx(s);
4525 		return err;
4526 	}
4527 
4528 	/*
4529 	 * Nothing to do but wait for the init complete and phy DB
4530 	 * notifications from the firmware.
4531 	 */
4532 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4533 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4534 		    SEC_TO_NSEC(2));
4535 		if (err)
4536 			break;
4537 	}
4538 
4539 	splx(s);
4540 	return err;
4541 }
4542 
4543 int
4544 iwm_config_ltr(struct iwm_softc *sc)
4545 {
4546 	struct iwm_ltr_config_cmd cmd = {
4547 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4548 	};
4549 
4550 	if (!sc->sc_ltr_enabled)
4551 		return 0;
4552 
4553 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4554 }
4555 
4556 int
4557 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4558 {
4559 	struct iwm_rx_ring *ring = &sc->rxq;
4560 	struct iwm_rx_data *data = &ring->data[idx];
4561 	struct mbuf *m;
4562 	int err;
4563 	int fatal = 0;
4564 
4565 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4566 	if (m == NULL)
4567 		return ENOBUFS;
4568 
4569 	if (size <= MCLBYTES) {
4570 		MCLGET(m, M_DONTWAIT);
4571 	} else {
4572 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4573 	}
4574 	if ((m->m_flags & M_EXT) == 0) {
4575 		m_freem(m);
4576 		return ENOBUFS;
4577 	}
4578 
4579 	if (data->m != NULL) {
4580 		bus_dmamap_unload(sc->sc_dmat, data->map);
4581 		fatal = 1;
4582 	}
4583 
4584 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4585 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4586 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4587 	if (err) {
4588 		/* XXX */
4589 		if (fatal)
4590 			panic("iwm: could not load RX mbuf");
4591 		m_freem(m);
4592 		return err;
4593 	}
4594 	data->m = m;
4595 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4596 
4597 	/* Update RX descriptor. */
4598 	if (sc->sc_mqrx_supported) {
4599 		((uint64_t *)ring->desc)[idx] =
4600 		    htole64(data->map->dm_segs[0].ds_addr);
4601 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4602 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4603 		    BUS_DMASYNC_PREWRITE);
4604 	} else {
4605 		((uint32_t *)ring->desc)[idx] =
4606 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4607 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4608 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4609 		    BUS_DMASYNC_PREWRITE);
4610 	}
4611 
4612 	return 0;
4613 }
4614 
4615 /*
4616  * RSSI values are reported by the FW as positive values - need to negate
4617  * to obtain their dBM.  Account for missing antennas by replacing 0
4618  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
4619  */
4620 int
4621 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4622 {
4623 	int energy_a, energy_b, energy_c, max_energy;
4624 	uint32_t val;
4625 
4626 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4627 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4628 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4629 	energy_a = energy_a ? -energy_a : -256;
4630 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4631 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4632 	energy_b = energy_b ? -energy_b : -256;
4633 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4634 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4635 	energy_c = energy_c ? -energy_c : -256;
4636 	max_energy = MAX(energy_a, energy_b);
4637 	max_energy = MAX(max_energy, energy_c);
4638 
4639 	return max_energy;
4640 }
4641 
4642 int
4643 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4644     struct iwm_rx_mpdu_desc *desc)
4645 {
4646 	int energy_a, energy_b;
4647 
4648 	energy_a = desc->v1.energy_a;
4649 	energy_b = desc->v1.energy_b;
4650 	energy_a = energy_a ? -energy_a : -256;
4651 	energy_b = energy_b ? -energy_b : -256;
4652 	return MAX(energy_a, energy_b);
4653 }
4654 
4655 void
4656 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4657     struct iwm_rx_data *data)
4658 {
4659 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4660 
4661 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4662 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4663 
4664 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4665 }
4666 
4667 /*
4668  * Retrieve the average noise (in dBm) among receivers.
4669  */
4670 int
4671 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4672 {
4673 	int i, total, nbant, noise;
4674 
4675 	total = nbant = noise = 0;
4676 	for (i = 0; i < 3; i++) {
4677 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4678 		if (noise) {
4679 			total += noise;
4680 			nbant++;
4681 		}
4682 	}
4683 
4684 	/* There should be at least one antenna but check anyway. */
4685 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4686 }
4687 
4688 int
4689 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4690     struct ieee80211_rxinfo *rxi)
4691 {
4692 	struct ieee80211com *ic = &sc->sc_ic;
4693 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4694 	struct ieee80211_frame *wh;
4695 	uint64_t pn, *prsc;
4696 	uint8_t *ivp;
4697 	uint8_t tid;
4698 	int hdrlen, hasqos;
4699 
4700 	wh = mtod(m, struct ieee80211_frame *);
4701 	hdrlen = ieee80211_get_hdrlen(wh);
4702 	ivp = (uint8_t *)wh + hdrlen;
4703 
4704 	/* Check that ExtIV bit is set. */
4705 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4706 		return 1;
4707 
4708 	hasqos = ieee80211_has_qos(wh);
4709 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4710 	prsc = &k->k_rsc[tid];
4711 
4712 	/* Extract the 48-bit PN from the CCMP header. */
4713 	pn = (uint64_t)ivp[0]       |
4714 	     (uint64_t)ivp[1] <<  8 |
4715 	     (uint64_t)ivp[4] << 16 |
4716 	     (uint64_t)ivp[5] << 24 |
4717 	     (uint64_t)ivp[6] << 32 |
4718 	     (uint64_t)ivp[7] << 40;
4719 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4720 		if (pn < *prsc) {
4721 			ic->ic_stats.is_ccmp_replays++;
4722 			return 1;
4723 		}
4724 	} else if (pn <= *prsc) {
4725 		ic->ic_stats.is_ccmp_replays++;
4726 		return 1;
4727 	}
4728 	/* Last seen packet number is updated in ieee80211_inputm(). */
4729 
4730 	/*
4731 	 * Some firmware versions strip the MIC, and some don't. It is not
4732 	 * clear which of the capability flags could tell us what to expect.
4733 	 * For now, keep things simple and just leave the MIC in place if
4734 	 * it is present.
4735 	 *
4736 	 * The IV will be stripped by ieee80211_inputm().
4737 	 */
4738 	return 0;
4739 }
4740 
4741 int
4742 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4743     struct ieee80211_rxinfo *rxi)
4744 {
4745 	struct ieee80211com *ic = &sc->sc_ic;
4746 	struct ifnet *ifp = IC2IFP(ic);
4747 	struct ieee80211_frame *wh;
4748 	struct ieee80211_node *ni;
4749 	int ret = 0;
4750 	uint8_t type, subtype;
4751 
4752 	wh = mtod(m, struct ieee80211_frame *);
4753 
4754 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4755 	if (type == IEEE80211_FC0_TYPE_CTL)
4756 		return 0;
4757 
4758 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4759 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4760 		return 0;
4761 
4762 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4763 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4764 		return 0;
4765 
4766 	ni = ieee80211_find_rxnode(ic, wh);
4767 	/* Handle hardware decryption. */
4768 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4769 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4770 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4771 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4772 			ic->ic_stats.is_ccmp_dec_errs++;
4773 			ret = 1;
4774 			goto out;
4775 		}
4776 		/* Check whether decryption was successful or not. */
4777 		if ((rx_pkt_status &
4778 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4779 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4780 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4781 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4782 			ic->ic_stats.is_ccmp_dec_errs++;
4783 			ret = 1;
4784 			goto out;
4785 		}
4786 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4787 	}
4788 out:
4789 	if (ret)
4790 		ifp->if_ierrors++;
4791 	ieee80211_release_node(ic, ni);
4792 	return ret;
4793 }
4794 
4795 void
4796 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4797     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4798     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4799     struct mbuf_list *ml)
4800 {
4801 	struct ieee80211com *ic = &sc->sc_ic;
4802 	struct ifnet *ifp = IC2IFP(ic);
4803 	struct ieee80211_frame *wh;
4804 	struct ieee80211_node *ni;
4805 
4806 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4807 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4808 
4809 	wh = mtod(m, struct ieee80211_frame *);
4810 	ni = ieee80211_find_rxnode(ic, wh);
4811 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4812 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4813 		ifp->if_ierrors++;
4814 		m_freem(m);
4815 		ieee80211_release_node(ic, ni);
4816 		return;
4817 	}
4818 
4819 #if NBPFILTER > 0
4820 	if (sc->sc_drvbpf != NULL) {
4821 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4822 		uint16_t chan_flags;
4823 
4824 		tap->wr_flags = 0;
4825 		if (is_shortpre)
4826 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4827 		tap->wr_chan_freq =
4828 		    htole16(ic->ic_channels[chanidx].ic_freq);
4829 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4830 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4831 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4832 			chan_flags &= ~IEEE80211_CHAN_HT;
4833 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4834 		}
4835 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4836 			chan_flags &= ~IEEE80211_CHAN_VHT;
4837 		tap->wr_chan_flags = htole16(chan_flags);
4838 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4839 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4840 		tap->wr_tsft = device_timestamp;
4841 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4842 			uint8_t mcs = (rate_n_flags &
4843 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4844 			    IWM_RATE_HT_MCS_NSS_MSK));
4845 			tap->wr_rate = (0x80 | mcs);
4846 		} else {
4847 			uint8_t rate = (rate_n_flags &
4848 			    IWM_RATE_LEGACY_RATE_MSK);
4849 			switch (rate) {
4850 			/* CCK rates. */
4851 			case  10: tap->wr_rate =   2; break;
4852 			case  20: tap->wr_rate =   4; break;
4853 			case  55: tap->wr_rate =  11; break;
4854 			case 110: tap->wr_rate =  22; break;
4855 			/* OFDM rates. */
4856 			case 0xd: tap->wr_rate =  12; break;
4857 			case 0xf: tap->wr_rate =  18; break;
4858 			case 0x5: tap->wr_rate =  24; break;
4859 			case 0x7: tap->wr_rate =  36; break;
4860 			case 0x9: tap->wr_rate =  48; break;
4861 			case 0xb: tap->wr_rate =  72; break;
4862 			case 0x1: tap->wr_rate =  96; break;
4863 			case 0x3: tap->wr_rate = 108; break;
4864 			/* Unknown rate: should not happen. */
4865 			default:  tap->wr_rate =   0;
4866 			}
4867 		}
4868 
4869 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4870 		    m, BPF_DIRECTION_IN);
4871 	}
4872 #endif
4873 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4874 	ieee80211_release_node(ic, ni);
4875 }
4876 
4877 void
4878 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4879     size_t maxlen, struct mbuf_list *ml)
4880 {
4881 	struct ieee80211com *ic = &sc->sc_ic;
4882 	struct ieee80211_rxinfo rxi;
4883 	struct iwm_rx_phy_info *phy_info;
4884 	struct iwm_rx_mpdu_res_start *rx_res;
4885 	int device_timestamp;
4886 	uint16_t phy_flags;
4887 	uint32_t len;
4888 	uint32_t rx_pkt_status;
4889 	int rssi, chanidx, rate_n_flags;
4890 
4891 	memset(&rxi, 0, sizeof(rxi));
4892 
4893 	phy_info = &sc->sc_last_phy_info;
4894 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4895 	len = le16toh(rx_res->byte_count);
4896 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4897 		/* Allow control frames in monitor mode. */
4898 		if (len < sizeof(struct ieee80211_frame_cts)) {
4899 			ic->ic_stats.is_rx_tooshort++;
4900 			IC2IFP(ic)->if_ierrors++;
4901 			m_freem(m);
4902 			return;
4903 		}
4904 	} else if (len < sizeof(struct ieee80211_frame)) {
4905 		ic->ic_stats.is_rx_tooshort++;
4906 		IC2IFP(ic)->if_ierrors++;
4907 		m_freem(m);
4908 		return;
4909 	}
4910 	if (len > maxlen - sizeof(*rx_res)) {
4911 		IC2IFP(ic)->if_ierrors++;
4912 		m_freem(m);
4913 		return;
4914 	}
4915 
4916 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4917 		m_freem(m);
4918 		return;
4919 	}
4920 
4921 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4922 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4923 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4924 		m_freem(m);
4925 		return; /* drop */
4926 	}
4927 
4928 	m->m_data = pktdata + sizeof(*rx_res);
4929 	m->m_pkthdr.len = m->m_len = len;
4930 
4931 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4932 		m_freem(m);
4933 		return;
4934 	}
4935 
4936 	chanidx = letoh32(phy_info->channel);
4937 	device_timestamp = le32toh(phy_info->system_timestamp);
4938 	phy_flags = letoh16(phy_info->phy_flags);
4939 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4940 
4941 	rssi = iwm_get_signal_strength(sc, phy_info);
4942 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4943 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4944 
4945 	rxi.rxi_rssi = rssi;
4946 	rxi.rxi_tstamp = device_timestamp;
4947 	rxi.rxi_chan = chanidx;
4948 
4949 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4950 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4951 	    rate_n_flags, device_timestamp, &rxi, ml);
4952 }
4953 
4954 void
4955 iwm_flip_address(uint8_t *addr)
4956 {
4957 	int i;
4958 	uint8_t mac_addr[ETHER_ADDR_LEN];
4959 
4960 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4961 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4962 	IEEE80211_ADDR_COPY(addr, mac_addr);
4963 }
4964 
4965 /*
4966  * Drop duplicate 802.11 retransmissions
4967  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4968  * and handle pseudo-duplicate frames which result from deaggregation
4969  * of A-MSDU frames in hardware.
4970  */
4971 int
4972 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4973     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4974 {
4975 	struct ieee80211com *ic = &sc->sc_ic;
4976 	struct iwm_node *in = (void *)ic->ic_bss;
4977 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4978 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4979 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4980 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4981 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4982 	int hasqos = ieee80211_has_qos(wh);
4983 	uint16_t seq;
4984 
4985 	if (type == IEEE80211_FC0_TYPE_CTL ||
4986 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4987 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4988 		return 0;
4989 
4990 	if (hasqos) {
4991 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4992 		if (tid > IWM_MAX_TID_COUNT)
4993 			tid = IWM_MAX_TID_COUNT;
4994 	}
4995 
4996 	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4997 	subframe_idx = desc->amsdu_info &
4998 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4999 
5000 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
5001 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
5002 	    dup_data->last_seq[tid] == seq &&
5003 	    dup_data->last_sub_frame[tid] >= subframe_idx)
5004 		return 1;
5005 
5006 	/*
5007 	 * Allow the same frame sequence number for all A-MSDU subframes
5008 	 * following the first subframe.
5009 	 * Otherwise these subframes would be discarded as replays.
5010 	 */
5011 	if (dup_data->last_seq[tid] == seq &&
5012 	    subframe_idx > dup_data->last_sub_frame[tid] &&
5013 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
5014 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5015 	}
5016 
5017 	dup_data->last_seq[tid] = seq;
5018 	dup_data->last_sub_frame[tid] = subframe_idx;
5019 
5020 	return 0;
5021 }
5022 
5023 /*
5024  * Returns true if sn2 - buffer_size < sn1 < sn2.
5025  * To be used only in order to compare reorder buffer head with NSSN.
5026  * We fully trust NSSN unless it is behind us due to reorder timeout.
5027  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
5028  */
5029 int
5030 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
5031 {
5032 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
5033 }
5034 
5035 void
5036 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
5037     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
5038     uint16_t nssn, struct mbuf_list *ml)
5039 {
5040 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
5041 	uint16_t ssn = reorder_buf->head_sn;
5042 
5043 	/* ignore nssn smaller than head sn - this can happen due to timeout */
5044 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
5045 		goto set_timer;
5046 
5047 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
5048 		int index = ssn % reorder_buf->buf_size;
5049 		struct mbuf *m;
5050 		int chanidx, is_shortpre;
5051 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
5052 		struct ieee80211_rxinfo *rxi;
5053 
5054 		/* This data is the same for all A-MSDU subframes. */
5055 		chanidx = entries[index].chanidx;
5056 		rx_pkt_status = entries[index].rx_pkt_status;
5057 		is_shortpre = entries[index].is_shortpre;
5058 		rate_n_flags = entries[index].rate_n_flags;
5059 		device_timestamp = entries[index].device_timestamp;
5060 		rxi = &entries[index].rxi;
5061 
5062 		/*
5063 		 * Empty the list. Will have more than one frame for A-MSDU.
5064 		 * Empty list is valid as well since nssn indicates frames were
5065 		 * received.
5066 		 */
5067 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
5068 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
5069 			    rate_n_flags, device_timestamp, rxi, ml);
5070 			reorder_buf->num_stored--;
5071 
5072 			/*
5073 			 * Allow the same frame sequence number and CCMP PN for
5074 			 * all A-MSDU subframes following the first subframe.
5075 			 * Otherwise they would be discarded as replays.
5076 			 */
5077 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5078 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5079 		}
5080 
5081 		ssn = (ssn + 1) & 0xfff;
5082 	}
5083 	reorder_buf->head_sn = nssn;
5084 
5085 set_timer:
5086 	if (reorder_buf->num_stored && !reorder_buf->removed) {
5087 		timeout_add_usec(&reorder_buf->reorder_timer,
5088 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5089 	} else
5090 		timeout_del(&reorder_buf->reorder_timer);
5091 }
5092 
5093 int
5094 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5095     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5096 {
5097 	struct ieee80211com *ic = &sc->sc_ic;
5098 
5099 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5100 		/* we have a new (A-)MPDU ... */
5101 
5102 		/*
5103 		 * reset counter to 0 if we didn't have any oldsn in
5104 		 * the last A-MPDU (as detected by GP2 being identical)
5105 		 */
5106 		if (!buffer->consec_oldsn_prev_drop)
5107 			buffer->consec_oldsn_drops = 0;
5108 
5109 		/* either way, update our tracking state */
5110 		buffer->consec_oldsn_ampdu_gp2 = gp2;
5111 	} else if (buffer->consec_oldsn_prev_drop) {
5112 		/*
5113 		 * tracking state didn't change, and we had an old SN
5114 		 * indication before - do nothing in this case, we
5115 		 * already noted this one down and are waiting for the
5116 		 * next A-MPDU (by GP2)
5117 		 */
5118 		return 0;
5119 	}
5120 
5121 	/* return unless this MPDU has old SN */
5122 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5123 		return 0;
5124 
5125 	/* update state */
5126 	buffer->consec_oldsn_prev_drop = 1;
5127 	buffer->consec_oldsn_drops++;
5128 
5129 	/* if limit is reached, send del BA and reset state */
5130 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5131 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5132 		    0, tid);
5133 		buffer->consec_oldsn_prev_drop = 0;
5134 		buffer->consec_oldsn_drops = 0;
5135 		return 1;
5136 	}
5137 
5138 	return 0;
5139 }
5140 
5141 /*
5142  * Handle re-ordering of frames which were de-aggregated in hardware.
5143  * Returns 1 if the MPDU was consumed (buffered or dropped).
5144  * Returns 0 if the MPDU should be passed to upper layer.
5145  */
5146 int
5147 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5148     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5149     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5150     struct mbuf_list *ml)
5151 {
5152 	struct ieee80211com *ic = &sc->sc_ic;
5153 	struct ieee80211_frame *wh;
5154 	struct ieee80211_node *ni;
5155 	struct iwm_rxba_data *rxba;
5156 	struct iwm_reorder_buffer *buffer;
5157 	uint32_t reorder_data = le32toh(desc->reorder_data);
5158 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5159 	int last_subframe =
5160 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5161 	uint8_t tid;
5162 	uint8_t subframe_idx = (desc->amsdu_info &
5163 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5164 	struct iwm_reorder_buf_entry *entries;
5165 	int index;
5166 	uint16_t nssn, sn;
5167 	uint8_t baid, type, subtype;
5168 	int hasqos;
5169 
5170 	wh = mtod(m, struct ieee80211_frame *);
5171 	hasqos = ieee80211_has_qos(wh);
5172 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5173 
5174 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5175 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5176 
5177 	/*
5178 	 * We are only interested in Block Ack requests and unicast QoS data.
5179 	 */
5180 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5181 		return 0;
5182 	if (hasqos) {
5183 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5184 			return 0;
5185 	} else {
5186 		if (type != IEEE80211_FC0_TYPE_CTL ||
5187 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
5188 			return 0;
5189 	}
5190 
5191 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5192 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
5193 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5194 	    baid >= nitems(sc->sc_rxba_data))
5195 		return 0;
5196 
5197 	rxba = &sc->sc_rxba_data[baid];
5198 	if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5199 	    tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5200 		return 0;
5201 
5202 	if (rxba->timeout != 0)
5203 		getmicrouptime(&rxba->last_rx);
5204 
5205 	/* Bypass A-MPDU re-ordering in net80211. */
5206 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5207 
5208 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5209 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5210 		IWM_RX_MPDU_REORDER_SN_SHIFT;
5211 
5212 	buffer = &rxba->reorder_buf;
5213 	entries = &rxba->entries[0];
5214 
5215 	if (!buffer->valid) {
5216 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5217 			return 0;
5218 		buffer->valid = 1;
5219 	}
5220 
5221 	ni = ieee80211_find_rxnode(ic, wh);
5222 	if (type == IEEE80211_FC0_TYPE_CTL &&
5223 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5224 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5225 		goto drop;
5226 	}
5227 
5228 	/*
5229 	 * If there was a significant jump in the nssn - adjust.
5230 	 * If the SN is smaller than the NSSN it might need to first go into
5231 	 * the reorder buffer, in which case we just release up to it and the
5232 	 * rest of the function will take care of storing it and releasing up to
5233 	 * the nssn.
5234 	 */
5235 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5236 	    buffer->buf_size) ||
5237 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5238 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5239 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5240 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5241 	}
5242 
5243 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5244 	    device_timestamp)) {
5245 		 /* BA session will be torn down. */
5246 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5247 		goto drop;
5248 
5249 	}
5250 
5251 	/* drop any outdated packets */
5252 	if (SEQ_LT(sn, buffer->head_sn)) {
5253 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5254 		goto drop;
5255 	}
5256 
5257 	/* release immediately if allowed by nssn and no stored frames */
5258 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5259 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5260 		   (!is_amsdu || last_subframe))
5261 			buffer->head_sn = nssn;
5262 		ieee80211_release_node(ic, ni);
5263 		return 0;
5264 	}
5265 
5266 	/*
5267 	 * release immediately if there are no stored frames, and the sn is
5268 	 * equal to the head.
5269 	 * This can happen due to reorder timer, where NSSN is behind head_sn.
5270 	 * When we released everything, and we got the next frame in the
5271 	 * sequence, according to the NSSN we can't release immediately,
5272 	 * while technically there is no hole and we can move forward.
5273 	 */
5274 	if (!buffer->num_stored && sn == buffer->head_sn) {
5275 		if (!is_amsdu || last_subframe)
5276 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5277 		ieee80211_release_node(ic, ni);
5278 		return 0;
5279 	}
5280 
5281 	index = sn % buffer->buf_size;
5282 
5283 	/*
5284 	 * Check if we already stored this frame
5285 	 * As AMSDU is either received or not as whole, logic is simple:
5286 	 * If we have frames in that position in the buffer and the last frame
5287 	 * originated from AMSDU had a different SN then it is a retransmission.
5288 	 * If it is the same SN then if the subframe index is incrementing it
5289 	 * is the same AMSDU - otherwise it is a retransmission.
5290 	 */
5291 	if (!ml_empty(&entries[index].frames)) {
5292 		if (!is_amsdu) {
5293 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5294 			goto drop;
5295 		} else if (sn != buffer->last_amsdu ||
5296 		    buffer->last_sub_index >= subframe_idx) {
5297 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5298 			goto drop;
5299 		}
5300 	} else {
5301 		/* This data is the same for all A-MSDU subframes. */
5302 		entries[index].chanidx = chanidx;
5303 		entries[index].is_shortpre = is_shortpre;
5304 		entries[index].rate_n_flags = rate_n_flags;
5305 		entries[index].device_timestamp = device_timestamp;
5306 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5307 	}
5308 
5309 	/* put in reorder buffer */
5310 	ml_enqueue(&entries[index].frames, m);
5311 	buffer->num_stored++;
5312 	getmicrouptime(&entries[index].reorder_time);
5313 
5314 	if (is_amsdu) {
5315 		buffer->last_amsdu = sn;
5316 		buffer->last_sub_index = subframe_idx;
5317 	}
5318 
5319 	/*
5320 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5321 	 * The reason is that NSSN advances on the first sub-frame, and may
5322 	 * cause the reorder buffer to advance before all the sub-frames arrive.
5323 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5324 	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
5325 	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
5326 	 * already ahead and it will be dropped.
5327 	 * If the last sub-frame is not on this queue - we will get frame
5328 	 * release notification with up to date NSSN.
5329 	 */
5330 	if (!is_amsdu || last_subframe)
5331 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5332 
5333 	ieee80211_release_node(ic, ni);
5334 	return 1;
5335 
5336 drop:
5337 	m_freem(m);
5338 	ieee80211_release_node(ic, ni);
5339 	return 1;
5340 }
5341 
5342 void
5343 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5344     size_t maxlen, struct mbuf_list *ml)
5345 {
5346 	struct ieee80211com *ic = &sc->sc_ic;
5347 	struct ieee80211_rxinfo rxi;
5348 	struct iwm_rx_mpdu_desc *desc;
5349 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5350 	int rssi;
5351 	uint8_t chanidx;
5352 	uint16_t phy_info;
5353 
5354 	memset(&rxi, 0, sizeof(rxi));
5355 
5356 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
5357 
5358 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5359 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5360 		m_freem(m);
5361 		return; /* drop */
5362 	}
5363 
5364 	len = le16toh(desc->mpdu_len);
5365 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5366 		/* Allow control frames in monitor mode. */
5367 		if (len < sizeof(struct ieee80211_frame_cts)) {
5368 			ic->ic_stats.is_rx_tooshort++;
5369 			IC2IFP(ic)->if_ierrors++;
5370 			m_freem(m);
5371 			return;
5372 		}
5373 	} else if (len < sizeof(struct ieee80211_frame)) {
5374 		ic->ic_stats.is_rx_tooshort++;
5375 		IC2IFP(ic)->if_ierrors++;
5376 		m_freem(m);
5377 		return;
5378 	}
5379 	if (len > maxlen - sizeof(*desc)) {
5380 		IC2IFP(ic)->if_ierrors++;
5381 		m_freem(m);
5382 		return;
5383 	}
5384 
5385 	m->m_data = pktdata + sizeof(*desc);
5386 	m->m_pkthdr.len = m->m_len = len;
5387 
5388 	/* Account for padding following the frame header. */
5389 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5390 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5391 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5392 		if (type == IEEE80211_FC0_TYPE_CTL) {
5393 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5394 			case IEEE80211_FC0_SUBTYPE_CTS:
5395 				hdrlen = sizeof(struct ieee80211_frame_cts);
5396 				break;
5397 			case IEEE80211_FC0_SUBTYPE_ACK:
5398 				hdrlen = sizeof(struct ieee80211_frame_ack);
5399 				break;
5400 			default:
5401 				hdrlen = sizeof(struct ieee80211_frame_min);
5402 				break;
5403 			}
5404 		} else
5405 			hdrlen = ieee80211_get_hdrlen(wh);
5406 
5407 		if ((le16toh(desc->status) &
5408 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5409 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5410 			/* Padding is inserted after the IV. */
5411 			hdrlen += IEEE80211_CCMP_HDRLEN;
5412 		}
5413 
5414 		memmove(m->m_data + 2, m->m_data, hdrlen);
5415 		m_adj(m, 2);
5416 	}
5417 
5418 	/*
5419 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5420 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5421 	 * bit set in the frame header. We need to clear this bit ourselves.
5422 	 *
5423 	 * And we must allow the same CCMP PN for subframes following the
5424 	 * first subframe. Otherwise they would be discarded as replays.
5425 	 */
5426 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5427 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5428 		uint8_t subframe_idx = (desc->amsdu_info &
5429 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5430 		if (subframe_idx > 0)
5431 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5432 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5433 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5434 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5435 			    struct ieee80211_qosframe_addr4 *);
5436 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5437 
5438 			/* HW reverses addr3 and addr4. */
5439 			iwm_flip_address(qwh4->i_addr3);
5440 			iwm_flip_address(qwh4->i_addr4);
5441 		} else if (ieee80211_has_qos(wh) &&
5442 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5443 			struct ieee80211_qosframe *qwh = mtod(m,
5444 			    struct ieee80211_qosframe *);
5445 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5446 
5447 			/* HW reverses addr3. */
5448 			iwm_flip_address(qwh->i_addr3);
5449 		}
5450 	}
5451 
5452 	/*
5453 	 * Verify decryption before duplicate detection. The latter uses
5454 	 * the TID supplied in QoS frame headers and this TID is implicitly
5455 	 * verified as part of the CCMP nonce.
5456 	 */
5457 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5458 		m_freem(m);
5459 		return;
5460 	}
5461 
5462 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5463 		m_freem(m);
5464 		return;
5465 	}
5466 
5467 	phy_info = le16toh(desc->phy_info);
5468 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
5469 	chanidx = desc->v1.channel;
5470 	device_timestamp = desc->v1.gp2_on_air_rise;
5471 
5472 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
5473 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
5474 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5475 
5476 	rxi.rxi_rssi = rssi;
5477 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5478 	rxi.rxi_chan = chanidx;
5479 
5480 	if (iwm_rx_reorder(sc, m, chanidx, desc,
5481 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5482 	    rate_n_flags, device_timestamp, &rxi, ml))
5483 		return;
5484 
5485 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5486 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5487 	    rate_n_flags, device_timestamp, &rxi, ml);
5488 }
5489 
5490 void
5491 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5492 {
5493 	struct ieee80211com *ic = &sc->sc_ic;
5494 	struct iwm_node *in = (void *)ni;
5495 	int old_txmcs = ni->ni_txmcs;
5496 	int old_nss = ni->ni_vht_ss;
5497 
5498 	if (ni->ni_flags & IEEE80211_NODE_VHT)
5499 		ieee80211_ra_vht_choose(&in->in_rn_vht, ic, ni);
5500 	else
5501 		ieee80211_ra_choose(&in->in_rn, ic, ni);
5502 
5503 	/*
5504 	 * If RA has chosen a new TX rate we must update
5505 	 * the firmware's LQ rate table.
5506 	 */
5507 	if (ni->ni_txmcs != old_txmcs || ni->ni_vht_ss != old_nss)
5508 		iwm_setrates(in, 1);
5509 }
5510 
5511 void
5512 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5513     int txmcs, uint8_t failure_frame, int txfail)
5514 {
5515 	struct ieee80211com *ic = &sc->sc_ic;
5516 	struct iwm_node *in = (void *)ni;
5517 
5518 	/* Ignore Tx reports which don't match our last LQ command. */
5519 	if (txmcs != ni->ni_txmcs) {
5520 		if (++in->lq_rate_mismatch > 15) {
5521 			/* Try to sync firmware with the driver... */
5522 			iwm_setrates(in, 1);
5523 			in->lq_rate_mismatch = 0;
5524 		}
5525 	} else {
5526 		int mcs = txmcs;
5527 		const struct ieee80211_ht_rateset *rs =
5528 		    ieee80211_ra_get_ht_rateset(txmcs,
5529 		        ieee80211_node_supports_ht_chan40(ni),
5530 			ieee80211_ra_use_ht_sgi(ni));
5531 		unsigned int retries = 0, i;
5532 
5533 		in->lq_rate_mismatch = 0;
5534 
5535 		for (i = 0; i < failure_frame; i++) {
5536 			if (mcs > rs->min_mcs) {
5537 				ieee80211_ra_add_stats_ht(&in->in_rn,
5538 				    ic, ni, mcs, 1, 1);
5539 				mcs--;
5540 			} else
5541 				retries++;
5542 		}
5543 
5544 		if (txfail && failure_frame == 0) {
5545 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5546 			    txmcs, 1, 1);
5547 		} else {
5548 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5549 			    mcs, retries + 1, retries);
5550 		}
5551 
5552 		iwm_ra_choose(sc, ni);
5553 	}
5554 }
5555 
5556 void
5557 iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5558     int txmcs, int nss, uint8_t failure_frame, int txfail)
5559 {
5560 	struct ieee80211com *ic = &sc->sc_ic;
5561 	struct iwm_node *in = (void *)ni;
5562 	uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5563 	uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5564 
5565 	/* Ignore Tx reports which don't match our last LQ command. */
5566 	if (txmcs != ni->ni_txmcs || nss != ni->ni_vht_ss) {
5567 		if (++in->lq_rate_mismatch > 15) {
5568 			/* Try to sync firmware with the driver... */
5569 			iwm_setrates(in, 1);
5570 			in->lq_rate_mismatch = 0;
5571 		}
5572 	} else {
5573 		int mcs = txmcs;
5574 		unsigned int retries = 0, i;
5575 
5576 		if (in->in_phyctxt) {
5577 			vht_chan_width = in->in_phyctxt->vht_chan_width;
5578 			sco = in->in_phyctxt->sco;
5579 		}
5580 		in->lq_rate_mismatch = 0;
5581 
5582 		for (i = 0; i < failure_frame; i++) {
5583 			if (mcs > 0) {
5584 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5585 				    ic, ni, mcs, nss, 1, 1);
5586 				if (vht_chan_width >=
5587 				    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5588 					/*
5589 					 * First 4 Tx attempts used same MCS,
5590 					 * twice at 80MHz and twice at 40MHz.
5591 					 */
5592 					if (i >= 4)
5593 						mcs--;
5594 				} else if (sco == IEEE80211_HTOP0_SCO_SCA ||
5595 				    sco == IEEE80211_HTOP0_SCO_SCB) {
5596 					/*
5597 					 * First 4 Tx attempts used same MCS,
5598 					 * four times at 40MHz.
5599 					 */
5600 					if (i >= 4)
5601 						mcs--;
5602 				} else
5603 					mcs--;
5604 			} else
5605 				retries++;
5606 		}
5607 
5608 		if (txfail && failure_frame == 0) {
5609 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5610 			    txmcs, nss, 1, 1);
5611 		} else {
5612 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5613 			    mcs, nss, retries + 1, retries);
5614 		}
5615 
5616 		iwm_ra_choose(sc, ni);
5617 	}
5618 }
5619 
5620 void
5621 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5622     struct iwm_node *in, int txmcs, int txrate)
5623 {
5624 	struct ieee80211com *ic = &sc->sc_ic;
5625 	struct ieee80211_node *ni = &in->in_ni;
5626 	struct ifnet *ifp = IC2IFP(ic);
5627 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5628 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5629 	uint32_t initial_rate = le32toh(tx_resp->initial_rate);
5630 	int txfail;
5631 
5632 	KASSERT(tx_resp->frame_count == 1);
5633 
5634 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
5635 	    status != IWM_TX_STATUS_DIRECT_DONE);
5636 
5637 	/*
5638 	 * Update rate control statistics.
5639 	 * Only report frames which were actually queued with the currently
5640 	 * selected Tx rate. Because Tx queues are relatively long we may
5641 	 * encounter previously selected rates here during Tx bursts.
5642 	 * Providing feedback based on such frames can lead to suboptimal
5643 	 * Tx rate control decisions.
5644 	 */
5645 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5646 		if (txrate != ni->ni_txrate) {
5647 			if (++in->lq_rate_mismatch > 15) {
5648 				/* Try to sync firmware with the driver... */
5649 				iwm_setrates(in, 1);
5650 				in->lq_rate_mismatch = 0;
5651 			}
5652 		} else {
5653 			in->lq_rate_mismatch = 0;
5654 
5655 			in->in_amn.amn_txcnt++;
5656 			if (txfail)
5657 				in->in_amn.amn_retrycnt++;
5658 			if (tx_resp->failure_frame > 0)
5659 				in->in_amn.amn_retrycnt++;
5660 		}
5661 	} else if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5662 	    ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5663 	    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5664 		int txmcs = initial_rate & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5665 		int nss = ((initial_rate & IWM_RATE_VHT_MCS_NSS_MSK) >>
5666 		    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5667 		iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5668 		    tx_resp->failure_frame, txfail);
5669 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5670 	    (initial_rate & IWM_RATE_MCS_HT_MSK)) {
5671 		int txmcs = initial_rate &
5672 		    (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5673 		iwm_ht_single_rate_control(sc, ni, txmcs,
5674 		    tx_resp->failure_frame, txfail);
5675 	}
5676 
5677 	if (txfail)
5678 		ifp->if_oerrors++;
5679 }
5680 
5681 void
5682 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5683 {
5684 	struct ieee80211com *ic = &sc->sc_ic;
5685 
5686 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5687 	    BUS_DMASYNC_POSTWRITE);
5688 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5689 	m_freem(txd->m);
5690 	txd->m = NULL;
5691 
5692 	KASSERT(txd->in);
5693 	ieee80211_release_node(ic, &txd->in->in_ni);
5694 	txd->in = NULL;
5695 	txd->ampdu_nframes = 0;
5696 	txd->ampdu_txmcs = 0;
5697 	txd->ampdu_txnss = 0;
5698 }
5699 
5700 void
5701 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5702 {
5703 	struct iwm_tx_data *txd;
5704 
5705 	while (ring->tail != idx) {
5706 		txd = &ring->data[ring->tail];
5707 		if (txd->m != NULL) {
5708 			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5709 			iwm_txd_done(sc, txd);
5710 			ring->queued--;
5711 		}
5712 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5713 	}
5714 
5715 	wakeup(ring);
5716 }
5717 
5718 void
5719 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5720     struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5721     uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5722     struct iwm_agg_tx_status *agg_status)
5723 {
5724 	struct ieee80211com *ic = &sc->sc_ic;
5725 	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5726 	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5727 	struct ieee80211_node *ni = &in->in_ni;
5728 	struct ieee80211_tx_ba *ba;
5729 	int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5730 	    status != IWM_TX_STATUS_DIRECT_DONE);
5731 	uint16_t seq;
5732 
5733 	if (ic->ic_state != IEEE80211_S_RUN)
5734 		return;
5735 
5736 	if (nframes > 1) {
5737 		int i;
5738  		/*
5739 		 * Collect information about this A-MPDU.
5740 		 */
5741 
5742 		for (i = 0; i < nframes; i++) {
5743 			uint8_t qid = agg_status[i].qid;
5744 			uint8_t idx = agg_status[i].idx;
5745 			uint16_t txstatus = (le16toh(agg_status[i].status) &
5746 			    IWM_AGG_TX_STATE_STATUS_MSK);
5747 
5748 			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5749 				continue;
5750 
5751 			if (qid != cmd_hdr->qid)
5752 				continue;
5753 
5754 			txdata = &txq->data[idx];
5755 			if (txdata->m == NULL)
5756 				continue;
5757 
5758 			/* The Tx rate was the same for all subframes. */
5759 			if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5760 			    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5761 				txdata->ampdu_txmcs = initial_rate &
5762 				    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5763 				txdata->ampdu_txnss = ((initial_rate &
5764 				    IWM_RATE_VHT_MCS_NSS_MSK) >>
5765 				    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5766 				txdata->ampdu_nframes = nframes;
5767 			} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5768 				txdata->ampdu_txmcs = initial_rate &
5769 				    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5770 				    IWM_RATE_HT_MCS_NSS_MSK);
5771 				txdata->ampdu_nframes = nframes;
5772 			}
5773 		}
5774 		return;
5775 	}
5776 
5777 	ba = &ni->ni_tx_ba[tid];
5778 	if (ba->ba_state != IEEE80211_BA_AGREED)
5779 		return;
5780 	if (SEQ_LT(ssn, ba->ba_winstart))
5781 		return;
5782 
5783 	/* This was a final single-frame Tx attempt for frame SSN-1. */
5784 	seq = (ssn - 1) & 0xfff;
5785 
5786 	/*
5787 	 * Skip rate control if our Tx rate is fixed.
5788 	 * Don't report frames to MiRA which were sent at a different
5789 	 * Tx rate than ni->ni_txmcs.
5790 	 */
5791 	if (ic->ic_fixed_mcs == -1) {
5792 		if (txdata->ampdu_nframes > 1) {
5793 			/*
5794 			 * This frame was once part of an A-MPDU.
5795 			 * Report one failed A-MPDU Tx attempt.
5796 			 * The firmware might have made several such
5797 			 * attempts but we don't keep track of this.
5798 			 */
5799 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5800 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5801 				    ic, ni, txdata->ampdu_txmcs,
5802 				    txdata->ampdu_txnss, 1, 1);
5803 			} else {
5804 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5805 				    txdata->ampdu_txmcs, 1, 1);
5806 			}
5807 		}
5808 
5809 		/* Report the final single-frame Tx attempt. */
5810 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5811 		    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5812 			int txmcs = initial_rate &
5813 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5814 			int nss = ((initial_rate &
5815 			    IWM_RATE_VHT_MCS_NSS_MSK) >>
5816 			    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5817 			iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5818 			    failure_frame, txfail);
5819 		} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5820 			int txmcs = initial_rate &
5821 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5822 			   IWM_RATE_HT_MCS_NSS_MSK);
5823 			iwm_ht_single_rate_control(sc, ni, txmcs,
5824 			    failure_frame, txfail);
5825 		}
5826 	}
5827 
5828 	if (txfail)
5829 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5830 
5831 	/*
5832 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5833 	 * in firmware's BA window. Firmware is not going to retransmit any
5834 	 * frames before its BA window so mark them all as done.
5835 	 */
5836 	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5837 	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5838 	iwm_clear_oactive(sc, txq);
5839 }
5840 
5841 void
5842 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5843     struct iwm_rx_data *data)
5844 {
5845 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5846 	int idx = cmd_hdr->idx;
5847 	int qid = cmd_hdr->qid;
5848 	struct iwm_tx_ring *ring = &sc->txq[qid];
5849 	struct iwm_tx_data *txd;
5850 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5851 	uint32_t ssn;
5852 	uint32_t len = iwm_rx_packet_len(pkt);
5853 
5854 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5855 	    BUS_DMASYNC_POSTREAD);
5856 
5857 	/* Sanity checks. */
5858 	if (sizeof(*tx_resp) > len)
5859 		return;
5860 	if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5861 		return;
5862 	if (qid > IWM_LAST_AGG_TX_QUEUE)
5863 		return;
5864 	if (sizeof(*tx_resp) + sizeof(ssn) +
5865 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5866 		return;
5867 
5868 	sc->sc_tx_timer[qid] = 0;
5869 
5870 	txd = &ring->data[idx];
5871 	if (txd->m == NULL)
5872 		return;
5873 
5874 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5875 	ssn = le32toh(ssn) & 0xfff;
5876 	if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5877 		int status;
5878 		status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5879 		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5880 		    le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5881 		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
5882 	} else {
5883 		/*
5884 		 * Even though this is not an agg queue, we must only free
5885 		 * frames before the firmware's starting sequence number.
5886 		 */
5887 		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5888 		iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5889 		iwm_clear_oactive(sc, ring);
5890 	}
5891 }
5892 
5893 void
5894 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5895 {
5896 	struct ieee80211com *ic = &sc->sc_ic;
5897 	struct ifnet *ifp = IC2IFP(ic);
5898 
5899 	if (ring->queued < IWM_TX_RING_LOMARK) {
5900 		sc->qfullmsk &= ~(1 << ring->qid);
5901 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5902 			ifq_clr_oactive(&ifp->if_snd);
5903 			/*
5904 			 * Well, we're in interrupt context, but then again
5905 			 * I guess net80211 does all sorts of stunts in
5906 			 * interrupt context, so maybe this is no biggie.
5907 			 */
5908 			(*ifp->if_start)(ifp);
5909 		}
5910 	}
5911 }
5912 
5913 void
5914 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5915     struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5916 {
5917 	struct ieee80211com *ic = &sc->sc_ic;
5918 	struct iwm_node *in = (void *)ni;
5919 	int idx, end_idx;
5920 
5921 	/*
5922 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5923 	 */
5924 	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5925 	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5926 	while (idx != end_idx) {
5927 		struct iwm_tx_data *txdata = &txq->data[idx];
5928 		if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5929 			/*
5930 			 * We can assume that this subframe has been ACKed
5931 			 * because ACK failures come as single frames and
5932 			 * before failing an A-MPDU subframe the firmware
5933 			 * sends it as a single frame at least once.
5934 			 */
5935 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5936 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5937 				    ic, ni, txdata->ampdu_txmcs,
5938 				    txdata->ampdu_txnss, 1, 0);
5939 			} else {
5940 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5941 				    txdata->ampdu_txmcs, 1, 0);
5942 			}
5943 			/* Report this frame only once. */
5944 			txdata->ampdu_nframes = 0;
5945 		}
5946 
5947 		idx = (idx + 1) % IWM_TX_RING_COUNT;
5948 	}
5949 
5950 	iwm_ra_choose(sc, ni);
5951 }
5952 
5953 void
5954 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5955 {
5956 	struct iwm_ba_notif *ban = (void *)pkt->data;
5957 	struct ieee80211com *ic = &sc->sc_ic;
5958 	struct ieee80211_node *ni = ic->ic_bss;
5959 	struct iwm_node *in = (void *)ni;
5960 	struct ieee80211_tx_ba *ba;
5961 	struct iwm_tx_ring *ring;
5962 	uint16_t seq, ssn;
5963 	int qid;
5964 
5965 	if (ic->ic_state != IEEE80211_S_RUN)
5966 		return;
5967 
5968 	if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5969 		return;
5970 
5971 	if (ban->sta_id != IWM_STATION_ID ||
5972 	    !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5973 		return;
5974 
5975 	qid = le16toh(ban->scd_flow);
5976 	if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5977 		return;
5978 
5979 	/* Protect against a firmware bug where the queue/TID are off. */
5980 	if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5981 		return;
5982 
5983 	sc->sc_tx_timer[qid] = 0;
5984 
5985 	ba = &ni->ni_tx_ba[ban->tid];
5986 	if (ba->ba_state != IEEE80211_BA_AGREED)
5987 		return;
5988 
5989 	ring = &sc->txq[qid];
5990 
5991 	/*
5992 	 * The first bit in ban->bitmap corresponds to the sequence number
5993 	 * stored in the sequence control field ban->seq_ctl.
5994 	 * Multiple BA notifications in a row may be using this number, with
5995 	 * additional bits being set in cba->bitmap. It is unclear how the
5996 	 * firmware decides to shift this window forward.
5997 	 * We rely on ba->ba_winstart instead.
5998 	 */
5999 	seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
6000 
6001 	/*
6002 	 * The firmware's new BA window starting sequence number
6003 	 * corresponds to the first hole in ban->scd_ssn, implying
6004 	 * that all frames between 'seq' and 'ssn' (non-inclusive)
6005 	 * have been acked.
6006 	 */
6007 	ssn = le16toh(ban->scd_ssn);
6008 
6009 	if (SEQ_LT(ssn, ba->ba_winstart))
6010 		return;
6011 
6012 	/* Skip rate control if our Tx rate is fixed. */
6013 	if (ic->ic_fixed_mcs == -1)
6014 		iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
6015 		    ba->ba_winstart, ssn);
6016 
6017 	/*
6018 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
6019 	 * in firmware's BA window. Firmware is not going to retransmit any
6020 	 * frames before its BA window so mark them all as done.
6021 	 */
6022 	ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
6023 	iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
6024 	iwm_clear_oactive(sc, ring);
6025 }
6026 
6027 void
6028 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
6029     struct iwm_rx_data *data)
6030 {
6031 	struct ieee80211com *ic = &sc->sc_ic;
6032 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
6033 	uint32_t missed;
6034 
6035 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
6036 	    (ic->ic_state != IEEE80211_S_RUN))
6037 		return;
6038 
6039 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
6040 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
6041 
6042 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
6043 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
6044 		if (ic->ic_if.if_flags & IFF_DEBUG)
6045 			printf("%s: receiving no beacons from %s; checking if "
6046 			    "this AP is still responding to probe requests\n",
6047 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
6048 		/*
6049 		 * Rather than go directly to scan state, try to send a
6050 		 * directed probe request first. If that fails then the
6051 		 * state machine will drop us into scanning after timing
6052 		 * out waiting for a probe response.
6053 		 */
6054 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
6055 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
6056 	}
6057 
6058 }
6059 
6060 int
6061 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
6062 {
6063 	struct iwm_binding_cmd cmd;
6064 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
6065 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
6066 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
6067 	uint32_t status;
6068 	size_t len;
6069 
6070 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
6071 		panic("binding already added");
6072 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6073 		panic("binding already removed");
6074 
6075 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
6076 		return EINVAL;
6077 
6078 	memset(&cmd, 0, sizeof(cmd));
6079 
6080 	cmd.id_and_color
6081 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6082 	cmd.action = htole32(action);
6083 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6084 
6085 	cmd.macs[0] = htole32(mac_id);
6086 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
6087 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
6088 
6089 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
6090 	    !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
6091 		cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
6092 	else
6093 		cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
6094 
6095 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
6096 		len = sizeof(cmd);
6097 	else
6098 		len = sizeof(struct iwm_binding_cmd_v1);
6099 	status = 0;
6100 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
6101 	    &status);
6102 	if (err == 0 && status != 0)
6103 		err = EIO;
6104 
6105 	return err;
6106 }
6107 
6108 void
6109 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6110     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
6111 {
6112 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
6113 
6114 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6115 	    ctxt->color));
6116 	cmd->action = htole32(action);
6117 	cmd->apply_time = htole32(apply_time);
6118 }
6119 
6120 void
6121 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
6122     struct ieee80211_channel *chan, uint8_t chains_static,
6123     uint8_t chains_dynamic, uint8_t sco, uint8_t vht_chan_width)
6124 {
6125 	struct ieee80211com *ic = &sc->sc_ic;
6126 	uint8_t active_cnt, idle_cnt;
6127 
6128 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6129 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6130 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
6131 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6132 		cmd->ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6133 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6134 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6135 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6136 			/* secondary chan above -> control chan below */
6137 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6138 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6139 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6140 			/* secondary chan below -> control chan above */
6141 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6142 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6143 		} else {
6144 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6145 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6146 		}
6147 	} else {
6148 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6149 		cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6150 	}
6151 
6152 	/* Set rx the chains */
6153 	idle_cnt = chains_static;
6154 	active_cnt = chains_dynamic;
6155 
6156 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6157 					IWM_PHY_RX_CHAIN_VALID_POS);
6158 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6159 	cmd->rxchain_info |= htole32(active_cnt <<
6160 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6161 
6162 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6163 }
6164 
6165 uint8_t
6166 iwm_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
6167 {
6168 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
6169 	int primary_idx = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
6170 	/*
6171 	 * The FW is expected to check the control channel position only
6172 	 * when in HT/VHT and the channel width is not 20MHz. Return
6173 	 * this value as the default one:
6174 	 */
6175 	uint8_t pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6176 
6177 	switch (primary_idx - center_idx) {
6178 	case -6:
6179 		pos = IWM_PHY_VHT_CTRL_POS_2_BELOW;
6180 		break;
6181 	case -2:
6182 		pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6183 		break;
6184 	case 2:
6185 		pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6186 		break;
6187 	case 6:
6188 		pos = IWM_PHY_VHT_CTRL_POS_2_ABOVE;
6189 		break;
6190 	default:
6191 		break;
6192 	}
6193 
6194 	return pos;
6195 }
6196 
6197 int
6198 iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6199     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6200     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6201 {
6202 	struct ieee80211com *ic = &sc->sc_ic;
6203 	struct iwm_phy_context_cmd_uhb cmd;
6204 	uint8_t active_cnt, idle_cnt;
6205 	struct ieee80211_channel *chan = ctxt->channel;
6206 
6207 	memset(&cmd, 0, sizeof(cmd));
6208 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6209 	    ctxt->color));
6210 	cmd.action = htole32(action);
6211 	cmd.apply_time = htole32(apply_time);
6212 
6213 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6214 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6215 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
6216 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6217 		cmd.ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6218 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6219 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6220 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6221 			/* secondary chan above -> control chan below */
6222 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6223 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6224 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6225 			/* secondary chan below -> control chan above */
6226 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6227 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6228 		} else {
6229 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6230 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6231 		}
6232 	} else {
6233 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6234 		cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6235 	}
6236 
6237 	idle_cnt = chains_static;
6238 	active_cnt = chains_dynamic;
6239 	cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6240 					IWM_PHY_RX_CHAIN_VALID_POS);
6241 	cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6242 	cmd.rxchain_info |= htole32(active_cnt <<
6243 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6244 	cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6245 
6246 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6247 }
6248 
6249 int
6250 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6251     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6252     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6253 {
6254 	struct iwm_phy_context_cmd cmd;
6255 
6256 	/*
6257 	 * Intel increased the size of the fw_channel_info struct and neglected
6258 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6259 	 * member in the middle.
6260 	 * To keep things simple we use a separate function to handle the larger
6261 	 * variant of the phy context command.
6262 	 */
6263 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6264 		return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6265 		    chains_dynamic, action, apply_time, sco, vht_chan_width);
6266 
6267 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6268 
6269 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6270 	    chains_static, chains_dynamic, sco, vht_chan_width);
6271 
6272 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6273 	    sizeof(struct iwm_phy_context_cmd), &cmd);
6274 }
6275 
6276 int
6277 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6278 {
6279 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6280 	struct iwm_tfd *desc;
6281 	struct iwm_tx_data *txdata;
6282 	struct iwm_device_cmd *cmd;
6283 	struct mbuf *m;
6284 	bus_addr_t paddr;
6285 	uint32_t addr_lo;
6286 	int err = 0, i, paylen, off, s;
6287 	int idx, code, async, group_id;
6288 	size_t hdrlen, datasz;
6289 	uint8_t *data;
6290 	int generation = sc->sc_generation;
6291 
6292 	code = hcmd->id;
6293 	async = hcmd->flags & IWM_CMD_ASYNC;
6294 	idx = ring->cur;
6295 
6296 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6297 		paylen += hcmd->len[i];
6298 	}
6299 
6300 	/* If this command waits for a response, allocate response buffer. */
6301 	hcmd->resp_pkt = NULL;
6302 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
6303 		uint8_t *resp_buf;
6304 		KASSERT(!async);
6305 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6306 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6307 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
6308 			return ENOSPC;
6309 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6310 		    M_NOWAIT | M_ZERO);
6311 		if (resp_buf == NULL)
6312 			return ENOMEM;
6313 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
6314 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6315 	} else {
6316 		sc->sc_cmd_resp_pkt[idx] = NULL;
6317 	}
6318 
6319 	s = splnet();
6320 
6321 	desc = &ring->desc[idx];
6322 	txdata = &ring->data[idx];
6323 
6324 	group_id = iwm_cmd_groupid(code);
6325 	if (group_id != 0) {
6326 		hdrlen = sizeof(cmd->hdr_wide);
6327 		datasz = sizeof(cmd->data_wide);
6328 	} else {
6329 		hdrlen = sizeof(cmd->hdr);
6330 		datasz = sizeof(cmd->data);
6331 	}
6332 
6333 	if (paylen > datasz) {
6334 		/* Command is too large to fit in pre-allocated space. */
6335 		size_t totlen = hdrlen + paylen;
6336 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6337 			printf("%s: firmware command too long (%zd bytes)\n",
6338 			    DEVNAME(sc), totlen);
6339 			err = EINVAL;
6340 			goto out;
6341 		}
6342 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
6343 		if (m == NULL) {
6344 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6345 			    DEVNAME(sc), totlen);
6346 			err = ENOMEM;
6347 			goto out;
6348 		}
6349 		cmd = mtod(m, struct iwm_device_cmd *);
6350 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6351 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6352 		if (err) {
6353 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6354 			    DEVNAME(sc), totlen);
6355 			m_freem(m);
6356 			goto out;
6357 		}
6358 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6359 		paddr = txdata->map->dm_segs[0].ds_addr;
6360 	} else {
6361 		cmd = &ring->cmd[idx];
6362 		paddr = txdata->cmd_paddr;
6363 	}
6364 
6365 	if (group_id != 0) {
6366 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6367 		cmd->hdr_wide.group_id = group_id;
6368 		cmd->hdr_wide.qid = ring->qid;
6369 		cmd->hdr_wide.idx = idx;
6370 		cmd->hdr_wide.length = htole16(paylen);
6371 		cmd->hdr_wide.version = iwm_cmd_version(code);
6372 		data = cmd->data_wide;
6373 	} else {
6374 		cmd->hdr.code = code;
6375 		cmd->hdr.flags = 0;
6376 		cmd->hdr.qid = ring->qid;
6377 		cmd->hdr.idx = idx;
6378 		data = cmd->data;
6379 	}
6380 
6381 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6382 		if (hcmd->len[i] == 0)
6383 			continue;
6384 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6385 		off += hcmd->len[i];
6386 	}
6387 	KASSERT(off == paylen);
6388 
6389 	/* lo field is not aligned */
6390 	addr_lo = htole32((uint32_t)paddr);
6391 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6392 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
6393 	    | ((hdrlen + paylen) << 4));
6394 	desc->num_tbs = 1;
6395 
6396 	if (paylen > datasz) {
6397 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6398 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6399 	} else {
6400 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6401 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6402 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6403 	}
6404 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6405 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6406 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6407 
6408 	/*
6409 	 * Wake up the NIC to make sure that the firmware will see the host
6410 	 * command - we will let the NIC sleep once all the host commands
6411 	 * returned. This needs to be done only on 7000 family NICs.
6412 	 */
6413 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6414 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6415 			err = EBUSY;
6416 			goto out;
6417 		}
6418 	}
6419 
6420 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6421 
6422 	/* Kick command ring. */
6423 	ring->queued++;
6424 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6425 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6426 
6427 	if (!async) {
6428 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6429 		if (err == 0) {
6430 			/* if hardware is no longer up, return error */
6431 			if (generation != sc->sc_generation) {
6432 				err = ENXIO;
6433 				goto out;
6434 			}
6435 
6436 			/* Response buffer will be freed in iwm_free_resp(). */
6437 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6438 			sc->sc_cmd_resp_pkt[idx] = NULL;
6439 		} else if (generation == sc->sc_generation) {
6440 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6441 			    sc->sc_cmd_resp_len[idx]);
6442 			sc->sc_cmd_resp_pkt[idx] = NULL;
6443 		}
6444 	}
6445  out:
6446 	splx(s);
6447 
6448 	return err;
6449 }
6450 
6451 int
6452 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6453     uint16_t len, const void *data)
6454 {
6455 	struct iwm_host_cmd cmd = {
6456 		.id = id,
6457 		.len = { len, },
6458 		.data = { data, },
6459 		.flags = flags,
6460 	};
6461 
6462 	return iwm_send_cmd(sc, &cmd);
6463 }
6464 
6465 int
6466 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6467     uint32_t *status)
6468 {
6469 	struct iwm_rx_packet *pkt;
6470 	struct iwm_cmd_response *resp;
6471 	int err, resp_len;
6472 
6473 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6474 	cmd->flags |= IWM_CMD_WANT_RESP;
6475 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6476 
6477 	err = iwm_send_cmd(sc, cmd);
6478 	if (err)
6479 		return err;
6480 
6481 	pkt = cmd->resp_pkt;
6482 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6483 		return EIO;
6484 
6485 	resp_len = iwm_rx_packet_payload_len(pkt);
6486 	if (resp_len != sizeof(*resp)) {
6487 		iwm_free_resp(sc, cmd);
6488 		return EIO;
6489 	}
6490 
6491 	resp = (void *)pkt->data;
6492 	*status = le32toh(resp->status);
6493 	iwm_free_resp(sc, cmd);
6494 	return err;
6495 }
6496 
6497 int
6498 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6499     const void *data, uint32_t *status)
6500 {
6501 	struct iwm_host_cmd cmd = {
6502 		.id = id,
6503 		.len = { len, },
6504 		.data = { data, },
6505 	};
6506 
6507 	return iwm_send_cmd_status(sc, &cmd, status);
6508 }
6509 
6510 void
6511 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6512 {
6513 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6514 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6515 	hcmd->resp_pkt = NULL;
6516 }
6517 
6518 void
6519 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6520 {
6521 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6522 	struct iwm_tx_data *data;
6523 
6524 	if (qid != sc->cmdqid) {
6525 		return;	/* Not a command ack. */
6526 	}
6527 
6528 	data = &ring->data[idx];
6529 
6530 	if (data->m != NULL) {
6531 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6532 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6533 		bus_dmamap_unload(sc->sc_dmat, data->map);
6534 		m_freem(data->m);
6535 		data->m = NULL;
6536 	}
6537 	wakeup(&ring->desc[idx]);
6538 
6539 	if (ring->queued == 0) {
6540 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6541 		    DEVNAME(sc), code));
6542 	} else if (--ring->queued == 0) {
6543 		/*
6544 		 * 7000 family NICs are locked while commands are in progress.
6545 		 * All commands are now done so we may unlock the NIC again.
6546 		 */
6547 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6548 			iwm_nic_unlock(sc);
6549 	}
6550 }
6551 
6552 void
6553 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6554     uint16_t len)
6555 {
6556 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6557 	uint16_t val;
6558 
6559 	scd_bc_tbl = sc->sched_dma.vaddr;
6560 
6561 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6562 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6563 		len = roundup(len, 4) / 4;
6564 
6565 	val = htole16(sta_id << 12 | len);
6566 
6567 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6568 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6569 
6570 	/* Update TX scheduler. */
6571 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6572 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6573 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6574 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6575 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6576 }
6577 
6578 void
6579 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6580 {
6581 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6582 	uint16_t val;
6583 
6584 	scd_bc_tbl = sc->sched_dma.vaddr;
6585 
6586 	val = htole16(1 | (sta_id << 12));
6587 
6588 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6589 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6590 
6591 	/* Update TX scheduler. */
6592 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6593 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6594 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6595 
6596 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6597 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6598 }
6599 
6600 /*
6601  * Fill in various bit for management frames, and leave them
6602  * unfilled for data frames (firmware takes care of that).
6603  * Return the selected legacy TX rate, or zero if HT/VHT is used.
6604  */
6605 uint8_t
6606 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6607     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6608 {
6609 	struct ieee80211com *ic = &sc->sc_ic;
6610 	struct ieee80211_node *ni = &in->in_ni;
6611 	const struct iwm_rate *rinfo;
6612 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6613 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6614 	int ridx, rate_flags;
6615 	uint8_t rate = 0;
6616 
6617 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6618 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6619 
6620 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6621 	    type != IEEE80211_FC0_TYPE_DATA) {
6622 		/* for non-data, use the lowest supported rate */
6623 		ridx = min_ridx;
6624 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6625 	} else if (ic->ic_fixed_mcs != -1) {
6626 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6627 			ridx = IWM_FIRST_OFDM_RATE;
6628 		else
6629 			ridx = sc->sc_fixed_ridx;
6630 	} else if (ic->ic_fixed_rate != -1) {
6631 		ridx = sc->sc_fixed_ridx;
6632  	} else {
6633 		int i;
6634 		/* Use firmware rateset retry table. */
6635 		tx->initial_rate_index = 0;
6636 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6637 		if (ni->ni_flags & IEEE80211_NODE_HT) /* VHT implies HT */
6638 			return 0;
6639 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6640 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6641 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6642 			if (iwm_rates[i].rate == (ni->ni_txrate &
6643 			    IEEE80211_RATE_VAL)) {
6644 				ridx = i;
6645 				break;
6646 			}
6647 		}
6648 		return iwm_rates[ridx].rate & 0xff;
6649 	}
6650 
6651 	rinfo = &iwm_rates[ridx];
6652 	if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 &&
6653 	    iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6654 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6655 	else
6656 		rate_flags = iwm_valid_siso_ant_rate_mask(sc);
6657 	if (IWM_RIDX_IS_CCK(ridx))
6658 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
6659 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6660 	    type == IEEE80211_FC0_TYPE_DATA &&
6661 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6662 		uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
6663 		uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
6664 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
6665 		    IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
6666 		    ieee80211_node_supports_vht_chan80(ni))
6667 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
6668 		else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
6669 		    ieee80211_node_supports_ht_chan40(ni))
6670 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6671 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6672 			rate_flags |= IWM_RATE_MCS_VHT_MSK;
6673 		else
6674 			rate_flags |= IWM_RATE_MCS_HT_MSK;
6675 		if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
6676 		    in->in_phyctxt != NULL &&
6677 		    in->in_phyctxt->vht_chan_width == vht_chan_width) {
6678 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_80;
6679 			if (ieee80211_node_supports_vht_sgi80(ni))
6680 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6681 		} else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6682 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
6683 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6684 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6685 			if (ieee80211_node_supports_ht_sgi40(ni))
6686 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6687 		} else if (ieee80211_node_supports_ht_sgi20(ni))
6688 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
6689 		if (ni->ni_flags & IEEE80211_NODE_VHT) {
6690 			/*
6691 			 * ifmedia only provides an MCS index, no NSS.
6692 			 * Use a fixed SISO rate.
6693 			 */
6694 			tx->rate_n_flags = htole32(rate_flags |
6695 			    (ic->ic_fixed_mcs &
6696 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK));
6697 		} else
6698 			tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6699 	} else
6700 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6701 
6702 	return rate;
6703 }
6704 
6705 #define TB0_SIZE 16
6706 int
6707 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6708 {
6709 	struct ieee80211com *ic = &sc->sc_ic;
6710 	struct iwm_node *in = (void *)ni;
6711 	struct iwm_tx_ring *ring;
6712 	struct iwm_tx_data *data;
6713 	struct iwm_tfd *desc;
6714 	struct iwm_device_cmd *cmd;
6715 	struct iwm_tx_cmd *tx;
6716 	struct ieee80211_frame *wh;
6717 	struct ieee80211_key *k = NULL;
6718 	uint8_t rate;
6719 	uint8_t *ivp;
6720 	uint32_t flags;
6721 	u_int hdrlen;
6722 	bus_dma_segment_t *seg;
6723 	uint8_t tid, type, subtype;
6724 	int i, totlen, err, pad;
6725 	int qid, hasqos;
6726 
6727 	wh = mtod(m, struct ieee80211_frame *);
6728 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6729 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6730 	if (type == IEEE80211_FC0_TYPE_CTL)
6731 		hdrlen = sizeof(struct ieee80211_frame_min);
6732 	else
6733 		hdrlen = ieee80211_get_hdrlen(wh);
6734 
6735 	hasqos = ieee80211_has_qos(wh);
6736 	if (type == IEEE80211_FC0_TYPE_DATA)
6737 		tid = IWM_TID_NON_QOS;
6738 	else
6739 		tid = IWM_MAX_TID_COUNT;
6740 
6741 	/*
6742 	 * Map EDCA categories to Tx data queues.
6743 	 *
6744 	 * We use static data queue assignments even in DQA mode. We do not
6745 	 * need to share Tx queues between stations because we only implement
6746 	 * client mode; the firmware's station table contains only one entry
6747 	 * which represents our access point.
6748 	 */
6749 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6750 		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6751 	else
6752 		qid = ac;
6753 
6754 	/* If possible, put this frame on an aggregation queue. */
6755 	if (hasqos) {
6756 		struct ieee80211_tx_ba *ba;
6757 		uint16_t qos = ieee80211_get_qos(wh);
6758 		int qostid = qos & IEEE80211_QOS_TID;
6759 		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6760 
6761 		ba = &ni->ni_tx_ba[qostid];
6762 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6763 		    type == IEEE80211_FC0_TYPE_DATA &&
6764 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6765 		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6766 		    ba->ba_state == IEEE80211_BA_AGREED) {
6767 			qid = agg_qid;
6768 			tid = qostid;
6769 			ac = ieee80211_up_to_ac(ic, qostid);
6770 		}
6771 	}
6772 
6773 	ring = &sc->txq[qid];
6774 	desc = &ring->desc[ring->cur];
6775 	memset(desc, 0, sizeof(*desc));
6776 	data = &ring->data[ring->cur];
6777 
6778 	cmd = &ring->cmd[ring->cur];
6779 	cmd->hdr.code = IWM_TX_CMD;
6780 	cmd->hdr.flags = 0;
6781 	cmd->hdr.qid = ring->qid;
6782 	cmd->hdr.idx = ring->cur;
6783 
6784 	tx = (void *)cmd->data;
6785 	memset(tx, 0, sizeof(*tx));
6786 
6787 	rate = iwm_tx_fill_cmd(sc, in, wh, tx);
6788 
6789 #if NBPFILTER > 0
6790 	if (sc->sc_drvbpf != NULL) {
6791 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6792 		uint16_t chan_flags;
6793 
6794 		tap->wt_flags = 0;
6795 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6796 		chan_flags = ni->ni_chan->ic_flags;
6797 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6798 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6799 			chan_flags &= ~IEEE80211_CHAN_HT;
6800 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6801 		}
6802 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6803 			chan_flags &= ~IEEE80211_CHAN_VHT;
6804 		tap->wt_chan_flags = htole16(chan_flags);
6805 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6806 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6807 		    type == IEEE80211_FC0_TYPE_DATA) {
6808 			tap->wt_rate = (0x80 | ni->ni_txmcs);
6809 		} else
6810 			tap->wt_rate = rate;
6811 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6812 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6813 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6814 
6815 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6816 		    m, BPF_DIRECTION_OUT);
6817 	}
6818 #endif
6819 	totlen = m->m_pkthdr.len;
6820 
6821 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6822 		k = ieee80211_get_txkey(ic, wh, ni);
6823 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6824 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6825 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6826 				return ENOBUFS;
6827 			/* 802.11 header may have moved. */
6828 			wh = mtod(m, struct ieee80211_frame *);
6829 			totlen = m->m_pkthdr.len;
6830 			k = NULL; /* skip hardware crypto below */
6831 		} else {
6832 			/* HW appends CCMP MIC */
6833 			totlen += IEEE80211_CCMP_HDRLEN;
6834 		}
6835 	}
6836 
6837 	flags = 0;
6838 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6839 		flags |= IWM_TX_CMD_FLG_ACK;
6840 	}
6841 
6842 	if (type == IEEE80211_FC0_TYPE_DATA &&
6843 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6844 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6845 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
6846 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6847 
6848 	tx->sta_id = IWM_STATION_ID;
6849 
6850 	if (type == IEEE80211_FC0_TYPE_MGT) {
6851 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6852 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6853 			tx->pm_frame_timeout = htole16(3);
6854 		else
6855 			tx->pm_frame_timeout = htole16(2);
6856 	} else {
6857 		if (type == IEEE80211_FC0_TYPE_CTL &&
6858 		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6859 			struct ieee80211_frame_min *mwh;
6860 			uint8_t *barfrm;
6861 			uint16_t ctl;
6862 			mwh = mtod(m, struct ieee80211_frame_min *);
6863 			barfrm = (uint8_t *)&mwh[1];
6864 			ctl = LE_READ_2(barfrm);
6865 			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6866 			    IEEE80211_BA_TID_INFO_SHIFT;
6867 			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6868 			tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6869 		}
6870 
6871 		tx->pm_frame_timeout = htole16(0);
6872 	}
6873 
6874 	if (hdrlen & 3) {
6875 		/* First segment length must be a multiple of 4. */
6876 		flags |= IWM_TX_CMD_FLG_MH_PAD;
6877 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6878 		pad = 4 - (hdrlen & 3);
6879 	} else
6880 		pad = 0;
6881 
6882 	tx->len = htole16(totlen);
6883 	tx->tid_tspec = tid;
6884 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6885 
6886 	/* Set physical address of "scratch area". */
6887 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6888 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6889 
6890 	/* Copy 802.11 header in TX command. */
6891 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6892 
6893 	if  (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6894 		/* Trim 802.11 header and prepend CCMP IV. */
6895 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6896 		ivp = mtod(m, u_int8_t *);
6897 		k->k_tsc++;	/* increment the 48-bit PN */
6898 		ivp[0] = k->k_tsc; /* PN0 */
6899 		ivp[1] = k->k_tsc >> 8; /* PN1 */
6900 		ivp[2] = 0;        /* Rsvd */
6901 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6902 		ivp[4] = k->k_tsc >> 16; /* PN2 */
6903 		ivp[5] = k->k_tsc >> 24; /* PN3 */
6904 		ivp[6] = k->k_tsc >> 32; /* PN4 */
6905 		ivp[7] = k->k_tsc >> 40; /* PN5 */
6906 
6907 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6908 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6909 		/* TX scheduler includes CCMP MIC length. */
6910 		totlen += IEEE80211_CCMP_MICLEN;
6911 	} else {
6912 		/* Trim 802.11 header. */
6913 		m_adj(m, hdrlen);
6914 		tx->sec_ctl = 0;
6915 	}
6916 
6917 	flags |= IWM_TX_CMD_FLG_BT_DIS;
6918 	if (!hasqos)
6919 		flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6920 
6921 	tx->tx_flags |= htole32(flags);
6922 
6923 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6924 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6925 	if (err && err != EFBIG) {
6926 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6927 		m_freem(m);
6928 		return err;
6929 	}
6930 	if (err) {
6931 		/* Too many DMA segments, linearize mbuf. */
6932 		if (m_defrag(m, M_DONTWAIT)) {
6933 			m_freem(m);
6934 			return ENOBUFS;
6935 		}
6936 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6937 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6938 		if (err) {
6939 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6940 			    err);
6941 			m_freem(m);
6942 			return err;
6943 		}
6944 	}
6945 	data->m = m;
6946 	data->in = in;
6947 	data->txmcs = ni->ni_txmcs;
6948 	data->txrate = ni->ni_txrate;
6949 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6950 	data->ampdu_txnss = ni->ni_vht_ss; /* updated upon Tx interrupt */
6951 
6952 	/* Fill TX descriptor. */
6953 	desc->num_tbs = 2 + data->map->dm_nsegs;
6954 
6955 	desc->tbs[0].lo = htole32(data->cmd_paddr);
6956 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6957 	    (TB0_SIZE << 4));
6958 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6959 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6960 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6961 	      + hdrlen + pad - TB0_SIZE) << 4));
6962 
6963 	/* Other DMA segments are for data payload. */
6964 	seg = data->map->dm_segs;
6965 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6966 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
6967 		desc->tbs[i+2].hi_n_len = \
6968 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
6969 		    | ((seg->ds_len) << 4));
6970 	}
6971 
6972 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6973 	    BUS_DMASYNC_PREWRITE);
6974 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6975 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6976 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6977 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6978 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6979 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6980 
6981 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6982 
6983 	/* Kick TX ring. */
6984 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6985 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6986 
6987 	/* Mark TX ring as full if we reach a certain threshold. */
6988 	if (++ring->queued > IWM_TX_RING_HIMARK) {
6989 		sc->qfullmsk |= 1 << ring->qid;
6990 	}
6991 
6992 	if (ic->ic_if.if_flags & IFF_UP)
6993 		sc->sc_tx_timer[ring->qid] = 15;
6994 
6995 	return 0;
6996 }
6997 
6998 int
6999 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
7000 {
7001 	struct iwm_tx_path_flush_cmd flush_cmd = {
7002 		.sta_id = htole32(IWM_STATION_ID),
7003 		.tid_mask = htole16(0xffff),
7004 	};
7005 	int err;
7006 
7007 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
7008 	    sizeof(flush_cmd), &flush_cmd);
7009 	if (err)
7010                 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
7011 	return err;
7012 }
7013 
7014 #define IWM_FLUSH_WAIT_MS	2000
7015 
7016 int
7017 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
7018 {
7019 	int i, err;
7020 
7021 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
7022 		struct iwm_tx_ring *ring = &sc->txq[i];
7023 
7024 		if (i == sc->cmdqid)
7025 			continue;
7026 
7027 		while (ring->queued > 0) {
7028 			err = tsleep_nsec(ring, 0, "iwmflush",
7029 			    MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
7030 			if (err)
7031 				return err;
7032 		}
7033 	}
7034 
7035 	return 0;
7036 }
7037 
7038 void
7039 iwm_led_enable(struct iwm_softc *sc)
7040 {
7041 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
7042 }
7043 
7044 void
7045 iwm_led_disable(struct iwm_softc *sc)
7046 {
7047 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
7048 }
7049 
7050 int
7051 iwm_led_is_enabled(struct iwm_softc *sc)
7052 {
7053 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
7054 }
7055 
7056 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
7057 
7058 void
7059 iwm_led_blink_timeout(void *arg)
7060 {
7061 	struct iwm_softc *sc = arg;
7062 
7063 	if (iwm_led_is_enabled(sc))
7064 		iwm_led_disable(sc);
7065 	else
7066 		iwm_led_enable(sc);
7067 
7068 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7069 }
7070 
7071 void
7072 iwm_led_blink_start(struct iwm_softc *sc)
7073 {
7074 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7075 	iwm_led_enable(sc);
7076 }
7077 
7078 void
7079 iwm_led_blink_stop(struct iwm_softc *sc)
7080 {
7081 	timeout_del(&sc->sc_led_blink_to);
7082 	iwm_led_disable(sc);
7083 }
7084 
7085 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
7086 
7087 int
7088 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
7089     struct iwm_beacon_filter_cmd *cmd)
7090 {
7091 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
7092 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
7093 }
7094 
7095 void
7096 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
7097     struct iwm_beacon_filter_cmd *cmd)
7098 {
7099 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
7100 }
7101 
7102 int
7103 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
7104 {
7105 	struct iwm_beacon_filter_cmd cmd = {
7106 		IWM_BF_CMD_CONFIG_DEFAULTS,
7107 		.bf_enable_beacon_filter = htole32(1),
7108 		.ba_enable_beacon_abort = htole32(enable),
7109 	};
7110 
7111 	if (!sc->sc_bf.bf_enabled)
7112 		return 0;
7113 
7114 	sc->sc_bf.ba_enabled = enable;
7115 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7116 	return iwm_beacon_filter_send_cmd(sc, &cmd);
7117 }
7118 
7119 void
7120 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
7121     struct iwm_mac_power_cmd *cmd)
7122 {
7123 	struct ieee80211com *ic = &sc->sc_ic;
7124 	struct ieee80211_node *ni = &in->in_ni;
7125 	int dtim_period, dtim_msec, keep_alive;
7126 
7127 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7128 	    in->in_color));
7129 	if (ni->ni_dtimperiod)
7130 		dtim_period = ni->ni_dtimperiod;
7131 	else
7132 		dtim_period = 1;
7133 
7134 	/*
7135 	 * Regardless of power management state the driver must set
7136 	 * keep alive period. FW will use it for sending keep alive NDPs
7137 	 * immediately after association. Check that keep alive period
7138 	 * is at least 3 * DTIM.
7139 	 */
7140 	dtim_msec = dtim_period * ni->ni_intval;
7141 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
7142 	keep_alive = roundup(keep_alive, 1000) / 1000;
7143 	cmd->keep_alive_seconds = htole16(keep_alive);
7144 
7145 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7146 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7147 }
7148 
7149 int
7150 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
7151 {
7152 	int err;
7153 	int ba_enable;
7154 	struct iwm_mac_power_cmd cmd;
7155 
7156 	memset(&cmd, 0, sizeof(cmd));
7157 
7158 	iwm_power_build_cmd(sc, in, &cmd);
7159 
7160 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
7161 	    sizeof(cmd), &cmd);
7162 	if (err != 0)
7163 		return err;
7164 
7165 	ba_enable = !!(cmd.flags &
7166 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
7167 	return iwm_update_beacon_abort(sc, in, ba_enable);
7168 }
7169 
7170 int
7171 iwm_power_update_device(struct iwm_softc *sc)
7172 {
7173 	struct iwm_device_power_cmd cmd = { };
7174 	struct ieee80211com *ic = &sc->sc_ic;
7175 
7176 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7177 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7178 
7179 	return iwm_send_cmd_pdu(sc,
7180 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
7181 }
7182 
7183 int
7184 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
7185 {
7186 	struct iwm_beacon_filter_cmd cmd = {
7187 		IWM_BF_CMD_CONFIG_DEFAULTS,
7188 		.bf_enable_beacon_filter = htole32(1),
7189 	};
7190 	int err;
7191 
7192 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7193 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7194 
7195 	if (err == 0)
7196 		sc->sc_bf.bf_enabled = 1;
7197 
7198 	return err;
7199 }
7200 
7201 int
7202 iwm_disable_beacon_filter(struct iwm_softc *sc)
7203 {
7204 	struct iwm_beacon_filter_cmd cmd;
7205 	int err;
7206 
7207 	memset(&cmd, 0, sizeof(cmd));
7208 
7209 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7210 	if (err == 0)
7211 		sc->sc_bf.bf_enabled = 0;
7212 
7213 	return err;
7214 }
7215 
7216 int
7217 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
7218 {
7219 	struct iwm_add_sta_cmd add_sta_cmd;
7220 	int err;
7221 	uint32_t status, aggsize;
7222 	const uint32_t max_aggsize = (IWM_STA_FLG_MAX_AGG_SIZE_64K >>
7223 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT);
7224 	size_t cmdsize;
7225 	struct ieee80211com *ic = &sc->sc_ic;
7226 
7227 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
7228 		panic("STA already added");
7229 
7230 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
7231 
7232 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7233 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7234 	else
7235 		add_sta_cmd.sta_id = IWM_STATION_ID;
7236 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
7237 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7238 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
7239 		else
7240 			add_sta_cmd.station_type = IWM_STA_LINK;
7241 	}
7242 	add_sta_cmd.mac_id_n_color
7243 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
7244 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7245 		int qid;
7246 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
7247 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7248 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7249 		else
7250 			qid = IWM_AUX_QUEUE;
7251 		in->tfd_queue_msk |= (1 << qid);
7252 	} else {
7253 		int ac;
7254 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7255 			int qid = ac;
7256 			if (isset(sc->sc_enabled_capa,
7257 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7258 				qid += IWM_DQA_MIN_MGMT_QUEUE;
7259 			in->tfd_queue_msk |= (1 << qid);
7260 		}
7261 	}
7262 	if (!update) {
7263 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7264 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7265 			    etherbroadcastaddr);
7266 		else
7267 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7268 			    in->in_macaddr);
7269 	}
7270 	add_sta_cmd.add_modify = update ? 1 : 0;
7271 	add_sta_cmd.station_flags_msk
7272 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
7273 	if (update) {
7274 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
7275 		    IWM_STA_MODIFY_TID_DISABLE_TX);
7276 	}
7277 	add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
7278 	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);
7279 
7280 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7281 		add_sta_cmd.station_flags_msk
7282 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
7283 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
7284 
7285 		if (iwm_mimo_enabled(sc)) {
7286 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7287 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
7288 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
7289 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
7290 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
7291 					add_sta_cmd.station_flags |=
7292 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7293 				}
7294 			} else {
7295 				if (in->in_ni.ni_rxmcs[1] != 0) {
7296 					add_sta_cmd.station_flags |=
7297 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7298 				}
7299 				if (in->in_ni.ni_rxmcs[2] != 0) {
7300 					add_sta_cmd.station_flags |=
7301 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
7302 				}
7303 			}
7304 		}
7305 
7306 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
7307 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
7308 			add_sta_cmd.station_flags |= htole32(
7309 			    IWM_STA_FLG_FAT_EN_40MHZ);
7310 		}
7311 
7312 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7313 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
7314 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
7315 				add_sta_cmd.station_flags |= htole32(
7316 				    IWM_STA_FLG_FAT_EN_80MHZ);
7317 			}
7318 			aggsize = (in->in_ni.ni_vhtcaps &
7319 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
7320 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
7321 		} else {
7322 			aggsize = (in->in_ni.ni_ampdu_param &
7323 			    IEEE80211_AMPDU_PARAM_LE);
7324 		}
7325 		if (aggsize > max_aggsize)
7326 			aggsize = max_aggsize;
7327 		add_sta_cmd.station_flags |= htole32((aggsize <<
7328 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT) &
7329 		    IWM_STA_FLG_MAX_AGG_SIZE_MSK);
7330 
7331 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
7332 		case IEEE80211_AMPDU_PARAM_SS_2:
7333 			add_sta_cmd.station_flags
7334 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
7335 			break;
7336 		case IEEE80211_AMPDU_PARAM_SS_4:
7337 			add_sta_cmd.station_flags
7338 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
7339 			break;
7340 		case IEEE80211_AMPDU_PARAM_SS_8:
7341 			add_sta_cmd.station_flags
7342 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
7343 			break;
7344 		case IEEE80211_AMPDU_PARAM_SS_16:
7345 			add_sta_cmd.station_flags
7346 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
7347 			break;
7348 		default:
7349 			break;
7350 		}
7351 	}
7352 
7353 	status = IWM_ADD_STA_SUCCESS;
7354 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7355 		cmdsize = sizeof(add_sta_cmd);
7356 	else
7357 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7358 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
7359 	    &add_sta_cmd, &status);
7360 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7361 		err = EIO;
7362 
7363 	return err;
7364 }
7365 
7366 int
7367 iwm_add_aux_sta(struct iwm_softc *sc)
7368 {
7369 	struct iwm_add_sta_cmd cmd;
7370 	int err, qid;
7371 	uint32_t status;
7372 	size_t cmdsize;
7373 
7374 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7375 		qid = IWM_DQA_AUX_QUEUE;
7376 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
7377 		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
7378 	} else {
7379 		qid = IWM_AUX_QUEUE;
7380 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
7381 	}
7382 	if (err)
7383 		return err;
7384 
7385 	memset(&cmd, 0, sizeof(cmd));
7386 	cmd.sta_id = IWM_AUX_STA_ID;
7387 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7388 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
7389 	cmd.mac_id_n_color =
7390 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
7391 	cmd.tfd_queue_msk = htole32(1 << qid);
7392 	cmd.tid_disable_tx = htole16(0xffff);
7393 
7394 	status = IWM_ADD_STA_SUCCESS;
7395 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7396 		cmdsize = sizeof(cmd);
7397 	else
7398 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7399 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
7400 	    &status);
7401 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7402 		err = EIO;
7403 
7404 	return err;
7405 }
7406 
7407 int
7408 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
7409 {
7410 	struct iwm_add_sta_cmd cmd;
7411 	int err;
7412 	uint32_t status;
7413 	size_t cmdsize;
7414 
7415 	memset(&cmd, 0, sizeof(cmd));
7416 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7417 	    in->in_color));
7418 	cmd.sta_id = IWM_STATION_ID;
7419 	cmd.add_modify = IWM_STA_MODE_MODIFY;
7420 	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
7421 	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);
7422 
7423 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7424 		cmdsize = sizeof(cmd);
7425 	else
7426 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7427 
7428 	status = IWM_ADD_STA_SUCCESS;
7429 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
7430 	    cmdsize, &cmd, &status);
7431 	if (err) {
7432 		printf("%s: could not update sta (error %d)\n",
7433 		    DEVNAME(sc), err);
7434 		return err;
7435 	}
7436 
7437 	switch (status & IWM_ADD_STA_STATUS_MASK) {
7438 	case IWM_ADD_STA_SUCCESS:
7439 		break;
7440 	default:
7441 		err = EIO;
7442 		printf("%s: Couldn't %s draining for station\n",
7443 		    DEVNAME(sc), drain ? "enable" : "disable");
7444 		break;
7445 	}
7446 
7447 	return err;
7448 }
7449 
7450 int
7451 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
7452 {
7453 	int err;
7454 
7455 	sc->sc_flags |= IWM_FLAG_TXFLUSH;
7456 
7457 	err = iwm_drain_sta(sc, in, 1);
7458 	if (err)
7459 		goto done;
7460 
7461 	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
7462 	if (err) {
7463 		printf("%s: could not flush Tx path (error %d)\n",
7464 		    DEVNAME(sc), err);
7465 		goto done;
7466 	}
7467 
7468 	/*
7469 	 * Flushing Tx rings may fail if the AP has disappeared.
7470 	 * We can rely on iwm_newstate_task() to reset everything and begin
7471 	 * scanning again if we are left with outstanding frames on queues.
7472 	 */
7473 	err = iwm_wait_tx_queues_empty(sc);
7474 	if (err)
7475 		goto done;
7476 
7477 	err = iwm_drain_sta(sc, in, 0);
7478 done:
7479 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
7480 	return err;
7481 }
7482 
7483 int
7484 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
7485 {
7486 	struct ieee80211com *ic = &sc->sc_ic;
7487 	struct iwm_rm_sta_cmd rm_sta_cmd;
7488 	int err;
7489 
7490 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
7491 		panic("sta already removed");
7492 
7493 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
7494 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7495 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7496 	else
7497 		rm_sta_cmd.sta_id = IWM_STATION_ID;
7498 
7499 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7500 	    &rm_sta_cmd);
7501 
7502 	return err;
7503 }
7504 
7505 uint16_t
7506 iwm_scan_rx_chain(struct iwm_softc *sc)
7507 {
7508 	uint16_t rx_chain;
7509 	uint8_t rx_ant;
7510 
7511 	rx_ant = iwm_fw_valid_rx_ant(sc);
7512 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
7513 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
7514 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
7515 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
7516 	return htole16(rx_chain);
7517 }
7518 
7519 uint32_t
7520 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
7521 {
7522 	uint32_t tx_ant;
7523 	int i, ind;
7524 
7525 	for (i = 0, ind = sc->sc_scan_last_antenna;
7526 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
7527 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
7528 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
7529 			sc->sc_scan_last_antenna = ind;
7530 			break;
7531 		}
7532 	}
7533 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
7534 
7535 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
7536 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
7537 				   tx_ant);
7538 	else
7539 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
7540 }
7541 
7542 uint8_t
7543 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
7544     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
7545 {
7546 	struct ieee80211com *ic = &sc->sc_ic;
7547 	struct ieee80211_channel *c;
7548 	uint8_t nchan;
7549 
7550 	for (nchan = 0, c = &ic->ic_channels[1];
7551 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7552 	    nchan < sc->sc_capa_n_scan_channels;
7553 	    c++) {
7554 		if (c->ic_flags == 0)
7555 			continue;
7556 
7557 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
7558 		chan->iter_count = htole16(1);
7559 		chan->iter_interval = 0;
7560 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
7561 		if (n_ssids != 0 && !bgscan)
7562 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
7563 		chan++;
7564 		nchan++;
7565 	}
7566 
7567 	return nchan;
7568 }
7569 
7570 uint8_t
7571 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
7572     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
7573 {
7574 	struct ieee80211com *ic = &sc->sc_ic;
7575 	struct ieee80211_channel *c;
7576 	uint8_t nchan;
7577 
7578 	for (nchan = 0, c = &ic->ic_channels[1];
7579 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7580 	    nchan < sc->sc_capa_n_scan_channels;
7581 	    c++) {
7582 		if (c->ic_flags == 0)
7583 			continue;
7584 
7585 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
7586 		chan->iter_count = 1;
7587 		chan->iter_interval = htole16(0);
7588 		if (n_ssids != 0 && !bgscan)
7589 			chan->flags = htole32(1 << 0); /* select SSID 0 */
7590 		chan++;
7591 		nchan++;
7592 	}
7593 
7594 	return nchan;
7595 }
7596 
7597 int
7598 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
7599 {
7600 	struct iwm_scan_probe_req preq2;
7601 	int err, i;
7602 
7603 	err = iwm_fill_probe_req(sc, &preq2);
7604 	if (err)
7605 		return err;
7606 
7607 	preq1->mac_header = preq2.mac_header;
7608 	for (i = 0; i < nitems(preq1->band_data); i++)
7609 		preq1->band_data[i] = preq2.band_data[i];
7610 	preq1->common_data = preq2.common_data;
7611 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
7612 	return 0;
7613 }
7614 
7615 int
7616 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7617 {
7618 	struct ieee80211com *ic = &sc->sc_ic;
7619 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7620 	struct ieee80211_rateset *rs;
7621 	size_t remain = sizeof(preq->buf);
7622 	uint8_t *frm, *pos;
7623 
7624 	memset(preq, 0, sizeof(*preq));
7625 
7626 	if (remain < sizeof(*wh) + 2)
7627 		return ENOBUFS;
7628 
7629 	/*
7630 	 * Build a probe request frame.  Most of the following code is a
7631 	 * copy & paste of what is done in net80211.
7632 	 */
7633 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7634 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7635 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7636 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7637 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7638 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7639 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7640 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7641 
7642 	frm = (uint8_t *)(wh + 1);
7643 
7644 	*frm++ = IEEE80211_ELEMID_SSID;
7645 	*frm++ = 0;
7646 	/* hardware inserts SSID */
7647 
7648 	/* Tell firmware where the MAC header and SSID IE are. */
7649 	preq->mac_header.offset = 0;
7650 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7651 	remain -= frm - (uint8_t *)wh;
7652 
7653 	/* Fill in 2GHz IEs and tell firmware where they are. */
7654 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7655 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7656 		if (remain < 4 + rs->rs_nrates)
7657 			return ENOBUFS;
7658 	} else if (remain < 2 + rs->rs_nrates)
7659 		return ENOBUFS;
7660 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7661 	pos = frm;
7662 	frm = ieee80211_add_rates(frm, rs);
7663 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7664 		frm = ieee80211_add_xrates(frm, rs);
7665 	remain -= frm - pos;
7666 
7667 	if (isset(sc->sc_enabled_capa,
7668 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7669 		if (remain < 3)
7670 			return ENOBUFS;
7671 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7672 		*frm++ = 1;
7673 		*frm++ = 0;
7674 		remain -= 3;
7675 	}
7676 	preq->band_data[0].len = htole16(frm - pos);
7677 
7678 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7679 		/* Fill in 5GHz IEs. */
7680 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7681 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7682 			if (remain < 4 + rs->rs_nrates)
7683 				return ENOBUFS;
7684 		} else if (remain < 2 + rs->rs_nrates)
7685 			return ENOBUFS;
7686 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7687 		pos = frm;
7688 		frm = ieee80211_add_rates(frm, rs);
7689 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7690 			frm = ieee80211_add_xrates(frm, rs);
7691 		preq->band_data[1].len = htole16(frm - pos);
7692 		remain -= frm - pos;
7693 		if (ic->ic_flags & IEEE80211_F_VHTON) {
7694 			if (remain < 14)
7695 				return ENOBUFS;
7696 			frm = ieee80211_add_vhtcaps(frm, ic);
7697 			remain -= frm - pos;
7698 		}
7699 	}
7700 
7701 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7702 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7703 	pos = frm;
7704 	if (ic->ic_flags & IEEE80211_F_HTON) {
7705 		if (remain < 28)
7706 			return ENOBUFS;
7707 		frm = ieee80211_add_htcaps(frm, ic);
7708 		/* XXX add WME info? */
7709 		remain -= frm - pos;
7710 	}
7711 
7712 	preq->common_data.len = htole16(frm - pos);
7713 
7714 	return 0;
7715 }
7716 
7717 int
7718 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7719 {
7720 	struct ieee80211com *ic = &sc->sc_ic;
7721 	struct iwm_host_cmd hcmd = {
7722 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
7723 		.len = { 0, },
7724 		.data = { NULL, },
7725 		.flags = 0,
7726 	};
7727 	struct iwm_scan_req_lmac *req;
7728 	struct iwm_scan_probe_req_v1 *preq;
7729 	size_t req_len;
7730 	int err, async = bgscan;
7731 
7732 	req_len = sizeof(struct iwm_scan_req_lmac) +
7733 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7734 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7735 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7736 		return ENOMEM;
7737 	req = malloc(req_len, M_DEVBUF,
7738 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7739 	if (req == NULL)
7740 		return ENOMEM;
7741 
7742 	hcmd.len[0] = (uint16_t)req_len;
7743 	hcmd.data[0] = (void *)req;
7744 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7745 
7746 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7747 	req->active_dwell = 10;
7748 	req->passive_dwell = 110;
7749 	req->fragmented_dwell = 44;
7750 	req->extended_dwell = 90;
7751 	if (bgscan) {
7752 		req->max_out_time = htole32(120);
7753 		req->suspend_time = htole32(120);
7754 	} else {
7755 		req->max_out_time = htole32(0);
7756 		req->suspend_time = htole32(0);
7757 	}
7758 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
7759 	req->rx_chain_select = iwm_scan_rx_chain(sc);
7760 	req->iter_num = htole32(1);
7761 	req->delay = 0;
7762 
7763 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
7764 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
7765 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
7766 	if (ic->ic_des_esslen == 0)
7767 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
7768 	else
7769 		req->scan_flags |=
7770 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
7771 	if (isset(sc->sc_enabled_capa,
7772 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
7773 	    isset(sc->sc_enabled_capa,
7774 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
7775 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
7776 
7777 	req->flags = htole32(IWM_PHY_BAND_24);
7778 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7779 		req->flags |= htole32(IWM_PHY_BAND_5);
7780 	req->filter_flags =
7781 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
7782 
7783 	/* Tx flags 2 GHz. */
7784 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7785 	    IWM_TX_CMD_FLG_BT_DIS);
7786 	req->tx_cmd[0].rate_n_flags =
7787 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7788 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
7789 
7790 	/* Tx flags 5 GHz. */
7791 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7792 	    IWM_TX_CMD_FLG_BT_DIS);
7793 	req->tx_cmd[1].rate_n_flags =
7794 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7795 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
7796 
7797 	/* Check if we're doing an active directed scan. */
7798 	if (ic->ic_des_esslen != 0) {
7799 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7800 		req->direct_scan[0].len = ic->ic_des_esslen;
7801 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
7802 		    ic->ic_des_esslen);
7803 	}
7804 
7805 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
7806 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
7807 	    ic->ic_des_esslen != 0, bgscan);
7808 
7809 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
7810 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7811 	    sc->sc_capa_n_scan_channels));
7812 	err = iwm_fill_probe_req_v1(sc, preq);
7813 	if (err) {
7814 		free(req, M_DEVBUF, req_len);
7815 		return err;
7816 	}
7817 
7818 	/* Specify the scan plan: We'll do one iteration. */
7819 	req->schedule[0].iterations = 1;
7820 	req->schedule[0].full_scan_mul = 1;
7821 
7822 	/* Disable EBS. */
7823 	req->channel_opt[0].non_ebs_ratio = 1;
7824 	req->channel_opt[1].non_ebs_ratio = 1;
7825 
7826 	err = iwm_send_cmd(sc, &hcmd);
7827 	free(req, M_DEVBUF, req_len);
7828 	return err;
7829 }
7830 
7831 int
7832 iwm_config_umac_scan(struct iwm_softc *sc)
7833 {
7834 	struct ieee80211com *ic = &sc->sc_ic;
7835 	struct iwm_scan_config *scan_config;
7836 	int err, nchan;
7837 	size_t cmd_size;
7838 	struct ieee80211_channel *c;
7839 	struct iwm_host_cmd hcmd = {
7840 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
7841 		.flags = 0,
7842 	};
7843 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
7844 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
7845 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
7846 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
7847 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
7848 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
7849 	    IWM_SCAN_CONFIG_RATE_54M);
7850 
7851 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7852 
7853 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
7854 	if (scan_config == NULL)
7855 		return ENOMEM;
7856 
7857 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7858 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7859 	scan_config->legacy_rates = htole32(rates |
7860 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
7861 
7862 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7863 	scan_config->dwell_active = 10;
7864 	scan_config->dwell_passive = 110;
7865 	scan_config->dwell_fragmented = 44;
7866 	scan_config->dwell_extended = 90;
7867 	scan_config->out_of_channel_time = htole32(0);
7868 	scan_config->suspend_time = htole32(0);
7869 
7870 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7871 
7872 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
7873 	scan_config->channel_flags = 0;
7874 
7875 	for (c = &ic->ic_channels[1], nchan = 0;
7876 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7877 	    nchan < sc->sc_capa_n_scan_channels; c++) {
7878 		if (c->ic_flags == 0)
7879 			continue;
7880 		scan_config->channel_array[nchan++] =
7881 		    ieee80211_mhz2ieee(c->ic_freq, 0);
7882 	}
7883 
7884 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
7885 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
7886 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
7887 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
7888 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
7889 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
7890 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
7891 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
7892 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
7893 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
7894 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
7895 
7896 	hcmd.data[0] = scan_config;
7897 	hcmd.len[0] = cmd_size;
7898 
7899 	err = iwm_send_cmd(sc, &hcmd);
7900 	free(scan_config, M_DEVBUF, cmd_size);
7901 	return err;
7902 }
7903 
7904 int
7905 iwm_umac_scan_size(struct iwm_softc *sc)
7906 {
7907 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
7908 	int tail_size;
7909 
7910 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7911 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
7912 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7913 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
7914 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7915 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
7916 	else
7917 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
7918 
7919 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
7920 	    sc->sc_capa_n_scan_channels + tail_size;
7921 }
7922 
7923 struct iwm_scan_umac_chan_param *
7924 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7925     struct iwm_scan_req_umac *req)
7926 {
7927 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7928 		return &req->v8.channel;
7929 
7930 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7931 		return &req->v7.channel;
7932 
7933 	return &req->v1.channel;
7934 }
7935 
7936 void *
7937 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7938 {
7939 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7940 		return (void *)&req->v8.data;
7941 
7942 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7943 		return (void *)&req->v7.data;
7944 
7945 	return (void *)&req->v1.data;
7946 
7947 }
7948 
7949 /* adaptive dwell max budget time [TU] for full scan */
7950 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7951 /* adaptive dwell max budget time [TU] for directed scan */
7952 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7953 /* adaptive dwell default high band APs number */
7954 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7955 /* adaptive dwell default low band APs number */
7956 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7957 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7958 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7959 
7960 int
7961 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7962 {
7963 	struct ieee80211com *ic = &sc->sc_ic;
7964 	struct iwm_host_cmd hcmd = {
7965 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
7966 		.len = { 0, },
7967 		.data = { NULL, },
7968 		.flags = 0,
7969 	};
7970 	struct iwm_scan_req_umac *req;
7971 	void *cmd_data, *tail_data;
7972 	struct iwm_scan_req_umac_tail_v2 *tail;
7973 	struct iwm_scan_req_umac_tail_v1 *tailv1;
7974 	struct iwm_scan_umac_chan_param *chanparam;
7975 	size_t req_len;
7976 	int err, async = bgscan;
7977 
7978 	req_len = iwm_umac_scan_size(sc);
7979 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
7980 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
7981 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7982 		return ERANGE;
7983 	req = malloc(req_len, M_DEVBUF,
7984 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7985 	if (req == NULL)
7986 		return ENOMEM;
7987 
7988 	hcmd.len[0] = (uint16_t)req_len;
7989 	hcmd.data[0] = (void *)req;
7990 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7991 
7992 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7993 		req->v7.adwell_default_n_aps_social =
7994 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7995 		req->v7.adwell_default_n_aps =
7996 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
7997 
7998 		if (ic->ic_des_esslen != 0)
7999 			req->v7.adwell_max_budget =
8000 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
8001 		else
8002 			req->v7.adwell_max_budget =
8003 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
8004 
8005 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8006 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8007 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8008 
8009 		if (isset(sc->sc_ucode_api,
8010 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8011 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
8012 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
8013 		} else {
8014 			req->v7.active_dwell = 10;
8015 			req->v7.passive_dwell = 110;
8016 			req->v7.fragmented_dwell = 44;
8017 		}
8018 	} else {
8019 		/* These timings correspond to iwlwifi's UNASSOC scan. */
8020 		req->v1.active_dwell = 10;
8021 		req->v1.passive_dwell = 110;
8022 		req->v1.fragmented_dwell = 44;
8023 		req->v1.extended_dwell = 90;
8024 
8025 		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8026 	}
8027 
8028 	if (bgscan) {
8029 		const uint32_t timeout = htole32(120);
8030 		if (isset(sc->sc_ucode_api,
8031 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8032 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8033 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8034 		} else if (isset(sc->sc_ucode_api,
8035 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8036 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8037 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8038 		} else {
8039 			req->v1.max_out_time = timeout;
8040 			req->v1.suspend_time = timeout;
8041 		}
8042 	}
8043 
8044 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8045 
8046 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
8047 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
8048 	chanparam->count = iwm_umac_scan_fill_channels(sc,
8049 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
8050 	    ic->ic_des_esslen != 0, bgscan);
8051 	chanparam->flags = 0;
8052 
8053 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
8054 	    sc->sc_capa_n_scan_channels;
8055 	tail = tail_data;
8056 	/* tail v1 layout differs in preq and direct_scan member fields. */
8057 	tailv1 = tail_data;
8058 
8059 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
8060 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
8061 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8062 		req->v8.general_flags2 =
8063 			IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
8064 	}
8065 
8066 	if (ic->ic_des_esslen != 0) {
8067 		if (isset(sc->sc_ucode_api,
8068 		    IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
8069 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8070 			tail->direct_scan[0].len = ic->ic_des_esslen;
8071 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
8072 			    ic->ic_des_esslen);
8073 		} else {
8074 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8075 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
8076 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
8077 			    ic->ic_des_esslen);
8078 		}
8079 		req->general_flags |=
8080 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
8081 	} else
8082 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
8083 
8084 	if (isset(sc->sc_enabled_capa,
8085 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
8086 	    isset(sc->sc_enabled_capa,
8087 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
8088 		req->general_flags |=
8089 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
8090 
8091 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8092 		req->general_flags |=
8093 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
8094 	} else {
8095 		req->general_flags |=
8096 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
8097 	}
8098 
8099 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
8100 		err = iwm_fill_probe_req(sc, &tail->preq);
8101 	else
8102 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
8103 	if (err) {
8104 		free(req, M_DEVBUF, req_len);
8105 		return err;
8106 	}
8107 
8108 	/* Specify the scan plan: We'll do one iteration. */
8109 	tail->schedule[0].interval = 0;
8110 	tail->schedule[0].iter_count = 1;
8111 
8112 	err = iwm_send_cmd(sc, &hcmd);
8113 	free(req, M_DEVBUF, req_len);
8114 	return err;
8115 }
8116 
8117 void
8118 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
8119 {
8120 	struct ieee80211com *ic = &sc->sc_ic;
8121 	struct ifnet *ifp = IC2IFP(ic);
8122 	char alpha2[3];
8123 
8124 	snprintf(alpha2, sizeof(alpha2), "%c%c",
8125 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
8126 
8127 	if (ifp->if_flags & IFF_DEBUG) {
8128 		printf("%s: firmware has detected regulatory domain '%s' "
8129 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
8130 	}
8131 
8132 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
8133 }
8134 
8135 uint8_t
8136 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
8137 {
8138 	int i;
8139 	uint8_t rval;
8140 
8141 	for (i = 0; i < rs->rs_nrates; i++) {
8142 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
8143 		if (rval == iwm_rates[ridx].rate)
8144 			return rs->rs_rates[i];
8145 	}
8146 
8147 	return 0;
8148 }
8149 
8150 int
8151 iwm_rval2ridx(int rval)
8152 {
8153 	int ridx;
8154 
8155 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
8156 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
8157 			continue;
8158 		if (rval == iwm_rates[ridx].rate)
8159 			break;
8160 	}
8161 
8162        return ridx;
8163 }
8164 
8165 void
8166 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
8167     int *ofdm_rates)
8168 {
8169 	struct ieee80211_node *ni = &in->in_ni;
8170 	struct ieee80211_rateset *rs = &ni->ni_rates;
8171 	int lowest_present_ofdm = -1;
8172 	int lowest_present_cck = -1;
8173 	uint8_t cck = 0;
8174 	uint8_t ofdm = 0;
8175 	int i;
8176 
8177 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
8178 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
8179 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
8180 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8181 				continue;
8182 			cck |= (1 << i);
8183 			if (lowest_present_cck == -1 || lowest_present_cck > i)
8184 				lowest_present_cck = i;
8185 		}
8186 	}
8187 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
8188 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8189 			continue;
8190 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
8191 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
8192 			lowest_present_ofdm = i;
8193 	}
8194 
8195 	/*
8196 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
8197 	 * variables. This isn't sufficient though, as there might not
8198 	 * be all the right rates in the bitmap. E.g. if the only basic
8199 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
8200 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
8201 	 *
8202 	 *    [...] a STA responding to a received frame shall transmit
8203 	 *    its Control Response frame [...] at the highest rate in the
8204 	 *    BSSBasicRateSet parameter that is less than or equal to the
8205 	 *    rate of the immediately previous frame in the frame exchange
8206 	 *    sequence ([...]) and that is of the same modulation class
8207 	 *    ([...]) as the received frame. If no rate contained in the
8208 	 *    BSSBasicRateSet parameter meets these conditions, then the
8209 	 *    control frame sent in response to a received frame shall be
8210 	 *    transmitted at the highest mandatory rate of the PHY that is
8211 	 *    less than or equal to the rate of the received frame, and
8212 	 *    that is of the same modulation class as the received frame.
8213 	 *
8214 	 * As a consequence, we need to add all mandatory rates that are
8215 	 * lower than all of the basic rates to these bitmaps.
8216 	 */
8217 
8218 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
8219 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
8220 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
8221 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
8222 	/* 6M already there or needed so always add */
8223 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
8224 
8225 	/*
8226 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
8227 	 * Note, however:
8228 	 *  - if no CCK rates are basic, it must be ERP since there must
8229 	 *    be some basic rates at all, so they're OFDM => ERP PHY
8230 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
8231 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
8232 	 *  - if 5.5M is basic, 1M and 2M are mandatory
8233 	 *  - if 2M is basic, 1M is mandatory
8234 	 *  - if 1M is basic, that's the only valid ACK rate.
8235 	 * As a consequence, it's not as complicated as it sounds, just add
8236 	 * any lower rates to the ACK rate bitmap.
8237 	 */
8238 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
8239 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
8240 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
8241 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
8242 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
8243 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
8244 	/* 1M already there or needed so always add */
8245 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
8246 
8247 	*cck_rates = cck;
8248 	*ofdm_rates = ofdm;
8249 }
8250 
8251 void
8252 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
8253     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
8254 {
8255 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
8256 	struct ieee80211com *ic = &sc->sc_ic;
8257 	struct ieee80211_node *ni = ic->ic_bss;
8258 	int cck_ack_rates, ofdm_ack_rates;
8259 	int i;
8260 
8261 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
8262 	    in->in_color));
8263 	cmd->action = htole32(action);
8264 
8265 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8266 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
8267 	else if (ic->ic_opmode == IEEE80211_M_STA)
8268 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
8269 	else
8270 		panic("unsupported operating mode %d", ic->ic_opmode);
8271 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
8272 
8273 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
8274 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8275 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
8276 		return;
8277 	}
8278 
8279 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
8280 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
8281 	cmd->cck_rates = htole32(cck_ack_rates);
8282 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
8283 
8284 	cmd->cck_short_preamble
8285 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
8286 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
8287 	cmd->short_slot
8288 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
8289 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
8290 
8291 	for (i = 0; i < EDCA_NUM_AC; i++) {
8292 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
8293 		int txf = iwm_ac_to_tx_fifo[i];
8294 
8295 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
8296 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
8297 		cmd->ac[txf].aifsn = ac->ac_aifsn;
8298 		cmd->ac[txf].fifos_mask = (1 << txf);
8299 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
8300 	}
8301 	if (ni->ni_flags & IEEE80211_NODE_QOS)
8302 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
8303 
8304 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8305 		enum ieee80211_htprot htprot =
8306 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
8307 		switch (htprot) {
8308 		case IEEE80211_HTPROT_NONE:
8309 			break;
8310 		case IEEE80211_HTPROT_NONMEMBER:
8311 		case IEEE80211_HTPROT_NONHT_MIXED:
8312 			cmd->protection_flags |=
8313 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8314 			    IWM_MAC_PROT_FLG_FAT_PROT);
8315 			break;
8316 		case IEEE80211_HTPROT_20MHZ:
8317 			if (in->in_phyctxt &&
8318 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8319 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
8320 				cmd->protection_flags |=
8321 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8322 				    IWM_MAC_PROT_FLG_FAT_PROT);
8323 			}
8324 			break;
8325 		default:
8326 			break;
8327 		}
8328 
8329 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
8330 	}
8331 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8332 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
8333 
8334 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
8335 #undef IWM_EXP2
8336 }
8337 
8338 void
8339 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8340     struct iwm_mac_data_sta *sta, int assoc)
8341 {
8342 	struct ieee80211_node *ni = &in->in_ni;
8343 	uint32_t dtim_off;
8344 	uint64_t tsf;
8345 
8346 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
8347 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
8348 	tsf = letoh64(tsf);
8349 
8350 	sta->is_assoc = htole32(assoc);
8351 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
8352 	sta->dtim_tsf = htole64(tsf + dtim_off);
8353 	sta->bi = htole32(ni->ni_intval);
8354 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
8355 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
8356 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
8357 	sta->listen_interval = htole32(10);
8358 	sta->assoc_id = htole32(ni->ni_associd);
8359 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
8360 }
8361 
8362 int
8363 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8364     int assoc)
8365 {
8366 	struct ieee80211com *ic = &sc->sc_ic;
8367 	struct ieee80211_node *ni = &in->in_ni;
8368 	struct iwm_mac_ctx_cmd cmd;
8369 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8370 
8371 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
8372 		panic("MAC already added");
8373 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
8374 		panic("MAC already removed");
8375 
8376 	memset(&cmd, 0, sizeof(cmd));
8377 
8378 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8379 
8380 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8381 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
8382 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
8383 		    IWM_MAC_FILTER_ACCEPT_GRP |
8384 		    IWM_MAC_FILTER_IN_BEACON |
8385 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
8386 		    IWM_MAC_FILTER_IN_CRC32);
8387 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
8388 		/*
8389 		 * Allow beacons to pass through as long as we are not
8390 		 * associated or we do not have dtim period information.
8391 		 */
8392 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
8393 	else
8394 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8395 
8396 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8397 }
8398 
8399 int
8400 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8401 {
8402 	struct iwm_time_quota_cmd_v1 cmd;
8403 	int i, idx, num_active_macs, quota, quota_rem;
8404 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
8405 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
8406 	uint16_t id;
8407 
8408 	memset(&cmd, 0, sizeof(cmd));
8409 
8410 	/* currently, PHY ID == binding ID */
8411 	if (in && in->in_phyctxt) {
8412 		id = in->in_phyctxt->id;
8413 		KASSERT(id < IWM_MAX_BINDINGS);
8414 		colors[id] = in->in_phyctxt->color;
8415 		if (running)
8416 			n_ifs[id] = 1;
8417 	}
8418 
8419 	/*
8420 	 * The FW's scheduling session consists of
8421 	 * IWM_MAX_QUOTA fragments. Divide these fragments
8422 	 * equally between all the bindings that require quota
8423 	 */
8424 	num_active_macs = 0;
8425 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8426 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
8427 		num_active_macs += n_ifs[i];
8428 	}
8429 
8430 	quota = 0;
8431 	quota_rem = 0;
8432 	if (num_active_macs) {
8433 		quota = IWM_MAX_QUOTA / num_active_macs;
8434 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
8435 	}
8436 
8437 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8438 		if (colors[i] < 0)
8439 			continue;
8440 
8441 		cmd.quotas[idx].id_and_color =
8442 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
8443 
8444 		if (n_ifs[i] <= 0) {
8445 			cmd.quotas[idx].quota = htole32(0);
8446 			cmd.quotas[idx].max_duration = htole32(0);
8447 		} else {
8448 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8449 			cmd.quotas[idx].max_duration = htole32(0);
8450 		}
8451 		idx++;
8452 	}
8453 
8454 	/* Give the remainder of the session to the first binding */
8455 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
8456 
8457 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8458 		struct iwm_time_quota_cmd cmd_v2;
8459 
8460 		memset(&cmd_v2, 0, sizeof(cmd_v2));
8461 		for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8462 			cmd_v2.quotas[i].id_and_color =
8463 			    cmd.quotas[i].id_and_color;
8464 			cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
8465 			cmd_v2.quotas[i].max_duration =
8466 			    cmd.quotas[i].max_duration;
8467 		}
8468 		return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8469 		    sizeof(cmd_v2), &cmd_v2);
8470 	}
8471 
8472 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8473 }
8474 
8475 void
8476 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8477 {
8478 	int s = splnet();
8479 
8480 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8481 		splx(s);
8482 		return;
8483 	}
8484 
8485 	refcnt_take(&sc->task_refs);
8486 	if (!task_add(taskq, task))
8487 		refcnt_rele_wake(&sc->task_refs);
8488 	splx(s);
8489 }
8490 
8491 void
8492 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8493 {
8494 	if (task_del(taskq, task))
8495 		refcnt_rele(&sc->task_refs);
8496 }
8497 
8498 int
8499 iwm_scan(struct iwm_softc *sc)
8500 {
8501 	struct ieee80211com *ic = &sc->sc_ic;
8502 	struct ifnet *ifp = IC2IFP(ic);
8503 	int err;
8504 
8505 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8506 		err = iwm_scan_abort(sc);
8507 		if (err) {
8508 			printf("%s: could not abort background scan\n",
8509 			    DEVNAME(sc));
8510 			return err;
8511 		}
8512 	}
8513 
8514 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8515 		err = iwm_umac_scan(sc, 0);
8516 	else
8517 		err = iwm_lmac_scan(sc, 0);
8518 	if (err) {
8519 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8520 		return err;
8521 	}
8522 
8523 	/*
8524 	 * The current mode might have been fixed during association.
8525 	 * Ensure all channels get scanned.
8526 	 */
8527 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
8528 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8529 
8530 	sc->sc_flags |= IWM_FLAG_SCANNING;
8531 	if (ifp->if_flags & IFF_DEBUG)
8532 		printf("%s: %s -> %s\n", ifp->if_xname,
8533 		    ieee80211_state_name[ic->ic_state],
8534 		    ieee80211_state_name[IEEE80211_S_SCAN]);
8535 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8536 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
8537 		ieee80211_node_cleanup(ic, ic->ic_bss);
8538 	}
8539 	ic->ic_state = IEEE80211_S_SCAN;
8540 	iwm_led_blink_start(sc);
8541 	wakeup(&ic->ic_state); /* wake iwm_init() */
8542 
8543 	return 0;
8544 }
8545 
8546 int
8547 iwm_bgscan(struct ieee80211com *ic)
8548 {
8549 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8550 	int err;
8551 
8552 	if (sc->sc_flags & IWM_FLAG_SCANNING)
8553 		return 0;
8554 
8555 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8556 		err = iwm_umac_scan(sc, 1);
8557 	else
8558 		err = iwm_lmac_scan(sc, 1);
8559 	if (err) {
8560 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8561 		return err;
8562 	}
8563 
8564 	sc->sc_flags |= IWM_FLAG_BGSCAN;
8565 	return 0;
8566 }
8567 
8568 void
8569 iwm_bgscan_done(struct ieee80211com *ic,
8570     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
8571 {
8572 	struct iwm_softc *sc = ic->ic_softc;
8573 
8574 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8575 	sc->bgscan_unref_arg = arg;
8576 	sc->bgscan_unref_arg_size = arg_size;
8577 	iwm_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
8578 }
8579 
8580 void
8581 iwm_bgscan_done_task(void *arg)
8582 {
8583 	struct iwm_softc *sc = arg;
8584 	struct ieee80211com *ic = &sc->sc_ic;
8585 	struct iwm_node *in = (void *)ic->ic_bss;
8586 	struct ieee80211_node *ni = &in->in_ni;
8587 	int tid, err = 0, s = splnet();
8588 
8589 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
8590 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
8591 	    ic->ic_state != IEEE80211_S_RUN) {
8592 		err = ENXIO;
8593 		goto done;
8594 	}
8595 
8596 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
8597 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
8598 
8599 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8600 			continue;
8601 
8602 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8603 		if (err)
8604 			goto done;
8605 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8606 		if (err)
8607 			goto done;
8608 		in->tfd_queue_msk &= ~(1 << qid);
8609 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
8610 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
8611 		    IEEE80211_ACTION_DELBA,
8612 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
8613 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
8614 #endif
8615 		ieee80211_node_tx_ba_clear(ni, tid);
8616 	}
8617 
8618 	err = iwm_flush_sta(sc, in);
8619 	if (err)
8620 		goto done;
8621 
8622 	/*
8623 	 * Tx queues have been flushed and Tx agg has been stopped.
8624 	 * Allow roaming to proceed.
8625 	 */
8626 	ni->ni_unref_arg = sc->bgscan_unref_arg;
8627 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8628 	sc->bgscan_unref_arg = NULL;
8629 	sc->bgscan_unref_arg_size = 0;
8630 	ieee80211_node_tx_stopped(ic, &in->in_ni);
8631 done:
8632 	if (err) {
8633 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8634 		sc->bgscan_unref_arg = NULL;
8635 		sc->bgscan_unref_arg_size = 0;
8636 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8637 			task_add(systq, &sc->init_task);
8638 	}
8639 	refcnt_rele_wake(&sc->task_refs);
8640 	splx(s);
8641 }
8642 
8643 int
8644 iwm_umac_scan_abort(struct iwm_softc *sc)
8645 {
8646 	struct iwm_umac_scan_abort cmd = { 0 };
8647 
8648 	return iwm_send_cmd_pdu(sc,
8649 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
8650 	    0, sizeof(cmd), &cmd);
8651 }
8652 
8653 int
8654 iwm_lmac_scan_abort(struct iwm_softc *sc)
8655 {
8656 	struct iwm_host_cmd cmd = {
8657 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
8658 	};
8659 	int err, status;
8660 
8661 	err = iwm_send_cmd_status(sc, &cmd, &status);
8662 	if (err)
8663 		return err;
8664 
8665 	if (status != IWM_CAN_ABORT_STATUS) {
8666 		/*
8667 		 * The scan abort will return 1 for success or
8668 		 * 2 for "failure".  A failure condition can be
8669 		 * due to simply not being in an active scan which
8670 		 * can occur if we send the scan abort before the
8671 		 * microcode has notified us that a scan is completed.
8672 		 */
8673 		return EBUSY;
8674 	}
8675 
8676 	return 0;
8677 }
8678 
8679 int
8680 iwm_scan_abort(struct iwm_softc *sc)
8681 {
8682 	int err;
8683 
8684 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8685 		err = iwm_umac_scan_abort(sc);
8686 	else
8687 		err = iwm_lmac_scan_abort(sc);
8688 
8689 	if (err == 0)
8690 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8691 	return err;
8692 }
8693 
8694 int
8695 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8696     struct ieee80211_channel *chan, uint8_t chains_static,
8697     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8698     uint8_t vht_chan_width)
8699 {
8700 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8701 	int err;
8702 
8703 	if (isset(sc->sc_enabled_capa,
8704 	    IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8705 	    (phyctxt->channel->ic_flags & band_flags) !=
8706 	    (chan->ic_flags & band_flags)) {
8707 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8708 		    chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8709 		    vht_chan_width);
8710 		if (err) {
8711 			printf("%s: could not remove PHY context "
8712 			    "(error %d)\n", DEVNAME(sc), err);
8713 			return err;
8714 		}
8715 		phyctxt->channel = chan;
8716 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8717 		    chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time, sco,
8718 		    vht_chan_width);
8719 		if (err) {
8720 			printf("%s: could not add PHY context "
8721 			    "(error %d)\n", DEVNAME(sc), err);
8722 			return err;
8723 		}
8724 	} else {
8725 		phyctxt->channel = chan;
8726 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8727 		    chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8728 		    vht_chan_width);
8729 		if (err) {
8730 			printf("%s: could not update PHY context (error %d)\n",
8731 			    DEVNAME(sc), err);
8732 			return err;
8733 		}
8734 	}
8735 
8736 	phyctxt->sco = sco;
8737 	phyctxt->vht_chan_width = vht_chan_width;
8738 	return 0;
8739 }
8740 
8741 int
8742 iwm_auth(struct iwm_softc *sc)
8743 {
8744 	struct ieee80211com *ic = &sc->sc_ic;
8745 	struct iwm_node *in = (void *)ic->ic_bss;
8746 	uint32_t duration;
8747 	int generation = sc->sc_generation, err;
8748 
8749 	splassert(IPL_NET);
8750 
8751 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8752 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8753 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8754 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8755 		if (err)
8756 			return err;
8757 	} else {
8758 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8759 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8760 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8761 		if (err)
8762 			return err;
8763 	}
8764 	in->in_phyctxt = &sc->sc_phyctxt[0];
8765 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8766 	iwm_setrates(in, 0);
8767 
8768 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8769 	if (err) {
8770 		printf("%s: could not add MAC context (error %d)\n",
8771 		    DEVNAME(sc), err);
8772 		return err;
8773  	}
8774 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8775 
8776 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8777 	if (err) {
8778 		printf("%s: could not add binding (error %d)\n",
8779 		    DEVNAME(sc), err);
8780 		goto rm_mac_ctxt;
8781 	}
8782 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8783 
8784 	in->tid_disable_ampdu = 0xffff;
8785 	err = iwm_add_sta_cmd(sc, in, 0);
8786 	if (err) {
8787 		printf("%s: could not add sta (error %d)\n",
8788 		    DEVNAME(sc), err);
8789 		goto rm_binding;
8790 	}
8791 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8792 
8793 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8794 		return 0;
8795 
8796 	/*
8797 	 * Prevent the FW from wandering off channel during association
8798 	 * by "protecting" the session with a time event.
8799 	 */
8800 	if (in->in_ni.ni_intval)
8801 		duration = in->in_ni.ni_intval * 2;
8802 	else
8803 		duration = IEEE80211_DUR_TU;
8804 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8805 
8806 	return 0;
8807 
8808 rm_binding:
8809 	if (generation == sc->sc_generation) {
8810 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8811 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8812 	}
8813 rm_mac_ctxt:
8814 	if (generation == sc->sc_generation) {
8815 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8816 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8817 	}
8818 	return err;
8819 }
8820 
8821 int
8822 iwm_deauth(struct iwm_softc *sc)
8823 {
8824 	struct ieee80211com *ic = &sc->sc_ic;
8825 	struct iwm_node *in = (void *)ic->ic_bss;
8826 	int err;
8827 
8828 	splassert(IPL_NET);
8829 
8830 	iwm_unprotect_session(sc, in);
8831 
8832 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8833 		err = iwm_flush_sta(sc, in);
8834 		if (err)
8835 			return err;
8836 		err = iwm_rm_sta_cmd(sc, in);
8837 		if (err) {
8838 			printf("%s: could not remove STA (error %d)\n",
8839 			    DEVNAME(sc), err);
8840 			return err;
8841 		}
8842 		in->tid_disable_ampdu = 0xffff;
8843 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8844 		sc->sc_rx_ba_sessions = 0;
8845 		sc->ba_rx.start_tidmask = 0;
8846 		sc->ba_rx.stop_tidmask = 0;
8847 		sc->tx_ba_queue_mask = 0;
8848 		sc->ba_tx.start_tidmask = 0;
8849 		sc->ba_tx.stop_tidmask = 0;
8850 	}
8851 
8852 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8853 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8854 		if (err) {
8855 			printf("%s: could not remove binding (error %d)\n",
8856 			    DEVNAME(sc), err);
8857 			return err;
8858 		}
8859 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8860 	}
8861 
8862 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8863 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8864 		if (err) {
8865 			printf("%s: could not remove MAC context (error %d)\n",
8866 			    DEVNAME(sc), err);
8867 			return err;
8868 		}
8869 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8870 	}
8871 
8872 	/* Move unused PHY context to a default channel. */
8873 	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8874 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8875 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8876 	if (err)
8877 		return err;
8878 
8879 	return 0;
8880 }
8881 
8882 int
8883 iwm_run(struct iwm_softc *sc)
8884 {
8885 	struct ieee80211com *ic = &sc->sc_ic;
8886 	struct iwm_node *in = (void *)ic->ic_bss;
8887 	struct ieee80211_node *ni = &in->in_ni;
8888 	int err;
8889 
8890 	splassert(IPL_NET);
8891 
8892 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8893 		/* Add a MAC context and a sniffing STA. */
8894 		err = iwm_auth(sc);
8895 		if (err)
8896 			return err;
8897 	}
8898 
8899 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8900 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8901 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8902 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8903 		    in->in_phyctxt->channel, chains, chains,
8904 		    0, IEEE80211_HTOP0_SCO_SCN,
8905 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8906 		if (err) {
8907 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8908 			return err;
8909 		}
8910 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8911 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8912 		uint8_t sco, vht_chan_width;
8913 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8914 		    ieee80211_node_supports_ht_chan40(ni))
8915 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8916 		else
8917 			sco = IEEE80211_HTOP0_SCO_SCN;
8918 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8919 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8920 		    ieee80211_node_supports_vht_chan80(ni))
8921 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8922 		else
8923 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8924 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8925 		    in->in_phyctxt->channel, chains, chains,
8926 		    0, sco, vht_chan_width);
8927 		if (err) {
8928 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8929 			return err;
8930 		}
8931 	}
8932 
8933 	/* Update STA again to apply HT and VHT settings. */
8934 	err = iwm_add_sta_cmd(sc, in, 1);
8935 	if (err) {
8936 		printf("%s: could not update STA (error %d)\n",
8937 		    DEVNAME(sc), err);
8938 		return err;
8939 	}
8940 
8941 	/* We have now been assigned an associd by the AP. */
8942 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8943 	if (err) {
8944 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8945 		return err;
8946 	}
8947 
8948 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8949 	if (err) {
8950 		printf("%s: could not set sf full on (error %d)\n",
8951 		    DEVNAME(sc), err);
8952 		return err;
8953 	}
8954 
8955 	err = iwm_allow_mcast(sc);
8956 	if (err) {
8957 		printf("%s: could not allow mcast (error %d)\n",
8958 		    DEVNAME(sc), err);
8959 		return err;
8960 	}
8961 
8962 	err = iwm_power_update_device(sc);
8963 	if (err) {
8964 		printf("%s: could not send power command (error %d)\n",
8965 		    DEVNAME(sc), err);
8966 		return err;
8967 	}
8968 #ifdef notyet
8969 	/*
8970 	 * Disabled for now. Default beacon filter settings
8971 	 * prevent net80211 from getting ERP and HT protection
8972 	 * updates from beacons.
8973 	 */
8974 	err = iwm_enable_beacon_filter(sc, in);
8975 	if (err) {
8976 		printf("%s: could not enable beacon filter\n",
8977 		    DEVNAME(sc));
8978 		return err;
8979 	}
8980 #endif
8981 	err = iwm_power_mac_update_mode(sc, in);
8982 	if (err) {
8983 		printf("%s: could not update MAC power (error %d)\n",
8984 		    DEVNAME(sc), err);
8985 		return err;
8986 	}
8987 
8988 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
8989 		err = iwm_update_quotas(sc, in, 1);
8990 		if (err) {
8991 			printf("%s: could not update quotas (error %d)\n",
8992 			    DEVNAME(sc), err);
8993 			return err;
8994 		}
8995 	}
8996 
8997 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
8998 	ieee80211_ra_node_init(&in->in_rn);
8999 	ieee80211_ra_vht_node_init(&in->in_rn_vht);
9000 
9001 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9002 		iwm_led_blink_start(sc);
9003 		return 0;
9004 	}
9005 
9006 	/* Start at lowest available bit-rate, AMRR will raise. */
9007 	in->in_ni.ni_txrate = 0;
9008 	in->in_ni.ni_txmcs = 0;
9009 	in->in_ni.ni_vht_ss = 1;
9010 	iwm_setrates(in, 0);
9011 
9012 	timeout_add_msec(&sc->sc_calib_to, 500);
9013 	iwm_led_enable(sc);
9014 
9015 	return 0;
9016 }
9017 
9018 int
9019 iwm_run_stop(struct iwm_softc *sc)
9020 {
9021 	struct ieee80211com *ic = &sc->sc_ic;
9022 	struct iwm_node *in = (void *)ic->ic_bss;
9023 	struct ieee80211_node *ni = &in->in_ni;
9024 	int err, i, tid;
9025 
9026 	splassert(IPL_NET);
9027 
9028 	/*
9029 	 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
9030 	 * for this when moving out of RUN state since it runs in a
9031 	 * separate thread.
9032 	 * Note that in->in_ni (struct ieee80211_node) already represents
9033 	 * our new access point in case we are roaming between APs.
9034 	 * This means we cannot rely on struct ieee802111_node to tell
9035 	 * us which BA sessions exist.
9036 	 */
9037 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9038 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9039 		if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
9040 			continue;
9041 		err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
9042 		if (err)
9043 			return err;
9044 		iwm_clear_reorder_buffer(sc, rxba);
9045 		if (sc->sc_rx_ba_sessions > 0)
9046 			sc->sc_rx_ba_sessions--;
9047 	}
9048 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
9049 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
9050 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
9051 			continue;
9052 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
9053 		if (err)
9054 			return err;
9055 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
9056 		if (err)
9057 			return err;
9058 		in->tfd_queue_msk &= ~(1 << qid);
9059 	}
9060 	ieee80211_ba_del(ni);
9061 
9062 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
9063 		iwm_led_blink_stop(sc);
9064 
9065 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
9066 	if (err)
9067 		return err;
9068 
9069 	iwm_disable_beacon_filter(sc);
9070 
9071 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9072 		err = iwm_update_quotas(sc, in, 0);
9073 		if (err) {
9074 			printf("%s: could not update quotas (error %d)\n",
9075 			    DEVNAME(sc), err);
9076 			return err;
9077 		}
9078 	}
9079 
9080 	/* Mark station as disassociated. */
9081 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
9082 	if (err) {
9083 		printf("%s: failed to update MAC\n", DEVNAME(sc));
9084 		return err;
9085 	}
9086 
9087 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
9088 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
9089 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
9090 		    in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
9091 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9092 		if (err) {
9093 			printf("%s: failed to update PHY\n", DEVNAME(sc));
9094 			return err;
9095 		}
9096 	}
9097 
9098 	return 0;
9099 }
9100 
9101 struct ieee80211_node *
9102 iwm_node_alloc(struct ieee80211com *ic)
9103 {
9104 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
9105 }
9106 
9107 int
9108 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9109     struct ieee80211_key *k)
9110 {
9111 	struct iwm_softc *sc = ic->ic_softc;
9112 	struct iwm_add_sta_key_cmd_v1 cmd;
9113 
9114 	memset(&cmd, 0, sizeof(cmd));
9115 
9116 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9117 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9118 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9119 	    IWM_STA_KEY_FLG_KEYID_MSK));
9120 	if (k->k_flags & IEEE80211_KEY_GROUP)
9121 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9122 
9123 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9124 	cmd.common.key_offset = 0;
9125 	cmd.common.sta_id = IWM_STATION_ID;
9126 
9127 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9128 	    sizeof(cmd), &cmd);
9129 }
9130 
9131 int
9132 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9133     struct ieee80211_key *k)
9134 {
9135 	struct iwm_softc *sc = ic->ic_softc;
9136 	struct iwm_add_sta_key_cmd cmd;
9137 
9138 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9139 	    k->k_cipher != IEEE80211_CIPHER_CCMP)  {
9140 		/* Fallback to software crypto for other ciphers. */
9141 		return (ieee80211_set_key(ic, ni, k));
9142 	}
9143 
9144 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9145 		return iwm_set_key_v1(ic, ni, k);
9146 
9147 	memset(&cmd, 0, sizeof(cmd));
9148 
9149 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9150 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9151 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9152 	    IWM_STA_KEY_FLG_KEYID_MSK));
9153 	if (k->k_flags & IEEE80211_KEY_GROUP)
9154 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9155 
9156 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9157 	cmd.common.key_offset = 0;
9158 	cmd.common.sta_id = IWM_STATION_ID;
9159 
9160 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
9161 
9162 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9163 	    sizeof(cmd), &cmd);
9164 }
9165 
9166 void
9167 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9168     struct ieee80211_key *k)
9169 {
9170 	struct iwm_softc *sc = ic->ic_softc;
9171 	struct iwm_add_sta_key_cmd_v1 cmd;
9172 
9173 	memset(&cmd, 0, sizeof(cmd));
9174 
9175 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9176 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9177 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9178 	    IWM_STA_KEY_FLG_KEYID_MSK));
9179 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9180 	cmd.common.key_offset = 0;
9181 	cmd.common.sta_id = IWM_STATION_ID;
9182 
9183 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9184 }
9185 
9186 void
9187 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9188     struct ieee80211_key *k)
9189 {
9190 	struct iwm_softc *sc = ic->ic_softc;
9191 	struct iwm_add_sta_key_cmd cmd;
9192 
9193 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9194 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
9195 		/* Fallback to software crypto for other ciphers. */
9196                 ieee80211_delete_key(ic, ni, k);
9197 		return;
9198 	}
9199 
9200 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9201 		return iwm_delete_key_v1(ic, ni, k);
9202 
9203 	memset(&cmd, 0, sizeof(cmd));
9204 
9205 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9206 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9207 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9208 	    IWM_STA_KEY_FLG_KEYID_MSK));
9209 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9210 	cmd.common.key_offset = 0;
9211 	cmd.common.sta_id = IWM_STATION_ID;
9212 
9213 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9214 }
9215 
9216 void
9217 iwm_calib_timeout(void *arg)
9218 {
9219 	struct iwm_softc *sc = arg;
9220 	struct ieee80211com *ic = &sc->sc_ic;
9221 	struct iwm_node *in = (void *)ic->ic_bss;
9222 	struct ieee80211_node *ni = &in->in_ni;
9223 	int s;
9224 
9225 	s = splnet();
9226 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
9227 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
9228 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
9229 		int old_txrate = ni->ni_txrate;
9230 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
9231 		/*
9232 		 * If AMRR has chosen a new TX rate we must update
9233 		 * the firwmare's LQ rate table.
9234 		 * ni_txrate may change again before the task runs so
9235 		 * cache the chosen rate in the iwm_node structure.
9236 		 */
9237 		if (ni->ni_txrate != old_txrate)
9238 			iwm_setrates(in, 1);
9239 	}
9240 
9241 	splx(s);
9242 
9243 	timeout_add_msec(&sc->sc_calib_to, 500);
9244 }
9245 
9246 void
9247 iwm_set_rate_table_vht(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9248 {
9249 	struct ieee80211_node *ni = &in->in_ni;
9250 	struct ieee80211com *ic = ni->ni_ic;
9251 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9252 	int ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9253 	int i, tab, txmcs;
9254 
9255 	/*
9256 	 * Fill the LQ rate selection table with VHT rates in descending
9257 	 * order, i.e. with the node's current TX rate first. Keep reducing
9258 	 * channel width during later Tx attempts, and eventually fall back
9259 	 * to legacy OFDM. Do not mix SISO and MIMO rates.
9260 	 */
9261 	lqcmd->mimo_delim = 0;
9262 	txmcs = ni->ni_txmcs;
9263 	for (i = 0; i < nitems(lqcmd->rs_table); i++) {
9264 		if (txmcs >= 0) {
9265 			tab = IWM_RATE_MCS_VHT_MSK;
9266 			tab |= txmcs & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9267 			tab |= ((ni->ni_vht_ss - 1) <<
9268 			    IWM_RATE_VHT_MCS_NSS_POS) &
9269 			    IWM_RATE_VHT_MCS_NSS_MSK;
9270 			if (ni->ni_vht_ss > 1)
9271 				tab |= IWM_RATE_MCS_ANT_AB_MSK;
9272 			else
9273 				tab |= iwm_valid_siso_ant_rate_mask(sc);
9274 
9275 			/*
9276 			 * First two Tx attempts may use 80MHz/40MHz/SGI.
9277 			 * Next two Tx attempts may use 40MHz/SGI.
9278 			 * Beyond that use 20 MHz and decrease the rate.
9279 			 * As a special case, MCS 9 is invalid on 20 Mhz.
9280 			 */
9281 			if (txmcs == 9) {
9282 				if (i < 2 && in->in_phyctxt->vht_chan_width >=
9283 				    IEEE80211_VHTOP0_CHAN_WIDTH_80)
9284 					tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9285 				else
9286 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9287 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9288 				if (i < 4) {
9289 					if (ieee80211_ra_vht_use_sgi(ni))
9290 						tab |= IWM_RATE_MCS_SGI_MSK;
9291 				} else
9292 					txmcs--;
9293 			} else if (i < 2 && in->in_phyctxt->vht_chan_width >=
9294 			    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
9295 				tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9296 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9297 				if (ieee80211_ra_vht_use_sgi(ni))
9298 					tab |= IWM_RATE_MCS_SGI_MSK;
9299 			} else if (i < 4 &&
9300 			    in->in_phyctxt->vht_chan_width >=
9301 			    IEEE80211_VHTOP0_CHAN_WIDTH_HT &&
9302 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
9303 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
9304 				tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9305 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9306 				if (ieee80211_ra_vht_use_sgi(ni))
9307 					tab |= IWM_RATE_MCS_SGI_MSK;
9308 			} else if (txmcs >= 0)
9309 				txmcs--;
9310 		} else {
9311 			/* Fill the rest with the lowest possible rate. */
9312 			tab = iwm_rates[ridx_min].plcp;
9313 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9314 			if (ni->ni_vht_ss > 1 && lqcmd->mimo_delim == 0)
9315 				lqcmd->mimo_delim = i;
9316 		}
9317 
9318 		lqcmd->rs_table[i] = htole32(tab);
9319 	}
9320 }
9321 
9322 void
9323 iwm_set_rate_table(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9324 {
9325 	struct ieee80211_node *ni = &in->in_ni;
9326 	struct ieee80211com *ic = ni->ni_ic;
9327 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9328 	struct ieee80211_rateset *rs = &ni->ni_rates;
9329 	int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
9330 
9331 	/*
9332 	 * Fill the LQ rate selection table with legacy and/or HT rates
9333 	 * in descending order, i.e. with the node's current TX rate first.
9334 	 * In cases where throughput of an HT rate corresponds to a legacy
9335 	 * rate it makes no sense to add both. We rely on the fact that
9336 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
9337 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
9338 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
9339 	 */
9340 	j = 0;
9341 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9342 	mimo = iwm_is_mimo_ht_mcs(ni->ni_txmcs);
9343 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
9344 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
9345 		uint8_t plcp = iwm_rates[ridx].plcp;
9346 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
9347 
9348 		if (j >= nitems(lqcmd->rs_table))
9349 			break;
9350 		tab = 0;
9351 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9352 		    	if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
9353 				continue;
9354 	 		/* Do not mix SISO and MIMO HT rates. */
9355 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
9356 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
9357 				continue;
9358 			for (i = ni->ni_txmcs; i >= 0; i--) {
9359 				if (isclr(ni->ni_rxmcs, i))
9360 					continue;
9361 				if (ridx != iwm_ht_mcs2ridx[i])
9362 					continue;
9363 				tab = ht_plcp;
9364 				tab |= IWM_RATE_MCS_HT_MSK;
9365 				/* First two Tx attempts may use 40MHz/SGI. */
9366 				if (j > 1)
9367 					break;
9368 				if (in->in_phyctxt->sco ==
9369 				    IEEE80211_HTOP0_SCO_SCA ||
9370 				    in->in_phyctxt->sco ==
9371 				    IEEE80211_HTOP0_SCO_SCB) {
9372 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9373 					tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9374 				}
9375 				if (ieee80211_ra_use_ht_sgi(ni))
9376 					tab |= IWM_RATE_MCS_SGI_MSK;
9377 				break;
9378 			}
9379 		} else if (plcp != IWM_RATE_INVM_PLCP) {
9380 			for (i = ni->ni_txrate; i >= 0; i--) {
9381 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
9382 				    IEEE80211_RATE_VAL)) {
9383 					tab = plcp;
9384 					break;
9385 				}
9386 			}
9387 		}
9388 
9389 		if (tab == 0)
9390 			continue;
9391 
9392 		if (iwm_is_mimo_ht_plcp(ht_plcp))
9393 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
9394 		else
9395 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9396 
9397 		if (IWM_RIDX_IS_CCK(ridx))
9398 			tab |= IWM_RATE_MCS_CCK_MSK;
9399 		lqcmd->rs_table[j++] = htole32(tab);
9400 	}
9401 
9402 	lqcmd->mimo_delim = (mimo ? j : 0);
9403 
9404 	/* Fill the rest with the lowest possible rate */
9405 	while (j < nitems(lqcmd->rs_table)) {
9406 		tab = iwm_rates[ridx_min].plcp;
9407 		if (IWM_RIDX_IS_CCK(ridx_min))
9408 			tab |= IWM_RATE_MCS_CCK_MSK;
9409 		tab |= iwm_valid_siso_ant_rate_mask(sc);
9410 		lqcmd->rs_table[j++] = htole32(tab);
9411 	}
9412 }
9413 
9414 void
9415 iwm_setrates(struct iwm_node *in, int async)
9416 {
9417 	struct ieee80211_node *ni = &in->in_ni;
9418 	struct ieee80211com *ic = ni->ni_ic;
9419 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9420 	struct iwm_lq_cmd lqcmd;
9421 	struct iwm_host_cmd cmd = {
9422 		.id = IWM_LQ_CMD,
9423 		.len = { sizeof(lqcmd), },
9424 	};
9425 
9426 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
9427 
9428 	memset(&lqcmd, 0, sizeof(lqcmd));
9429 	lqcmd.sta_id = IWM_STATION_ID;
9430 
9431 	if (ic->ic_flags & IEEE80211_F_USEPROT)
9432 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
9433 
9434 	if (ni->ni_flags & IEEE80211_NODE_VHT)
9435 		iwm_set_rate_table_vht(in, &lqcmd);
9436 	else
9437 		iwm_set_rate_table(in, &lqcmd);
9438 
9439 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
9440 	    (iwm_fw_valid_tx_ant(sc) & IWM_ANT_B))
9441 		lqcmd.single_stream_ant_msk = IWM_ANT_B;
9442 	else
9443 		lqcmd.single_stream_ant_msk = IWM_ANT_A;
9444 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
9445 
9446 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
9447 	lqcmd.agg_disable_start_th = 3;
9448 	lqcmd.agg_frame_cnt_limit = 0x3f;
9449 
9450 	cmd.data[0] = &lqcmd;
9451 	iwm_send_cmd(sc, &cmd);
9452 }
9453 
9454 int
9455 iwm_media_change(struct ifnet *ifp)
9456 {
9457 	struct iwm_softc *sc = ifp->if_softc;
9458 	struct ieee80211com *ic = &sc->sc_ic;
9459 	uint8_t rate, ridx;
9460 	int err;
9461 
9462 	err = ieee80211_media_change(ifp);
9463 	if (err != ENETRESET)
9464 		return err;
9465 
9466 	if (ic->ic_fixed_mcs != -1)
9467 		sc->sc_fixed_ridx = iwm_ht_mcs2ridx[ic->ic_fixed_mcs];
9468 	else if (ic->ic_fixed_rate != -1) {
9469 		rate = ic->ic_sup_rates[ic->ic_curmode].
9470 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
9471 		/* Map 802.11 rate to HW rate index. */
9472 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
9473 			if (iwm_rates[ridx].rate == rate)
9474 				break;
9475 		sc->sc_fixed_ridx = ridx;
9476 	}
9477 
9478 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9479 	    (IFF_UP | IFF_RUNNING)) {
9480 		iwm_stop(ifp);
9481 		err = iwm_init(ifp);
9482 	}
9483 	return err;
9484 }
9485 
9486 void
9487 iwm_newstate_task(void *psc)
9488 {
9489 	struct iwm_softc *sc = (struct iwm_softc *)psc;
9490 	struct ieee80211com *ic = &sc->sc_ic;
9491 	enum ieee80211_state nstate = sc->ns_nstate;
9492 	enum ieee80211_state ostate = ic->ic_state;
9493 	int arg = sc->ns_arg;
9494 	int err = 0, s = splnet();
9495 
9496 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9497 		/* iwm_stop() is waiting for us. */
9498 		refcnt_rele_wake(&sc->task_refs);
9499 		splx(s);
9500 		return;
9501 	}
9502 
9503 	if (ostate == IEEE80211_S_SCAN) {
9504 		if (nstate == ostate) {
9505 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
9506 				refcnt_rele_wake(&sc->task_refs);
9507 				splx(s);
9508 				return;
9509 			}
9510 			/* Firmware is no longer scanning. Do another scan. */
9511 			goto next_scan;
9512 		} else
9513 			iwm_led_blink_stop(sc);
9514 	}
9515 
9516 	if (nstate <= ostate) {
9517 		switch (ostate) {
9518 		case IEEE80211_S_RUN:
9519 			err = iwm_run_stop(sc);
9520 			if (err)
9521 				goto out;
9522 			/* FALLTHROUGH */
9523 		case IEEE80211_S_ASSOC:
9524 		case IEEE80211_S_AUTH:
9525 			if (nstate <= IEEE80211_S_AUTH) {
9526 				err = iwm_deauth(sc);
9527 				if (err)
9528 					goto out;
9529 			}
9530 			/* FALLTHROUGH */
9531 		case IEEE80211_S_SCAN:
9532 		case IEEE80211_S_INIT:
9533 			break;
9534 		}
9535 
9536 		/* Die now if iwm_stop() was called while we were sleeping. */
9537 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9538 			refcnt_rele_wake(&sc->task_refs);
9539 			splx(s);
9540 			return;
9541 		}
9542 	}
9543 
9544 	switch (nstate) {
9545 	case IEEE80211_S_INIT:
9546 		break;
9547 
9548 	case IEEE80211_S_SCAN:
9549 next_scan:
9550 		err = iwm_scan(sc);
9551 		if (err)
9552 			break;
9553 		refcnt_rele_wake(&sc->task_refs);
9554 		splx(s);
9555 		return;
9556 
9557 	case IEEE80211_S_AUTH:
9558 		err = iwm_auth(sc);
9559 		break;
9560 
9561 	case IEEE80211_S_ASSOC:
9562 		break;
9563 
9564 	case IEEE80211_S_RUN:
9565 		err = iwm_run(sc);
9566 		break;
9567 	}
9568 
9569 out:
9570 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9571 		if (err)
9572 			task_add(systq, &sc->init_task);
9573 		else
9574 			sc->sc_newstate(ic, nstate, arg);
9575 	}
9576 	refcnt_rele_wake(&sc->task_refs);
9577 	splx(s);
9578 }
9579 
9580 int
9581 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9582 {
9583 	struct ifnet *ifp = IC2IFP(ic);
9584 	struct iwm_softc *sc = ifp->if_softc;
9585 
9586 	/*
9587 	 * Prevent attempts to transition towards the same state, unless
9588 	 * we are scanning in which case a SCAN -> SCAN transition
9589 	 * triggers another scan iteration. And AUTH -> AUTH is needed
9590 	 * to support band-steering.
9591 	 */
9592 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9593 	    nstate != IEEE80211_S_AUTH)
9594 		return 0;
9595 
9596 	if (ic->ic_state == IEEE80211_S_RUN) {
9597 		timeout_del(&sc->sc_calib_to);
9598 		iwm_del_task(sc, systq, &sc->ba_task);
9599 		iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9600 		iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9601 		iwm_del_task(sc, systq, &sc->bgscan_done_task);
9602 	}
9603 
9604 	sc->ns_nstate = nstate;
9605 	sc->ns_arg = arg;
9606 
9607 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9608 
9609 	return 0;
9610 }
9611 
9612 void
9613 iwm_endscan(struct iwm_softc *sc)
9614 {
9615 	struct ieee80211com *ic = &sc->sc_ic;
9616 
9617 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9618 		return;
9619 
9620 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9621 	ieee80211_end_scan(&ic->ic_if);
9622 }
9623 
9624 /*
9625  * Aging and idle timeouts for the different possible scenarios
9626  * in default configuration
9627  */
9628 static const uint32_t
9629 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9630 	{
9631 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9632 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9633 	},
9634 	{
9635 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
9636 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9637 	},
9638 	{
9639 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
9640 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
9641 	},
9642 	{
9643 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
9644 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
9645 	},
9646 	{
9647 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
9648 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
9649 	},
9650 };
9651 
9652 /*
9653  * Aging and idle timeouts for the different possible scenarios
9654  * in single BSS MAC configuration.
9655  */
9656 static const uint32_t
9657 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9658 	{
9659 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
9660 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
9661 	},
9662 	{
9663 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
9664 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
9665 	},
9666 	{
9667 		htole32(IWM_SF_MCAST_AGING_TIMER),
9668 		htole32(IWM_SF_MCAST_IDLE_TIMER)
9669 	},
9670 	{
9671 		htole32(IWM_SF_BA_AGING_TIMER),
9672 		htole32(IWM_SF_BA_IDLE_TIMER)
9673 	},
9674 	{
9675 		htole32(IWM_SF_TX_RE_AGING_TIMER),
9676 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
9677 	},
9678 };
9679 
9680 void
9681 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9682     struct ieee80211_node *ni)
9683 {
9684 	int i, j, watermark;
9685 
9686 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
9687 
9688 	/*
9689 	 * If we are in association flow - check antenna configuration
9690 	 * capabilities of the AP station, and choose the watermark accordingly.
9691 	 */
9692 	if (ni) {
9693 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9694 			if (ni->ni_rxmcs[1] != 0)
9695 				watermark = IWM_SF_W_MARK_MIMO2;
9696 			else
9697 				watermark = IWM_SF_W_MARK_SISO;
9698 		} else {
9699 			watermark = IWM_SF_W_MARK_LEGACY;
9700 		}
9701 	/* default watermark value for unassociated mode. */
9702 	} else {
9703 		watermark = IWM_SF_W_MARK_MIMO2;
9704 	}
9705 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
9706 
9707 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
9708 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
9709 			sf_cmd->long_delay_timeouts[i][j] =
9710 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
9711 		}
9712 	}
9713 
9714 	if (ni) {
9715 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9716 		       sizeof(iwm_sf_full_timeout));
9717 	} else {
9718 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9719 		       sizeof(iwm_sf_full_timeout_def));
9720 	}
9721 
9722 }
9723 
9724 int
9725 iwm_sf_config(struct iwm_softc *sc, int new_state)
9726 {
9727 	struct ieee80211com *ic = &sc->sc_ic;
9728 	struct iwm_sf_cfg_cmd sf_cmd = {
9729 		.state = htole32(new_state),
9730 	};
9731 	int err = 0;
9732 
9733 #if 0	/* only used for models with sdio interface, in iwlwifi */
9734 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9735 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
9736 #endif
9737 
9738 	switch (new_state) {
9739 	case IWM_SF_UNINIT:
9740 	case IWM_SF_INIT_OFF:
9741 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
9742 		break;
9743 	case IWM_SF_FULL_ON:
9744 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9745 		break;
9746 	default:
9747 		return EINVAL;
9748 	}
9749 
9750 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9751 				   sizeof(sf_cmd), &sf_cmd);
9752 	return err;
9753 }
9754 
9755 int
9756 iwm_send_bt_init_conf(struct iwm_softc *sc)
9757 {
9758 	struct iwm_bt_coex_cmd bt_cmd;
9759 
9760 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
9761 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
9762 
9763 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9764 	    &bt_cmd);
9765 }
9766 
9767 int
9768 iwm_send_soc_conf(struct iwm_softc *sc)
9769 {
9770 	struct iwm_soc_configuration_cmd cmd;
9771 	int err;
9772 	uint32_t cmd_id, flags = 0;
9773 
9774 	memset(&cmd, 0, sizeof(cmd));
9775 
9776 	/*
9777 	 * In VER_1 of this command, the discrete value is considered
9778 	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
9779 	 * values in VER_1, this is backwards-compatible with VER_2,
9780 	 * as long as we don't set any other flag bits.
9781 	 */
9782 	if (!sc->sc_integrated) { /* VER_1 */
9783 		flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9784 	} else { /* VER_2 */
9785 		uint8_t scan_cmd_ver;
9786 		if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9787 			flags |= (sc->sc_ltr_delay &
9788 			    IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9789 		scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9790 		    IWM_SCAN_REQ_UMAC);
9791 		if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
9792 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9793 			flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9794 	}
9795 	cmd.flags = htole32(flags);
9796 
9797 	cmd.latency = htole32(sc->sc_xtal_latency);
9798 
9799 	cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
9800 	err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9801 	if (err)
9802 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9803 	return err;
9804 }
9805 
9806 int
9807 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9808 {
9809 	struct iwm_mcc_update_cmd mcc_cmd;
9810 	struct iwm_host_cmd hcmd = {
9811 		.id = IWM_MCC_UPDATE_CMD,
9812 		.flags = IWM_CMD_WANT_RESP,
9813 		.resp_pkt_len = IWM_CMD_RESP_MAX,
9814 		.data = { &mcc_cmd },
9815 	};
9816 	struct iwm_rx_packet *pkt;
9817 	size_t resp_len;
9818 	int err;
9819 	int resp_v3 = isset(sc->sc_enabled_capa,
9820 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);
9821 
9822 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9823 	    !sc->sc_nvm.lar_enabled) {
9824 		return 0;
9825 	}
9826 
9827 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9828 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
9829 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9830 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9831 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
9832 	else
9833 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
9834 
9835 	if (resp_v3) { /* same size as resp_v2 */
9836 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9837 	} else {
9838 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9839 	}
9840 
9841 	err = iwm_send_cmd(sc, &hcmd);
9842 	if (err)
9843 		return err;
9844 
9845 	pkt = hcmd.resp_pkt;
9846 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
9847 		err = EIO;
9848 		goto out;
9849 	}
9850 
9851 	if (resp_v3) {
9852 		struct iwm_mcc_update_resp_v3 *resp;
9853 		resp_len = iwm_rx_packet_payload_len(pkt);
9854 		if (resp_len < sizeof(*resp)) {
9855 			err = EIO;
9856 			goto out;
9857 		}
9858 
9859 		resp = (void *)pkt->data;
9860 		if (resp_len != sizeof(*resp) +
9861 		    resp->n_channels * sizeof(resp->channels[0])) {
9862 			err = EIO;
9863 			goto out;
9864 		}
9865 	} else {
9866 		struct iwm_mcc_update_resp_v1 *resp_v1;
9867 		resp_len = iwm_rx_packet_payload_len(pkt);
9868 		if (resp_len < sizeof(*resp_v1)) {
9869 			err = EIO;
9870 			goto out;
9871 		}
9872 
9873 		resp_v1 = (void *)pkt->data;
9874 		if (resp_len != sizeof(*resp_v1) +
9875 		    resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9876 			err = EIO;
9877 			goto out;
9878 		}
9879 	}
9880 out:
9881 	iwm_free_resp(sc, &hcmd);
9882 	return err;
9883 }
9884 
9885 int
9886 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9887 {
9888 	struct iwm_temp_report_ths_cmd cmd;
9889 	int err;
9890 
9891 	/*
9892 	 * In order to give responsibility for critical-temperature-kill
9893 	 * and TX backoff to FW we need to send an empty temperature
9894 	 * reporting command at init time.
9895 	 */
9896 	memset(&cmd, 0, sizeof(cmd));
9897 
9898 	err = iwm_send_cmd_pdu(sc,
9899 	    IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
9900 	    0, sizeof(cmd), &cmd);
9901 	if (err)
9902 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9903 		    DEVNAME(sc), err);
9904 
9905 	return err;
9906 }
9907 
9908 void
9909 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9910 {
9911 	struct iwm_host_cmd cmd = {
9912 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
9913 		.len = { sizeof(uint32_t), },
9914 		.data = { &backoff, },
9915 	};
9916 
9917 	iwm_send_cmd(sc, &cmd);
9918 }
9919 
9920 void
9921 iwm_free_fw_paging(struct iwm_softc *sc)
9922 {
9923 	int i;
9924 
9925 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9926 		return;
9927 
9928 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
9929 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9930 	}
9931 
9932 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9933 }
9934 
9935 int
9936 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9937 {
9938 	int sec_idx, idx;
9939 	uint32_t offset = 0;
9940 
9941 	/*
9942 	 * find where is the paging image start point:
9943 	 * if CPU2 exist and it's in paging format, then the image looks like:
9944 	 * CPU1 sections (2 or more)
9945 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
9946 	 * CPU2 sections (not paged)
9947 	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
9948 	 * non paged to CPU2 paging sec
9949 	 * CPU2 paging CSS
9950 	 * CPU2 paging image (including instruction and data)
9951 	 */
9952 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
9953 		if (image->fw_sect[sec_idx].fws_devoff ==
9954 		    IWM_PAGING_SEPARATOR_SECTION) {
9955 			sec_idx++;
9956 			break;
9957 		}
9958 	}
9959 
9960 	/*
9961 	 * If paging is enabled there should be at least 2 more sections left
9962 	 * (one for CSS and one for Paging data)
9963 	 */
9964 	if (sec_idx >= nitems(image->fw_sect) - 1) {
9965 		printf("%s: Paging: Missing CSS and/or paging sections\n",
9966 		    DEVNAME(sc));
9967 		iwm_free_fw_paging(sc);
9968 		return EINVAL;
9969 	}
9970 
9971 	/* copy the CSS block to the dram */
9972 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
9973 	    DEVNAME(sc), sec_idx));
9974 
9975 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
9976 	    image->fw_sect[sec_idx].fws_data,
9977 	    sc->fw_paging_db[0].fw_paging_size);
9978 
9979 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
9980 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
9981 
9982 	sec_idx++;
9983 
9984 	/*
9985 	 * copy the paging blocks to the dram
9986 	 * loop index start from 1 since that CSS block already copied to dram
9987 	 * and CSS index is 0.
9988 	 * loop stop at num_of_paging_blk since that last block is not full.
9989 	 */
9990 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
9991 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9992 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
9993 		    sc->fw_paging_db[idx].fw_paging_size);
9994 
9995 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
9996 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
9997 
9998 		offset += sc->fw_paging_db[idx].fw_paging_size;
9999 	}
10000 
10001 	/* copy the last paging block */
10002 	if (sc->num_of_pages_in_last_blk > 0) {
10003 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10004 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
10005 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
10006 
10007 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
10008 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
10009 	}
10010 
10011 	return 0;
10012 }
10013 
10014 int
10015 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10016 {
10017 	int blk_idx = 0;
10018 	int error, num_of_pages;
10019 
10020 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
10021 		int i;
10022 		/* Device got reset, and we setup firmware paging again */
10023 		bus_dmamap_sync(sc->sc_dmat,
10024 		    sc->fw_paging_db[0].fw_paging_block.map,
10025 		    0, IWM_FW_PAGING_SIZE,
10026 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10027 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
10028 			bus_dmamap_sync(sc->sc_dmat,
10029 			    sc->fw_paging_db[i].fw_paging_block.map,
10030 			    0, IWM_PAGING_BLOCK_SIZE,
10031 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10032 		}
10033 		return 0;
10034 	}
10035 
10036 	/* ensure IWM_BLOCK_2_EXP_SIZE is power of 2 of IWM_PAGING_BLOCK_SIZE */
10037 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
10038 #error IWM_BLOCK_2_EXP_SIZE must be power of 2 of IWM_PAGING_BLOCK_SIZE
10039 #endif
10040 
10041 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
10042 	sc->num_of_paging_blk =
10043 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
10044 
10045 	sc->num_of_pages_in_last_blk =
10046 		num_of_pages -
10047 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
10048 
10049 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
10050 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
10051 	    sc->num_of_paging_blk,
10052 	    sc->num_of_pages_in_last_blk));
10053 
10054 	/* allocate block of 4Kbytes for paging CSS */
10055 	error = iwm_dma_contig_alloc(sc->sc_dmat,
10056 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
10057 	    4096);
10058 	if (error) {
10059 		/* free all the previous pages since we failed */
10060 		iwm_free_fw_paging(sc);
10061 		return ENOMEM;
10062 	}
10063 
10064 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
10065 
10066 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
10067 	    DEVNAME(sc)));
10068 
10069 	/*
10070 	 * allocate blocks in dram.
10071 	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
10072 	 */
10073 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10074 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
10075 		/* XXX Use iwm_dma_contig_alloc for allocating */
10076 		error = iwm_dma_contig_alloc(sc->sc_dmat,
10077 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
10078 		    IWM_PAGING_BLOCK_SIZE, 4096);
10079 		if (error) {
10080 			/* free all the previous pages since we failed */
10081 			iwm_free_fw_paging(sc);
10082 			return ENOMEM;
10083 		}
10084 
10085 		sc->fw_paging_db[blk_idx].fw_paging_size =
10086 		    IWM_PAGING_BLOCK_SIZE;
10087 
10088 		DPRINTF((
10089 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
10090 		    DEVNAME(sc)));
10091 	}
10092 
10093 	return 0;
10094 }
10095 
10096 int
10097 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10098 {
10099 	int ret;
10100 
10101 	ret = iwm_alloc_fw_paging_mem(sc, fw);
10102 	if (ret)
10103 		return ret;
10104 
10105 	return iwm_fill_paging_mem(sc, fw);
10106 }
10107 
10108 /* send paging cmd to FW in case CPU2 has paging image */
10109 int
10110 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10111 {
10112 	int blk_idx;
10113 	uint32_t dev_phy_addr;
10114 	struct iwm_fw_paging_cmd fw_paging_cmd = {
10115 		.flags =
10116 			htole32(IWM_PAGING_CMD_IS_SECURED |
10117 				IWM_PAGING_CMD_IS_ENABLED |
10118 				(sc->num_of_pages_in_last_blk <<
10119 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
10120 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
10121 		.block_num = htole32(sc->num_of_paging_blk),
10122 	};
10123 
10124 	/* loop for all paging blocks + CSS block */
10125 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10126 		dev_phy_addr = htole32(
10127 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
10128 		    IWM_PAGE_2_EXP_SIZE);
10129 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
10130 		bus_dmamap_sync(sc->sc_dmat,
10131 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
10132 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
10133 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10134 	}
10135 
10136 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
10137 					       IWM_LONG_GROUP, 0),
10138 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
10139 }
10140 
10141 int
10142 iwm_init_hw(struct iwm_softc *sc)
10143 {
10144 	struct ieee80211com *ic = &sc->sc_ic;
10145 	int err, i, ac, qid, s;
10146 
10147 	err = iwm_run_init_mvm_ucode(sc, 0);
10148 	if (err)
10149 		return err;
10150 
10151 	/* Should stop and start HW since INIT image just loaded. */
10152 	iwm_stop_device(sc);
10153 	err = iwm_start_hw(sc);
10154 	if (err) {
10155 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10156 		return err;
10157 	}
10158 
10159 	/* Restart, this time with the regular firmware */
10160 	s = splnet();
10161 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
10162 	if (err) {
10163 		printf("%s: could not load firmware\n", DEVNAME(sc));
10164 		splx(s);
10165 		return err;
10166 	}
10167 
10168 	if (!iwm_nic_lock(sc)) {
10169 		splx(s);
10170 		return EBUSY;
10171 	}
10172 
10173 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
10174 	if (err) {
10175 		printf("%s: could not init tx ant config (error %d)\n",
10176 		    DEVNAME(sc), err);
10177 		goto err;
10178 	}
10179 
10180 	err = iwm_send_phy_db_data(sc);
10181 	if (err) {
10182 		printf("%s: could not init phy db (error %d)\n",
10183 		    DEVNAME(sc), err);
10184 		goto err;
10185 	}
10186 
10187 	err = iwm_send_phy_cfg_cmd(sc);
10188 	if (err) {
10189 		printf("%s: could not send phy config (error %d)\n",
10190 		    DEVNAME(sc), err);
10191 		goto err;
10192 	}
10193 
10194 	err = iwm_send_bt_init_conf(sc);
10195 	if (err) {
10196 		printf("%s: could not init bt coex (error %d)\n",
10197 		    DEVNAME(sc), err);
10198 		goto err;
10199 	}
10200 
10201 	if (isset(sc->sc_enabled_capa,
10202 	    IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
10203 		err = iwm_send_soc_conf(sc);
10204 		if (err)
10205 			goto err;
10206 	}
10207 
10208 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
10209 		err = iwm_send_dqa_cmd(sc);
10210 		if (err)
10211 			goto err;
10212 	}
10213 
10214 	/* Add auxiliary station for scanning */
10215 	err = iwm_add_aux_sta(sc);
10216 	if (err) {
10217 		printf("%s: could not add aux station (error %d)\n",
10218 		    DEVNAME(sc), err);
10219 		goto err;
10220 	}
10221 
10222 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
10223 		/*
10224 		 * The channel used here isn't relevant as it's
10225 		 * going to be overwritten in the other flows.
10226 		 * For now use the first channel we have.
10227 		 */
10228 		sc->sc_phyctxt[i].id = i;
10229 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
10230 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
10231 		    IWM_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
10232 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
10233 		if (err) {
10234 			printf("%s: could not add phy context %d (error %d)\n",
10235 			    DEVNAME(sc), i, err);
10236 			goto err;
10237 		}
10238 	}
10239 
10240 	/* Initialize tx backoffs to the minimum. */
10241 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
10242 		iwm_tt_tx_backoff(sc, 0);
10243 
10244 
10245 	err = iwm_config_ltr(sc);
10246 	if (err) {
10247 		printf("%s: PCIe LTR configuration failed (error %d)\n",
10248 		    DEVNAME(sc), err);
10249 	}
10250 
10251 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
10252 		err = iwm_send_temp_report_ths_cmd(sc);
10253 		if (err)
10254 			goto err;
10255 	}
10256 
10257 	err = iwm_power_update_device(sc);
10258 	if (err) {
10259 		printf("%s: could not send power command (error %d)\n",
10260 		    DEVNAME(sc), err);
10261 		goto err;
10262 	}
10263 
10264 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
10265 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
10266 		if (err) {
10267 			printf("%s: could not init LAR (error %d)\n",
10268 			    DEVNAME(sc), err);
10269 			goto err;
10270 		}
10271 	}
10272 
10273 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
10274 		err = iwm_config_umac_scan(sc);
10275 		if (err) {
10276 			printf("%s: could not configure scan (error %d)\n",
10277 			    DEVNAME(sc), err);
10278 			goto err;
10279 		}
10280 	}
10281 
10282 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10283 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10284 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
10285 		else
10286 			qid = IWM_AUX_QUEUE;
10287 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
10288 		    iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
10289 		if (err) {
10290 			printf("%s: could not enable monitor inject Tx queue "
10291 			    "(error %d)\n", DEVNAME(sc), err);
10292 			goto err;
10293 		}
10294 	} else {
10295 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
10296 			if (isset(sc->sc_enabled_capa,
10297 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10298 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
10299 			else
10300 				qid = ac;
10301 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
10302 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
10303 			if (err) {
10304 				printf("%s: could not enable Tx queue %d "
10305 				    "(error %d)\n", DEVNAME(sc), ac, err);
10306 				goto err;
10307 			}
10308 		}
10309 	}
10310 
10311 	err = iwm_disable_beacon_filter(sc);
10312 	if (err) {
10313 		printf("%s: could not disable beacon filter (error %d)\n",
10314 		    DEVNAME(sc), err);
10315 		goto err;
10316 	}
10317 
10318 err:
10319 	iwm_nic_unlock(sc);
10320 	splx(s);
10321 	return err;
10322 }
10323 
10324 /* Allow multicast from our BSSID. */
10325 int
10326 iwm_allow_mcast(struct iwm_softc *sc)
10327 {
10328 	struct ieee80211com *ic = &sc->sc_ic;
10329 	struct iwm_node *in = (void *)ic->ic_bss;
10330 	struct iwm_mcast_filter_cmd *cmd;
10331 	size_t size;
10332 	int err;
10333 
10334 	size = roundup(sizeof(*cmd), 4);
10335 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
10336 	if (cmd == NULL)
10337 		return ENOMEM;
10338 	cmd->filter_own = 1;
10339 	cmd->port_id = 0;
10340 	cmd->count = 0;
10341 	cmd->pass_all = 1;
10342 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
10343 
10344 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
10345 	    0, size, cmd);
10346 	free(cmd, M_DEVBUF, size);
10347 	return err;
10348 }
10349 
10350 int
10351 iwm_init(struct ifnet *ifp)
10352 {
10353 	struct iwm_softc *sc = ifp->if_softc;
10354 	struct ieee80211com *ic = &sc->sc_ic;
10355 	int err, generation;
10356 
10357 	rw_assert_wrlock(&sc->ioctl_rwl);
10358 
10359 	generation = ++sc->sc_generation;
10360 
10361 	err = iwm_preinit(sc);
10362 	if (err)
10363 		return err;
10364 
10365 	err = iwm_start_hw(sc);
10366 	if (err) {
10367 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10368 		return err;
10369 	}
10370 
10371 	err = iwm_init_hw(sc);
10372 	if (err) {
10373 		if (generation == sc->sc_generation)
10374 			iwm_stop_device(sc);
10375 		return err;
10376 	}
10377 
10378 	if (sc->sc_nvm.sku_cap_11n_enable)
10379 		iwm_setup_ht_rates(sc);
10380 	if (sc->sc_nvm.sku_cap_11ac_enable)
10381 		iwm_setup_vht_rates(sc);
10382 
10383 	KASSERT(sc->task_refs.r_refs == 0);
10384 	refcnt_init(&sc->task_refs);
10385 	ifq_clr_oactive(&ifp->if_snd);
10386 	ifp->if_flags |= IFF_RUNNING;
10387 
10388 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10389 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
10390 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10391 		return 0;
10392 	}
10393 
10394 	ieee80211_begin_scan(ifp);
10395 
10396 	/*
10397 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
10398 	 * Wait until the transition to SCAN state has completed.
10399 	 */
10400 	do {
10401 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
10402 		    SEC_TO_NSEC(1));
10403 		if (generation != sc->sc_generation)
10404 			return ENXIO;
10405 		if (err) {
10406 			iwm_stop(ifp);
10407 			return err;
10408 		}
10409 	} while (ic->ic_state != IEEE80211_S_SCAN);
10410 
10411 	return 0;
10412 }
10413 
10414 void
10415 iwm_start(struct ifnet *ifp)
10416 {
10417 	struct iwm_softc *sc = ifp->if_softc;
10418 	struct ieee80211com *ic = &sc->sc_ic;
10419 	struct ieee80211_node *ni;
10420 	struct ether_header *eh;
10421 	struct mbuf *m;
10422 	int ac = EDCA_AC_BE; /* XXX */
10423 
10424 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
10425 		return;
10426 
10427 	for (;;) {
10428 		/* why isn't this done per-queue? */
10429 		if (sc->qfullmsk != 0) {
10430 			ifq_set_oactive(&ifp->if_snd);
10431 			break;
10432 		}
10433 
10434 		/* Don't queue additional frames while flushing Tx queues. */
10435 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10436 			break;
10437 
10438 		/* need to send management frames even if we're not RUNning */
10439 		m = mq_dequeue(&ic->ic_mgtq);
10440 		if (m) {
10441 			ni = m->m_pkthdr.ph_cookie;
10442 			goto sendit;
10443 		}
10444 
10445 		if (ic->ic_state != IEEE80211_S_RUN ||
10446 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
10447 			break;
10448 
10449 		m = ifq_dequeue(&ifp->if_snd);
10450 		if (!m)
10451 			break;
10452 		if (m->m_len < sizeof (*eh) &&
10453 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
10454 			ifp->if_oerrors++;
10455 			continue;
10456 		}
10457 #if NBPFILTER > 0
10458 		if (ifp->if_bpf != NULL)
10459 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
10460 #endif
10461 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
10462 			ifp->if_oerrors++;
10463 			continue;
10464 		}
10465 
10466  sendit:
10467 #if NBPFILTER > 0
10468 		if (ic->ic_rawbpf != NULL)
10469 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10470 #endif
10471 		if (iwm_tx(sc, m, ni, ac) != 0) {
10472 			ieee80211_release_node(ic, ni);
10473 			ifp->if_oerrors++;
10474 			continue;
10475 		}
10476 
10477 		if (ifp->if_flags & IFF_UP)
10478 			ifp->if_timer = 1;
10479 	}
10480 
10481 	return;
10482 }
10483 
10484 void
10485 iwm_stop(struct ifnet *ifp)
10486 {
10487 	struct iwm_softc *sc = ifp->if_softc;
10488 	struct ieee80211com *ic = &sc->sc_ic;
10489 	struct iwm_node *in = (void *)ic->ic_bss;
10490 	int i, s = splnet();
10491 
10492 	rw_assert_wrlock(&sc->ioctl_rwl);
10493 
10494 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10495 
10496 	/* Cancel scheduled tasks and let any stale tasks finish up. */
10497 	task_del(systq, &sc->init_task);
10498 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10499 	iwm_del_task(sc, systq, &sc->ba_task);
10500 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10501 	iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10502 	iwm_del_task(sc, systq, &sc->bgscan_done_task);
10503 	KASSERT(sc->task_refs.r_refs >= 1);
10504 	refcnt_finalize(&sc->task_refs, "iwmstop");
10505 
10506 	iwm_stop_device(sc);
10507 
10508 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10509 	sc->bgscan_unref_arg = NULL;
10510 	sc->bgscan_unref_arg_size = 0;
10511 
10512 	/* Reset soft state. */
10513 
10514 	sc->sc_generation++;
10515 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10516 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10517 		sc->sc_cmd_resp_pkt[i] = NULL;
10518 		sc->sc_cmd_resp_len[i] = 0;
10519 	}
10520 	ifp->if_flags &= ~IFF_RUNNING;
10521 	ifq_clr_oactive(&ifp->if_snd);
10522 
10523 	in->in_phyctxt = NULL;
10524 	in->tid_disable_ampdu = 0xffff;
10525 	in->tfd_queue_msk = 0;
10526 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10527 
10528 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10529 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10530 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10531 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10532 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10533 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10534 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10535 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10536 
10537 	sc->sc_rx_ba_sessions = 0;
10538 	sc->ba_rx.start_tidmask = 0;
10539 	sc->ba_rx.stop_tidmask = 0;
10540 	sc->tx_ba_queue_mask = 0;
10541 	sc->ba_tx.start_tidmask = 0;
10542 	sc->ba_tx.stop_tidmask = 0;
10543 
10544 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10545 	sc->ns_nstate = IEEE80211_S_INIT;
10546 
10547 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10548 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10549 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10550 		iwm_clear_reorder_buffer(sc, rxba);
10551 	}
10552 	iwm_led_blink_stop(sc);
10553 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10554 	ifp->if_timer = 0;
10555 
10556 	splx(s);
10557 }
10558 
10559 void
10560 iwm_watchdog(struct ifnet *ifp)
10561 {
10562 	struct iwm_softc *sc = ifp->if_softc;
10563 	int i;
10564 
10565 	ifp->if_timer = 0;
10566 
10567 	/*
10568 	 * We maintain a separate timer for each Tx queue because
10569 	 * Tx aggregation queues can get "stuck" while other queues
10570 	 * keep working. The Linux driver uses a similar workaround.
10571 	 */
10572 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10573 		if (sc->sc_tx_timer[i] > 0) {
10574 			if (--sc->sc_tx_timer[i] == 0) {
10575 				printf("%s: device timeout\n", DEVNAME(sc));
10576 				if (ifp->if_flags & IFF_DEBUG) {
10577 					iwm_nic_error(sc);
10578 					iwm_dump_driver_status(sc);
10579 				}
10580 				if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10581 					task_add(systq, &sc->init_task);
10582 				ifp->if_oerrors++;
10583 				return;
10584 			}
10585 			ifp->if_timer = 1;
10586 		}
10587 	}
10588 
10589 	ieee80211_watchdog(ifp);
10590 }
10591 
10592 int
10593 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10594 {
10595 	struct iwm_softc *sc = ifp->if_softc;
10596 	int s, err = 0, generation = sc->sc_generation;
10597 
10598 	/*
10599 	 * Prevent processes from entering this function while another
10600 	 * process is tsleep'ing in it.
10601 	 */
10602 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10603 	if (err == 0 && generation != sc->sc_generation) {
10604 		rw_exit(&sc->ioctl_rwl);
10605 		return ENXIO;
10606 	}
10607 	if (err)
10608 		return err;
10609 	s = splnet();
10610 
10611 	switch (cmd) {
10612 	case SIOCSIFADDR:
10613 		ifp->if_flags |= IFF_UP;
10614 		/* FALLTHROUGH */
10615 	case SIOCSIFFLAGS:
10616 		if (ifp->if_flags & IFF_UP) {
10617 			if (!(ifp->if_flags & IFF_RUNNING)) {
10618 				/* Force reload of firmware image from disk. */
10619 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10620 				err = iwm_init(ifp);
10621 			}
10622 		} else {
10623 			if (ifp->if_flags & IFF_RUNNING)
10624 				iwm_stop(ifp);
10625 		}
10626 		break;
10627 
10628 	default:
10629 		err = ieee80211_ioctl(ifp, cmd, data);
10630 	}
10631 
10632 	if (err == ENETRESET) {
10633 		err = 0;
10634 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10635 		    (IFF_UP | IFF_RUNNING)) {
10636 			iwm_stop(ifp);
10637 			err = iwm_init(ifp);
10638 		}
10639 	}
10640 
10641 	splx(s);
10642 	rw_exit(&sc->ioctl_rwl);
10643 
10644 	return err;
10645 }
10646 
10647 /*
10648  * Note: This structure is read from the device with IO accesses,
10649  * and the reading already does the endian conversion. As it is
10650  * read with uint32_t-sized accesses, any members with a different size
10651  * need to be ordered correctly though!
10652  */
10653 struct iwm_error_event_table {
10654 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10655 	uint32_t error_id;		/* type of error */
10656 	uint32_t trm_hw_status0;	/* TRM HW status */
10657 	uint32_t trm_hw_status1;	/* TRM HW status */
10658 	uint32_t blink2;		/* branch link */
10659 	uint32_t ilink1;		/* interrupt link */
10660 	uint32_t ilink2;		/* interrupt link */
10661 	uint32_t data1;		/* error-specific data */
10662 	uint32_t data2;		/* error-specific data */
10663 	uint32_t data3;		/* error-specific data */
10664 	uint32_t bcon_time;		/* beacon timer */
10665 	uint32_t tsf_low;		/* network timestamp function timer */
10666 	uint32_t tsf_hi;		/* network timestamp function timer */
10667 	uint32_t gp1;		/* GP1 timer register */
10668 	uint32_t gp2;		/* GP2 timer register */
10669 	uint32_t fw_rev_type;	/* firmware revision type */
10670 	uint32_t major;		/* uCode version major */
10671 	uint32_t minor;		/* uCode version minor */
10672 	uint32_t hw_ver;		/* HW Silicon version */
10673 	uint32_t brd_ver;		/* HW board version */
10674 	uint32_t log_pc;		/* log program counter */
10675 	uint32_t frame_ptr;		/* frame pointer */
10676 	uint32_t stack_ptr;		/* stack pointer */
10677 	uint32_t hcmd;		/* last host command header */
10678 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
10679 				 * rxtx_flag */
10680 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
10681 				 * host_flag */
10682 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
10683 				 * enc_flag */
10684 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
10685 				 * time_flag */
10686 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
10687 				 * wico interrupt */
10688 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
10689 	uint32_t wait_event;		/* wait event() caller address */
10690 	uint32_t l2p_control;	/* L2pControlField */
10691 	uint32_t l2p_duration;	/* L2pDurationField */
10692 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
10693 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
10694 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
10695 				 * (LMPM_PMG_SEL) */
10696 	uint32_t u_timestamp;	/* indicate when the date and time of the
10697 				 * compilation */
10698 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
10699 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
10700 
10701 /*
10702  * UMAC error struct - relevant starting from family 8000 chip.
10703  * Note: This structure is read from the device with IO accesses,
10704  * and the reading already does the endian conversion. As it is
10705  * read with u32-sized accesses, any members with a different size
10706  * need to be ordered correctly though!
10707  */
10708 struct iwm_umac_error_event_table {
10709 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10710 	uint32_t error_id;	/* type of error */
10711 	uint32_t blink1;	/* branch link */
10712 	uint32_t blink2;	/* branch link */
10713 	uint32_t ilink1;	/* interrupt link */
10714 	uint32_t ilink2;	/* interrupt link */
10715 	uint32_t data1;		/* error-specific data */
10716 	uint32_t data2;		/* error-specific data */
10717 	uint32_t data3;		/* error-specific data */
10718 	uint32_t umac_major;
10719 	uint32_t umac_minor;
10720 	uint32_t frame_pointer;	/* core register 27*/
10721 	uint32_t stack_pointer;	/* core register 28 */
10722 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
10723 	uint32_t nic_isr_pref;	/* ISR status register */
10724 } __packed;
10725 
10726 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
10727 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
10728 
10729 void
10730 iwm_nic_umac_error(struct iwm_softc *sc)
10731 {
10732 	struct iwm_umac_error_event_table table;
10733 	uint32_t base;
10734 
10735 	base = sc->sc_uc.uc_umac_error_event_table;
10736 
10737 	if (base < 0x800000) {
10738 		printf("%s: Invalid error log pointer 0x%08x\n",
10739 		    DEVNAME(sc), base);
10740 		return;
10741 	}
10742 
10743 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10744 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10745 		return;
10746 	}
10747 
10748 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10749 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10750 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10751 			sc->sc_flags, table.valid);
10752 	}
10753 
10754 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10755 		iwm_desc_lookup(table.error_id));
10756 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10757 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10758 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10759 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10760 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10761 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10762 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10763 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10764 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10765 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10766 	    table.frame_pointer);
10767 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10768 	    table.stack_pointer);
10769 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10770 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10771 	    table.nic_isr_pref);
10772 }
10773 
10774 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10775 static struct {
10776 	const char *name;
10777 	uint8_t num;
10778 } advanced_lookup[] = {
10779 	{ "NMI_INTERRUPT_WDG", 0x34 },
10780 	{ "SYSASSERT", 0x35 },
10781 	{ "UCODE_VERSION_MISMATCH", 0x37 },
10782 	{ "BAD_COMMAND", 0x38 },
10783 	{ "BAD_COMMAND", 0x39 },
10784 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10785 	{ "FATAL_ERROR", 0x3D },
10786 	{ "NMI_TRM_HW_ERR", 0x46 },
10787 	{ "NMI_INTERRUPT_TRM", 0x4C },
10788 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10789 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10790 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10791 	{ "NMI_INTERRUPT_HOST", 0x66 },
10792 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10793 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10794 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10795 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
10796 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
10797 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10798 	{ "ADVANCED_SYSASSERT", 0 },
10799 };
10800 
10801 const char *
10802 iwm_desc_lookup(uint32_t num)
10803 {
10804 	int i;
10805 
10806 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10807 		if (advanced_lookup[i].num ==
10808 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10809 			return advanced_lookup[i].name;
10810 
10811 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10812 	return advanced_lookup[i].name;
10813 }
10814 
10815 /*
10816  * Support for dumping the error log seemed like a good idea ...
10817  * but it's mostly hex junk and the only sensible thing is the
10818  * hw/ucode revision (which we know anyway).  Since it's here,
10819  * I'll just leave it in, just in case e.g. the Intel guys want to
10820  * help us decipher some "ADVANCED_SYSASSERT" later.
10821  */
10822 void
10823 iwm_nic_error(struct iwm_softc *sc)
10824 {
10825 	struct iwm_error_event_table table;
10826 	uint32_t base;
10827 
10828 	printf("%s: dumping device error log\n", DEVNAME(sc));
10829 	base = sc->sc_uc.uc_error_event_table;
10830 	if (base < 0x800000) {
10831 		printf("%s: Invalid error log pointer 0x%08x\n",
10832 		    DEVNAME(sc), base);
10833 		return;
10834 	}
10835 
10836 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10837 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10838 		return;
10839 	}
10840 
10841 	if (!table.valid) {
10842 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10843 		return;
10844 	}
10845 
10846 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10847 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10848 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10849 		    sc->sc_flags, table.valid);
10850 	}
10851 
10852 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10853 	    iwm_desc_lookup(table.error_id));
10854 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10855 	    table.trm_hw_status0);
10856 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10857 	    table.trm_hw_status1);
10858 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10859 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10860 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10861 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10862 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10863 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10864 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10865 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10866 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10867 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10868 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10869 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10870 	    table.fw_rev_type);
10871 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10872 	    table.major);
10873 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10874 	    table.minor);
10875 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10876 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10877 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10878 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10879 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10880 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10881 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10882 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10883 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10884 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10885 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10886 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10887 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10888 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10889 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10890 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10891 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10892 
10893 	if (sc->sc_uc.uc_umac_error_event_table)
10894 		iwm_nic_umac_error(sc);
10895 }
10896 
10897 void
10898 iwm_dump_driver_status(struct iwm_softc *sc)
10899 {
10900 	int i;
10901 
10902 	printf("driver status:\n");
10903 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
10904 		struct iwm_tx_ring *ring = &sc->txq[i];
10905 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
10906 		    "queued=%-3d\n",
10907 		    i, ring->qid, ring->cur, ring->queued);
10908 	}
10909 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
10910 	printf("  802.11 state %s\n",
10911 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10912 }
10913 
10914 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10915 do {									\
10916 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10917 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10918 	_var_ = (void *)((_pkt_)+1);					\
10919 } while (/*CONSTCOND*/0)
10920 
10921 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10922 do {									\
10923 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10924 	    sizeof(len), BUS_DMASYNC_POSTREAD);				\
10925 	_ptr_ = (void *)((_pkt_)+1);					\
10926 } while (/*CONSTCOND*/0)
10927 
10928 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
10929 
10930 int
10931 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10932 {
10933 	int qid, idx, code;
10934 
10935 	qid = pkt->hdr.qid & ~0x80;
10936 	idx = pkt->hdr.idx;
10937 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10938 
10939 	return (!(qid == 0 && idx == 0 && code == 0) &&
10940 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10941 }
10942 
10943 void
10944 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10945 {
10946 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10947 	struct iwm_rx_packet *pkt, *nextpkt;
10948 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10949 	struct mbuf *m0, *m;
10950 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10951 	int qid, idx, code, handled = 1;
10952 
10953 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10954 	    BUS_DMASYNC_POSTREAD);
10955 
10956 	m0 = data->m;
10957 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
10958 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
10959 		qid = pkt->hdr.qid;
10960 		idx = pkt->hdr.idx;
10961 
10962 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10963 
10964 		if (!iwm_rx_pkt_valid(pkt))
10965 			break;
10966 
10967 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
10968 		if (len < minsz || len > (IWM_RBUF_SIZE - offset))
10969 			break;
10970 
10971 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10972 			/* Take mbuf m0 off the RX ring. */
10973 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
10974 				ifp->if_ierrors++;
10975 				break;
10976 			}
10977 			KASSERT(data->m != m0);
10978 		}
10979 
10980 		switch (code) {
10981 		case IWM_REPLY_RX_PHY_CMD:
10982 			iwm_rx_rx_phy_cmd(sc, pkt, data);
10983 			break;
10984 
10985 		case IWM_REPLY_RX_MPDU_CMD: {
10986 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
10987 			nextoff = offset +
10988 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10989 			nextpkt = (struct iwm_rx_packet *)
10990 			    (m0->m_data + nextoff);
10991 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
10992 			    !iwm_rx_pkt_valid(nextpkt)) {
10993 				/* No need to copy last frame in buffer. */
10994 				if (offset > 0)
10995 					m_adj(m0, offset);
10996 				if (sc->sc_mqrx_supported)
10997 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
10998 					    maxlen, ml);
10999 				else
11000 					iwm_rx_mpdu(sc, m0, pkt->data,
11001 					    maxlen, ml);
11002 				m0 = NULL; /* stack owns m0 now; abort loop */
11003 			} else {
11004 				/*
11005 				 * Create an mbuf which points to the current
11006 				 * packet. Always copy from offset zero to
11007 				 * preserve m_pkthdr.
11008 				 */
11009 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
11010 				if (m == NULL) {
11011 					ifp->if_ierrors++;
11012 					m_freem(m0);
11013 					m0 = NULL;
11014 					break;
11015 				}
11016 				m_adj(m, offset);
11017 				if (sc->sc_mqrx_supported)
11018 					iwm_rx_mpdu_mq(sc, m, pkt->data,
11019 					    maxlen, ml);
11020 				else
11021 					iwm_rx_mpdu(sc, m, pkt->data,
11022 					    maxlen, ml);
11023 			}
11024  			break;
11025 		}
11026 
11027 		case IWM_TX_CMD:
11028 			iwm_rx_tx_cmd(sc, pkt, data);
11029 			break;
11030 
11031 		case IWM_BA_NOTIF:
11032 			iwm_rx_compressed_ba(sc, pkt);
11033 			break;
11034 
11035 		case IWM_MISSED_BEACONS_NOTIFICATION:
11036 			iwm_rx_bmiss(sc, pkt, data);
11037 			break;
11038 
11039 		case IWM_MFUART_LOAD_NOTIFICATION:
11040 			break;
11041 
11042 		case IWM_ALIVE: {
11043 			struct iwm_alive_resp_v1 *resp1;
11044 			struct iwm_alive_resp_v2 *resp2;
11045 			struct iwm_alive_resp_v3 *resp3;
11046 
11047 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
11048 				SYNC_RESP_STRUCT(resp1, pkt);
11049 				sc->sc_uc.uc_error_event_table
11050 				    = le32toh(resp1->error_event_table_ptr);
11051 				sc->sc_uc.uc_log_event_table
11052 				    = le32toh(resp1->log_event_table_ptr);
11053 				sc->sched_base = le32toh(resp1->scd_base_ptr);
11054 				if (resp1->status == IWM_ALIVE_STATUS_OK)
11055 					sc->sc_uc.uc_ok = 1;
11056 				else
11057 					sc->sc_uc.uc_ok = 0;
11058 			}
11059 
11060 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
11061 				SYNC_RESP_STRUCT(resp2, pkt);
11062 				sc->sc_uc.uc_error_event_table
11063 				    = le32toh(resp2->error_event_table_ptr);
11064 				sc->sc_uc.uc_log_event_table
11065 				    = le32toh(resp2->log_event_table_ptr);
11066 				sc->sched_base = le32toh(resp2->scd_base_ptr);
11067 				sc->sc_uc.uc_umac_error_event_table
11068 				    = le32toh(resp2->error_info_addr);
11069 				if (resp2->status == IWM_ALIVE_STATUS_OK)
11070 					sc->sc_uc.uc_ok = 1;
11071 				else
11072 					sc->sc_uc.uc_ok = 0;
11073 			}
11074 
11075 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
11076 				SYNC_RESP_STRUCT(resp3, pkt);
11077 				sc->sc_uc.uc_error_event_table
11078 				    = le32toh(resp3->error_event_table_ptr);
11079 				sc->sc_uc.uc_log_event_table
11080 				    = le32toh(resp3->log_event_table_ptr);
11081 				sc->sched_base = le32toh(resp3->scd_base_ptr);
11082 				sc->sc_uc.uc_umac_error_event_table
11083 				    = le32toh(resp3->error_info_addr);
11084 				if (resp3->status == IWM_ALIVE_STATUS_OK)
11085 					sc->sc_uc.uc_ok = 1;
11086 				else
11087 					sc->sc_uc.uc_ok = 0;
11088 			}
11089 
11090 			sc->sc_uc.uc_intr = 1;
11091 			wakeup(&sc->sc_uc);
11092 			break;
11093 		}
11094 
11095 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
11096 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
11097 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
11098 			iwm_phy_db_set_section(sc, phy_db_notif);
11099 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
11100 			wakeup(&sc->sc_init_complete);
11101 			break;
11102 		}
11103 
11104 		case IWM_STATISTICS_NOTIFICATION: {
11105 			struct iwm_notif_statistics *stats;
11106 			SYNC_RESP_STRUCT(stats, pkt);
11107 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
11108 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
11109 			break;
11110 		}
11111 
11112 		case IWM_MCC_CHUB_UPDATE_CMD: {
11113 			struct iwm_mcc_chub_notif *notif;
11114 			SYNC_RESP_STRUCT(notif, pkt);
11115 			iwm_mcc_update(sc, notif);
11116 			break;
11117 		}
11118 
11119 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
11120 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11121 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
11122 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11123 				 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
11124 			break;
11125 
11126 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11127 		    IWM_CT_KILL_NOTIFICATION): {
11128 			struct iwm_ct_kill_notif *notif;
11129 			SYNC_RESP_STRUCT(notif, pkt);
11130 			printf("%s: device at critical temperature (%u degC), "
11131 			    "stopping device\n",
11132 			    DEVNAME(sc), le16toh(notif->temperature));
11133 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11134 			task_add(systq, &sc->init_task);
11135 			break;
11136 		}
11137 
11138 		case IWM_ADD_STA_KEY:
11139 		case IWM_PHY_CONFIGURATION_CMD:
11140 		case IWM_TX_ANT_CONFIGURATION_CMD:
11141 		case IWM_ADD_STA:
11142 		case IWM_MAC_CONTEXT_CMD:
11143 		case IWM_REPLY_SF_CFG_CMD:
11144 		case IWM_POWER_TABLE_CMD:
11145 		case IWM_LTR_CONFIG:
11146 		case IWM_PHY_CONTEXT_CMD:
11147 		case IWM_BINDING_CONTEXT_CMD:
11148 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
11149 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
11150 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
11151 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
11152 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
11153 		case IWM_REPLY_BEACON_FILTERING_CMD:
11154 		case IWM_MAC_PM_POWER_TABLE:
11155 		case IWM_TIME_QUOTA_CMD:
11156 		case IWM_REMOVE_STA:
11157 		case IWM_TXPATH_FLUSH:
11158 		case IWM_LQ_CMD:
11159 		case IWM_WIDE_ID(IWM_LONG_GROUP,
11160 				 IWM_FW_PAGING_BLOCK_CMD):
11161 		case IWM_BT_CONFIG:
11162 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
11163 		case IWM_NVM_ACCESS_CMD:
11164 		case IWM_MCC_UPDATE_CMD:
11165 		case IWM_TIME_EVENT_CMD: {
11166 			size_t pkt_len;
11167 
11168 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
11169 				break;
11170 
11171 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
11172 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11173 
11174 			pkt_len = sizeof(pkt->len_n_flags) +
11175 			    iwm_rx_packet_len(pkt);
11176 
11177 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
11178 			    pkt_len < sizeof(*pkt) ||
11179 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
11180 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11181 				    sc->sc_cmd_resp_len[idx]);
11182 				sc->sc_cmd_resp_pkt[idx] = NULL;
11183 				break;
11184 			}
11185 
11186 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
11187 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11188 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11189 			break;
11190 		}
11191 
11192 		/* ignore */
11193 		case IWM_PHY_DB_CMD:
11194 			break;
11195 
11196 		case IWM_INIT_COMPLETE_NOTIF:
11197 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
11198 			wakeup(&sc->sc_init_complete);
11199 			break;
11200 
11201 		case IWM_SCAN_OFFLOAD_COMPLETE: {
11202 			struct iwm_periodic_scan_complete *notif;
11203 			SYNC_RESP_STRUCT(notif, pkt);
11204 			break;
11205 		}
11206 
11207 		case IWM_SCAN_ITERATION_COMPLETE: {
11208 			struct iwm_lmac_scan_complete_notif *notif;
11209 			SYNC_RESP_STRUCT(notif, pkt);
11210 			iwm_endscan(sc);
11211 			break;
11212 		}
11213 
11214 		case IWM_SCAN_COMPLETE_UMAC: {
11215 			struct iwm_umac_scan_complete *notif;
11216 			SYNC_RESP_STRUCT(notif, pkt);
11217 			iwm_endscan(sc);
11218 			break;
11219 		}
11220 
11221 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
11222 			struct iwm_umac_scan_iter_complete_notif *notif;
11223 			SYNC_RESP_STRUCT(notif, pkt);
11224 			iwm_endscan(sc);
11225 			break;
11226 		}
11227 
11228 		case IWM_REPLY_ERROR: {
11229 			struct iwm_error_resp *resp;
11230 			SYNC_RESP_STRUCT(resp, pkt);
11231 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
11232 				DEVNAME(sc), le32toh(resp->error_type),
11233 				resp->cmd_id);
11234 			break;
11235 		}
11236 
11237 		case IWM_TIME_EVENT_NOTIFICATION: {
11238 			struct iwm_time_event_notif *notif;
11239 			uint32_t action;
11240 			SYNC_RESP_STRUCT(notif, pkt);
11241 
11242 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
11243 				break;
11244 			action = le32toh(notif->action);
11245 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
11246 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
11247 			break;
11248 		}
11249 
11250 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
11251 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
11252 		    break;
11253 
11254 		/*
11255 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
11256 		 * messages. Just ignore them for now.
11257 		 */
11258 		case IWM_DEBUG_LOG_MSG:
11259 			break;
11260 
11261 		case IWM_MCAST_FILTER_CMD:
11262 			break;
11263 
11264 		case IWM_SCD_QUEUE_CFG: {
11265 			struct iwm_scd_txq_cfg_rsp *rsp;
11266 			SYNC_RESP_STRUCT(rsp, pkt);
11267 
11268 			break;
11269 		}
11270 
11271 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
11272 			break;
11273 
11274 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
11275 			break;
11276 
11277 		default:
11278 			handled = 0;
11279 			printf("%s: unhandled firmware response 0x%x/0x%x "
11280 			    "rx ring %d[%d]\n",
11281 			    DEVNAME(sc), code, pkt->len_n_flags,
11282 			    (qid & ~0x80), idx);
11283 			break;
11284 		}
11285 
11286 		/*
11287 		 * uCode sets bit 0x80 when it originates the notification,
11288 		 * i.e. when the notification is not a direct response to a
11289 		 * command sent by the driver.
11290 		 * For example, uCode issues IWM_REPLY_RX when it sends a
11291 		 * received frame to the driver.
11292 		 */
11293 		if (handled && !(qid & (1 << 7))) {
11294 			iwm_cmd_done(sc, qid, idx, code);
11295 		}
11296 
11297 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11298 	}
11299 
11300 	if (m0 && m0 != data->m)
11301 		m_freem(m0);
11302 }
11303 
11304 void
11305 iwm_notif_intr(struct iwm_softc *sc)
11306 {
11307 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
11308 	uint32_t wreg;
11309 	uint16_t hw;
11310 	int count;
11311 
11312 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
11313 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
11314 
11315 	if (sc->sc_mqrx_supported) {
11316 		count = IWM_RX_MQ_RING_COUNT;
11317 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
11318 	} else {
11319 		count = IWM_RX_RING_COUNT;
11320 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
11321 	}
11322 
11323 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
11324 	hw &= (count - 1);
11325 	while (sc->rxq.cur != hw) {
11326 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
11327 		iwm_rx_pkt(sc, data, &ml);
11328 		ADVANCE_RXQ(sc);
11329 	}
11330 	if_input(&sc->sc_ic.ic_if, &ml);
11331 
11332 	/*
11333 	 * Tell the firmware what we have processed.
11334 	 * Seems like the hardware gets upset unless we align the write by 8??
11335 	 */
11336 	hw = (hw == 0) ? count - 1 : hw - 1;
11337 	IWM_WRITE(sc, wreg, hw & ~7);
11338 }
11339 
11340 int
11341 iwm_intr(void *arg)
11342 {
11343 	struct iwm_softc *sc = arg;
11344 	struct ieee80211com *ic = &sc->sc_ic;
11345 	struct ifnet *ifp = IC2IFP(ic);
11346 	int handled = 0;
11347 	int rv = 0;
11348 	uint32_t r1, r2;
11349 
11350 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
11351 
11352 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
11353 		uint32_t *ict = sc->ict_dma.vaddr;
11354 		int tmp;
11355 
11356 		tmp = htole32(ict[sc->ict_cur]);
11357 		if (!tmp)
11358 			goto out_ena;
11359 
11360 		/*
11361 		 * ok, there was something.  keep plowing until we have all.
11362 		 */
11363 		r1 = r2 = 0;
11364 		while (tmp) {
11365 			r1 |= tmp;
11366 			ict[sc->ict_cur] = 0;
11367 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
11368 			tmp = htole32(ict[sc->ict_cur]);
11369 		}
11370 
11371 		/* this is where the fun begins.  don't ask */
11372 		if (r1 == 0xffffffff)
11373 			r1 = 0;
11374 
11375 		/*
11376 		 * Workaround for hardware bug where bits are falsely cleared
11377 		 * when using interrupt coalescing.  Bit 15 should be set if
11378 		 * bits 18 and 19 are set.
11379 		 */
11380 		if (r1 & 0xc0000)
11381 			r1 |= 0x8000;
11382 
11383 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
11384 	} else {
11385 		r1 = IWM_READ(sc, IWM_CSR_INT);
11386 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11387 	}
11388 	if (r1 == 0 && r2 == 0) {
11389 		goto out_ena;
11390 	}
11391 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
11392 		goto out;
11393 
11394 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11395 
11396 	/* ignored */
11397 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
11398 
11399 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
11400 		handled |= IWM_CSR_INT_BIT_RF_KILL;
11401 		iwm_check_rfkill(sc);
11402 		task_add(systq, &sc->init_task);
11403 		rv = 1;
11404 		goto out_ena;
11405 	}
11406 
11407 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
11408 		if (ifp->if_flags & IFF_DEBUG) {
11409 			iwm_nic_error(sc);
11410 			iwm_dump_driver_status(sc);
11411 		}
11412 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11413 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11414 			task_add(systq, &sc->init_task);
11415 		rv = 1;
11416 		goto out;
11417 
11418 	}
11419 
11420 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
11421 		handled |= IWM_CSR_INT_BIT_HW_ERR;
11422 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
11423 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11424 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11425 			task_add(systq, &sc->init_task);
11426 		}
11427 		rv = 1;
11428 		goto out;
11429 	}
11430 
11431 	/* firmware chunk loaded */
11432 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
11433 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11434 		handled |= IWM_CSR_INT_BIT_FH_TX;
11435 
11436 		sc->sc_fw_chunk_done = 1;
11437 		wakeup(&sc->sc_fw);
11438 	}
11439 
11440 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
11441 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
11442 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
11443 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
11444 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11445 		}
11446 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
11447 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
11448 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11449 		}
11450 
11451 		/* Disable periodic interrupt; we use it as just a one-shot. */
11452 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11453 
11454 		/*
11455 		 * Enable periodic interrupt in 8 msec only if we received
11456 		 * real RX interrupt (instead of just periodic int), to catch
11457 		 * any dangling Rx interrupt.  If it was just the periodic
11458 		 * interrupt, there was no dangling Rx activity, and no need
11459 		 * to extend the periodic interrupt; one-shot is enough.
11460 		 */
11461 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
11462 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11463 			    IWM_CSR_INT_PERIODIC_ENA);
11464 
11465 		iwm_notif_intr(sc);
11466 	}
11467 
11468 	rv = 1;
11469 
11470  out_ena:
11471 	iwm_restore_interrupts(sc);
11472  out:
11473 	return rv;
11474 }
11475 
11476 int
11477 iwm_intr_msix(void *arg)
11478 {
11479 	struct iwm_softc *sc = arg;
11480 	struct ieee80211com *ic = &sc->sc_ic;
11481 	struct ifnet *ifp = IC2IFP(ic);
11482 	uint32_t inta_fh, inta_hw;
11483 	int vector = 0;
11484 
11485 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11486 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11487 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11488 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11489 	inta_fh &= sc->sc_fh_mask;
11490 	inta_hw &= sc->sc_hw_mask;
11491 
11492 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11493 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11494 		iwm_notif_intr(sc);
11495 	}
11496 
11497 	/* firmware chunk loaded */
11498 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11499 		sc->sc_fw_chunk_done = 1;
11500 		wakeup(&sc->sc_fw);
11501 	}
11502 
11503 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11504 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11505 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11506 		if (ifp->if_flags & IFF_DEBUG) {
11507 			iwm_nic_error(sc);
11508 			iwm_dump_driver_status(sc);
11509 		}
11510 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11511 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11512 			task_add(systq, &sc->init_task);
11513 		return 1;
11514 	}
11515 
11516 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11517 		iwm_check_rfkill(sc);
11518 		task_add(systq, &sc->init_task);
11519 	}
11520 
11521 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11522 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
11523 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11524 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11525 			task_add(systq, &sc->init_task);
11526 		}
11527 		return 1;
11528 	}
11529 
11530 	/*
11531 	 * Before sending the interrupt the HW disables it to prevent
11532 	 * a nested interrupt. This is done by writing 1 to the corresponding
11533 	 * bit in the mask register. After handling the interrupt, it should be
11534 	 * re-enabled by clearing this bit. This register is defined as
11535 	 * write 1 clear (W1C) register, meaning that it's being clear
11536 	 * by writing 1 to the bit.
11537 	 */
11538 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11539 	return 1;
11540 }
11541 
11542 typedef void *iwm_match_t;
11543 
11544 static const struct pci_matchid iwm_devices[] = {
11545 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11546 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11547 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11548 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11549 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11550 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11551 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11552 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11553 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11554 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11555 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11556 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11557 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11558 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11559 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11560 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_3 },
11561 };
11562 
11563 int
11564 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11565 {
11566 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11567 	    nitems(iwm_devices));
11568 }
11569 
11570 int
11571 iwm_preinit(struct iwm_softc *sc)
11572 {
11573 	struct ieee80211com *ic = &sc->sc_ic;
11574 	struct ifnet *ifp = IC2IFP(ic);
11575 	int err;
11576 
11577 	err = iwm_prepare_card_hw(sc);
11578 	if (err) {
11579 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11580 		return err;
11581 	}
11582 
11583 	if (sc->attached) {
11584 		/* Update MAC in case the upper layers changed it. */
11585 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11586 		    ((struct arpcom *)ifp)->ac_enaddr);
11587 		return 0;
11588 	}
11589 
11590 	err = iwm_start_hw(sc);
11591 	if (err) {
11592 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11593 		return err;
11594 	}
11595 
11596 	err = iwm_run_init_mvm_ucode(sc, 1);
11597 	iwm_stop_device(sc);
11598 	if (err)
11599 		return err;
11600 
11601 	/* Print version info and MAC address on first successful fw load. */
11602 	sc->attached = 1;
11603 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11604 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11605 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11606 
11607 	if (sc->sc_nvm.sku_cap_11n_enable)
11608 		iwm_setup_ht_rates(sc);
11609 
11610 	/* not all hardware can do 5GHz band */
11611 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11612 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11613 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11614 
11615 	/* Configure channel information obtained from firmware. */
11616 	ieee80211_channel_init(ifp);
11617 
11618 	/* Configure MAC address. */
11619 	err = if_setlladdr(ifp, ic->ic_myaddr);
11620 	if (err)
11621 		printf("%s: could not set MAC address (error %d)\n",
11622 		    DEVNAME(sc), err);
11623 
11624 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11625 
11626 	return 0;
11627 }
11628 
11629 void
11630 iwm_attach_hook(struct device *self)
11631 {
11632 	struct iwm_softc *sc = (void *)self;
11633 
11634 	KASSERT(!cold);
11635 
11636 	iwm_preinit(sc);
11637 }
11638 
11639 void
11640 iwm_attach(struct device *parent, struct device *self, void *aux)
11641 {
11642 	struct iwm_softc *sc = (void *)self;
11643 	struct pci_attach_args *pa = aux;
11644 	pci_intr_handle_t ih;
11645 	pcireg_t reg, memtype;
11646 	struct ieee80211com *ic = &sc->sc_ic;
11647 	struct ifnet *ifp = &ic->ic_if;
11648 	const char *intrstr;
11649 	int err;
11650 	int txq_i, i, j;
11651 
11652 	sc->sc_pct = pa->pa_pc;
11653 	sc->sc_pcitag = pa->pa_tag;
11654 	sc->sc_dmat = pa->pa_dmat;
11655 
11656 	rw_init(&sc->ioctl_rwl, "iwmioctl");
11657 
11658 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11659 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11660 	if (err == 0) {
11661 		printf("%s: PCIe capability structure not found!\n",
11662 		    DEVNAME(sc));
11663 		return;
11664 	}
11665 
11666 	/*
11667 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11668 	 * PCI Tx retries from interfering with C3 CPU state.
11669 	 */
11670 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11671 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11672 
11673 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11674 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11675 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11676 	if (err) {
11677 		printf("%s: can't map mem space\n", DEVNAME(sc));
11678 		return;
11679 	}
11680 
11681 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11682 		sc->sc_msix = 1;
11683 	} else if (pci_intr_map_msi(pa, &ih)) {
11684 		if (pci_intr_map(pa, &ih)) {
11685 			printf("%s: can't map interrupt\n", DEVNAME(sc));
11686 			return;
11687 		}
11688 		/* Hardware bug workaround. */
11689 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11690 		    PCI_COMMAND_STATUS_REG);
11691 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11692 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11693 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11694 		    PCI_COMMAND_STATUS_REG, reg);
11695 	}
11696 
11697 	intrstr = pci_intr_string(sc->sc_pct, ih);
11698 	if (sc->sc_msix)
11699 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11700 		    iwm_intr_msix, sc, DEVNAME(sc));
11701 	else
11702 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11703 		    iwm_intr, sc, DEVNAME(sc));
11704 
11705 	if (sc->sc_ih == NULL) {
11706 		printf("\n");
11707 		printf("%s: can't establish interrupt", DEVNAME(sc));
11708 		if (intrstr != NULL)
11709 			printf(" at %s", intrstr);
11710 		printf("\n");
11711 		return;
11712 	}
11713 	printf(", %s\n", intrstr);
11714 
11715 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11716 	switch (PCI_PRODUCT(pa->pa_id)) {
11717 	case PCI_PRODUCT_INTEL_WL_3160_1:
11718 	case PCI_PRODUCT_INTEL_WL_3160_2:
11719 		sc->sc_fwname = "iwm-3160-17";
11720 		sc->host_interrupt_operation_mode = 1;
11721 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11722 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11723 		sc->sc_nvm_max_section_size = 16384;
11724 		sc->nvm_type = IWM_NVM;
11725 		break;
11726 	case PCI_PRODUCT_INTEL_WL_3165_1:
11727 	case PCI_PRODUCT_INTEL_WL_3165_2:
11728 		sc->sc_fwname = "iwm-7265D-29";
11729 		sc->host_interrupt_operation_mode = 0;
11730 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11731 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11732 		sc->sc_nvm_max_section_size = 16384;
11733 		sc->nvm_type = IWM_NVM;
11734 		break;
11735 	case PCI_PRODUCT_INTEL_WL_3168_1:
11736 		sc->sc_fwname = "iwm-3168-29";
11737 		sc->host_interrupt_operation_mode = 0;
11738 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11739 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11740 		sc->sc_nvm_max_section_size = 16384;
11741 		sc->nvm_type = IWM_NVM_SDP;
11742 		break;
11743 	case PCI_PRODUCT_INTEL_WL_7260_1:
11744 	case PCI_PRODUCT_INTEL_WL_7260_2:
11745 		sc->sc_fwname = "iwm-7260-17";
11746 		sc->host_interrupt_operation_mode = 1;
11747 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11748 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11749 		sc->sc_nvm_max_section_size = 16384;
11750 		sc->nvm_type = IWM_NVM;
11751 		break;
11752 	case PCI_PRODUCT_INTEL_WL_7265_1:
11753 	case PCI_PRODUCT_INTEL_WL_7265_2:
11754 		sc->sc_fwname = "iwm-7265-17";
11755 		sc->host_interrupt_operation_mode = 0;
11756 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11757 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11758 		sc->sc_nvm_max_section_size = 16384;
11759 		sc->nvm_type = IWM_NVM;
11760 		break;
11761 	case PCI_PRODUCT_INTEL_WL_8260_1:
11762 	case PCI_PRODUCT_INTEL_WL_8260_2:
11763 		sc->sc_fwname = "iwm-8000C-36";
11764 		sc->host_interrupt_operation_mode = 0;
11765 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11766 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11767 		sc->sc_nvm_max_section_size = 32768;
11768 		sc->nvm_type = IWM_NVM_EXT;
11769 		break;
11770 	case PCI_PRODUCT_INTEL_WL_8265_1:
11771 		sc->sc_fwname = "iwm-8265-36";
11772 		sc->host_interrupt_operation_mode = 0;
11773 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11774 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11775 		sc->sc_nvm_max_section_size = 32768;
11776 		sc->nvm_type = IWM_NVM_EXT;
11777 		break;
11778 	case PCI_PRODUCT_INTEL_WL_9260_1:
11779 		sc->sc_fwname = "iwm-9260-46";
11780 		sc->host_interrupt_operation_mode = 0;
11781 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11782 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11783 		sc->sc_nvm_max_section_size = 32768;
11784 		sc->sc_mqrx_supported = 1;
11785 		break;
11786 	case PCI_PRODUCT_INTEL_WL_9560_1:
11787 	case PCI_PRODUCT_INTEL_WL_9560_2:
11788 	case PCI_PRODUCT_INTEL_WL_9560_3:
11789 		sc->sc_fwname = "iwm-9000-46";
11790 		sc->host_interrupt_operation_mode = 0;
11791 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11792 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11793 		sc->sc_nvm_max_section_size = 32768;
11794 		sc->sc_mqrx_supported = 1;
11795 		sc->sc_integrated = 1;
11796 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
11797 			sc->sc_xtal_latency = 670;
11798 			sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
11799 		} else
11800 			sc->sc_xtal_latency = 650;
11801 		break;
11802 	default:
11803 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11804 		return;
11805 	}
11806 
11807 	/*
11808 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
11809 	 * changed, and now the revision step also includes bit 0-1 (no more
11810 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
11811 	 * in the old format.
11812 	 */
11813 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11814 		uint32_t hw_step;
11815 
11816 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11817 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11818 
11819 		if (iwm_prepare_card_hw(sc) != 0) {
11820 			printf("%s: could not initialize hardware\n",
11821 			    DEVNAME(sc));
11822 			return;
11823 		}
11824 
11825 		/*
11826 		 * In order to recognize C step the driver should read the
11827 		 * chip version id located at the AUX bus MISC address.
11828 		 */
11829 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11830 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
11831 		DELAY(2);
11832 
11833 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11834 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11835 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11836 				   25000);
11837 		if (!err) {
11838 			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
11839 			return;
11840 		}
11841 
11842 		if (iwm_nic_lock(sc)) {
11843 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11844 			hw_step |= IWM_ENABLE_WFPM;
11845 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11846 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11847 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
11848 			if (hw_step == 0x3)
11849 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11850 						(IWM_SILICON_C_STEP << 2);
11851 			iwm_nic_unlock(sc);
11852 		} else {
11853 			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
11854 			return;
11855 		}
11856 	}
11857 
11858 	/*
11859 	 * Allocate DMA memory for firmware transfers.
11860 	 * Must be aligned on a 16-byte boundary.
11861 	 */
11862 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11863 	    sc->sc_fwdmasegsz, 16);
11864 	if (err) {
11865 		printf("%s: could not allocate memory for firmware\n",
11866 		    DEVNAME(sc));
11867 		return;
11868 	}
11869 
11870 	/* Allocate "Keep Warm" page, used internally by the card. */
11871 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11872 	if (err) {
11873 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
11874 		goto fail1;
11875 	}
11876 
11877 	/* Allocate interrupt cause table (ICT).*/
11878 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11879 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
11880 	if (err) {
11881 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11882 		goto fail2;
11883 	}
11884 
11885 	/* TX scheduler rings must be aligned on a 1KB boundary. */
11886 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
11887 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
11888 	if (err) {
11889 		printf("%s: could not allocate TX scheduler rings\n",
11890 		    DEVNAME(sc));
11891 		goto fail3;
11892 	}
11893 
11894 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11895 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11896 		if (err) {
11897 			printf("%s: could not allocate TX ring %d\n",
11898 			    DEVNAME(sc), txq_i);
11899 			goto fail4;
11900 		}
11901 	}
11902 
11903 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
11904 	if (err) {
11905 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11906 		goto fail4;
11907 	}
11908 
11909 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
11910 	if (sc->sc_nswq == NULL)
11911 		goto fail4;
11912 
11913 	/* Clear pending interrupts. */
11914 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
11915 
11916 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11917 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11918 	ic->ic_state = IEEE80211_S_INIT;
11919 
11920 	/* Set device capabilities. */
11921 	ic->ic_caps =
11922 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11923 	    IEEE80211_C_WEP |		/* WEP */
11924 	    IEEE80211_C_RSN |		/* WPA/RSN */
11925 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11926 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11927 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11928 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11929 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11930 
11931 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11932 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11933 	ic->ic_htcaps |=
11934 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11935 	ic->ic_htxcaps = 0;
11936 	ic->ic_txbfcaps = 0;
11937 	ic->ic_aselcaps = 0;
11938 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11939 
11940 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11941 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11942 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11943 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11944 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11945 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11946 
11947 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11948 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11949 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11950 
11951 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11952 		sc->sc_phyctxt[i].id = i;
11953 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11954 		sc->sc_phyctxt[i].vht_chan_width =
11955 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11956 	}
11957 
11958 	sc->sc_amrr.amrr_min_success_threshold =  1;
11959 	sc->sc_amrr.amrr_max_success_threshold = 15;
11960 
11961 	/* IBSS channel undefined for now. */
11962 	ic->ic_ibss_chan = &ic->ic_channels[1];
11963 
11964 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
11965 
11966 	ifp->if_softc = sc;
11967 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11968 	ifp->if_ioctl = iwm_ioctl;
11969 	ifp->if_start = iwm_start;
11970 	ifp->if_watchdog = iwm_watchdog;
11971 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11972 
11973 	if_attach(ifp);
11974 	ieee80211_ifattach(ifp);
11975 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11976 
11977 #if NBPFILTER > 0
11978 	iwm_radiotap_attach(sc);
11979 #endif
11980 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
11981 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
11982 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
11983 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
11984 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
11985 		rxba->sc = sc;
11986 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
11987 		    rxba);
11988 		timeout_set(&rxba->reorder_buf.reorder_timer,
11989 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
11990 		for (j = 0; j < nitems(rxba->entries); j++)
11991 			ml_init(&rxba->entries[j].frames);
11992 	}
11993 	task_set(&sc->init_task, iwm_init_task, sc);
11994 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
11995 	task_set(&sc->ba_task, iwm_ba_task, sc);
11996 	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
11997 	task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
11998 	task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
11999 
12000 	ic->ic_node_alloc = iwm_node_alloc;
12001 	ic->ic_bgscan_start = iwm_bgscan;
12002 	ic->ic_bgscan_done = iwm_bgscan_done;
12003 	ic->ic_set_key = iwm_set_key;
12004 	ic->ic_delete_key = iwm_delete_key;
12005 
12006 	/* Override 802.11 state transition machine. */
12007 	sc->sc_newstate = ic->ic_newstate;
12008 	ic->ic_newstate = iwm_newstate;
12009 	ic->ic_updateprot = iwm_updateprot;
12010 	ic->ic_updateslot = iwm_updateslot;
12011 	ic->ic_updateedca = iwm_updateedca;
12012 	ic->ic_updatechan = iwm_updatechan;
12013 	ic->ic_updatedtim = iwm_updatedtim;
12014 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
12015 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
12016 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
12017 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
12018 	/*
12019 	 * We cannot read the MAC address without loading the
12020 	 * firmware from disk. Postpone until mountroot is done.
12021 	 */
12022 	config_mountroot(self, iwm_attach_hook);
12023 
12024 	return;
12025 
12026 fail4:	while (--txq_i >= 0)
12027 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
12028 	iwm_free_rx_ring(sc, &sc->rxq);
12029 	iwm_dma_contig_free(&sc->sched_dma);
12030 fail3:	if (sc->ict_dma.vaddr != NULL)
12031 		iwm_dma_contig_free(&sc->ict_dma);
12032 
12033 fail2:	iwm_dma_contig_free(&sc->kw_dma);
12034 fail1:	iwm_dma_contig_free(&sc->fw_dma);
12035 	return;
12036 }
12037 
12038 #if NBPFILTER > 0
12039 void
12040 iwm_radiotap_attach(struct iwm_softc *sc)
12041 {
12042 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
12043 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
12044 
12045 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
12046 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
12047 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
12048 
12049 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
12050 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
12051 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
12052 }
12053 #endif
12054 
12055 void
12056 iwm_init_task(void *arg1)
12057 {
12058 	struct iwm_softc *sc = arg1;
12059 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12060 	int s = splnet();
12061 	int generation = sc->sc_generation;
12062 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
12063 
12064 	rw_enter_write(&sc->ioctl_rwl);
12065 	if (generation != sc->sc_generation) {
12066 		rw_exit(&sc->ioctl_rwl);
12067 		splx(s);
12068 		return;
12069 	}
12070 
12071 	if (ifp->if_flags & IFF_RUNNING)
12072 		iwm_stop(ifp);
12073 	else
12074 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
12075 
12076 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
12077 		iwm_init(ifp);
12078 
12079 	rw_exit(&sc->ioctl_rwl);
12080 	splx(s);
12081 }
12082 
12083 void
12084 iwm_resume(struct iwm_softc *sc)
12085 {
12086 	pcireg_t reg;
12087 
12088 	/*
12089 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
12090 	 * PCI Tx retries from interfering with C3 CPU state.
12091 	 */
12092 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
12093 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
12094 
12095 	if (!sc->sc_msix) {
12096 		/* Hardware bug workaround. */
12097 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
12098 		    PCI_COMMAND_STATUS_REG);
12099 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
12100 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
12101 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
12102 		    PCI_COMMAND_STATUS_REG, reg);
12103 	}
12104 
12105 	iwm_disable_interrupts(sc);
12106 }
12107 
12108 int
12109 iwm_wakeup(struct iwm_softc *sc)
12110 {
12111 	struct ieee80211com *ic = &sc->sc_ic;
12112 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12113 	int err;
12114 
12115 	err = iwm_start_hw(sc);
12116 	if (err)
12117 		return err;
12118 
12119 	err = iwm_init_hw(sc);
12120 	if (err)
12121 		return err;
12122 
12123 	refcnt_init(&sc->task_refs);
12124 	ifq_clr_oactive(&ifp->if_snd);
12125 	ifp->if_flags |= IFF_RUNNING;
12126 
12127 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
12128 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
12129 	else
12130 		ieee80211_begin_scan(ifp);
12131 
12132 	return 0;
12133 }
12134 
12135 int
12136 iwm_activate(struct device *self, int act)
12137 {
12138 	struct iwm_softc *sc = (struct iwm_softc *)self;
12139 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12140 	int err = 0;
12141 
12142 	switch (act) {
12143 	case DVACT_QUIESCE:
12144 		if (ifp->if_flags & IFF_RUNNING) {
12145 			rw_enter_write(&sc->ioctl_rwl);
12146 			iwm_stop(ifp);
12147 			rw_exit(&sc->ioctl_rwl);
12148 		}
12149 		break;
12150 	case DVACT_RESUME:
12151 		iwm_resume(sc);
12152 		break;
12153 	case DVACT_WAKEUP:
12154 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
12155 			err = iwm_wakeup(sc);
12156 			if (err)
12157 				printf("%s: could not initialize hardware\n",
12158 				    DEVNAME(sc));
12159 		}
12160 		break;
12161 	}
12162 
12163 	return 0;
12164 }
12165 
12166 struct cfdriver iwm_cd = {
12167 	NULL, "iwm", DV_IFNET
12168 };
12169 
12170 const struct cfattach iwm_ca = {
12171 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
12172 	NULL, iwm_activate
12173 };
12174