/*	$OpenBSD: if_iwm.c,v 1.300 2020/02/29 09:41:58 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_mira.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif
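
/*
 * Usage note: DPRINTF takes a parenthesized argument list, e.g.
 *	DPRINTF(("%s: paging enabled\n", DEVNAME(sc)));
 * so that the macro can expand into a single printf call.
 */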

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

const struct iwm_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwm_rates[] index. */
const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};
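
/*
 * Illustrative sketch (not compiled): the tables above combine to map an
 * MCS index to its HT PLCP value; iwm_is_mimo_mcs() below does exactly
 * this lookup.
 */
#if 0
	int ridx = iwm_mcs2ridx[15];
	uint8_t ht_plcp = iwm_rates[ridx].ht_plcp; /* MIMO2 MCS 15 PLCP */
#endif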

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

int	iwm_is_mimo_ht_plcp(uint8_t);
int	iwm_is_mimo_mcs(int);
int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
	    uint8_t *, size_t);
int	iwm_set_default_calib(struct iwm_softc *, const void *);
void	iwm_fw_info_free(struct iwm_fw_info *);
int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
int	iwm_nic_lock(struct iwm_softc *);
void	iwm_nic_assert_locked(struct iwm_softc *);
void	iwm_nic_unlock(struct iwm_softc *);
void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
	    uint32_t);
void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
	    bus_size_t);
void	iwm_dma_contig_free(struct iwm_dma_info *);
int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_disable_rx_dma(struct iwm_softc *);
void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_enable_rfkill_int(struct iwm_softc *);
int	iwm_check_rfkill(struct iwm_softc *);
void	iwm_enable_interrupts(struct iwm_softc *);
void	iwm_enable_fwload_interrupt(struct iwm_softc *);
void	iwm_restore_interrupts(struct iwm_softc *);
void	iwm_disable_interrupts(struct iwm_softc *);
void	iwm_ict_reset(struct iwm_softc *);
int	iwm_set_hw_ready(struct iwm_softc *);
int	iwm_prepare_card_hw(struct iwm_softc *);
void	iwm_apm_config(struct iwm_softc *);
int	iwm_apm_init(struct iwm_softc *);
void	iwm_apm_stop(struct iwm_softc *);
int	iwm_allow_mcast(struct iwm_softc *);
void	iwm_init_msix_hw(struct iwm_softc *);
void	iwm_conf_msix_hw(struct iwm_softc *, int);
int	iwm_start_hw(struct iwm_softc *);
void	iwm_stop_device(struct iwm_softc *);
void	iwm_nic_config(struct iwm_softc *);
int	iwm_nic_rx_init(struct iwm_softc *);
int	iwm_nic_rx_legacy_init(struct iwm_softc *);
int	iwm_nic_rx_mq_init(struct iwm_softc *);
int	iwm_nic_tx_init(struct iwm_softc *);
int	iwm_nic_init(struct iwm_softc *);
int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
int	iwm_enable_txq(struct iwm_softc *, int, int, int);
int	iwm_post_alive(struct iwm_softc *);
struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
	    uint16_t);
int	iwm_phy_db_set_section(struct iwm_softc *,
	    struct iwm_calib_res_notif_phy_db *);
int	iwm_is_valid_channel(uint16_t);
uint8_t	iwm_ch_id_to_ch_index(uint16_t);
uint16_t iwm_channel_id_to_papd(uint16_t);
uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
	    uint16_t *, uint16_t);
int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
	    uint8_t);
int	iwm_send_phy_db_data(struct iwm_softc *);
void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
	    uint32_t);
void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
	    uint8_t *, uint16_t *);
int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
	    uint16_t *, size_t);
void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
	    const uint8_t *nvm_channels, int nchan);
void	iwm_setup_ht_rates(struct iwm_softc *);
void	iwm_htprot_task(void *);
void	iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int);
#ifdef notyet
int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
#endif
void	iwm_ba_task(void *);

int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, int);
void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
	    const uint16_t *, const uint16_t *);
int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
int	iwm_nvm_init(struct iwm_softc *);
int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
	    int, int *);
int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_dqa_cmd(struct iwm_softc *);
int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
int	iwm_config_ltr(struct iwm_softc *);
int	iwm_rx_addbuf(struct iwm_softc *, int, int);
int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_enable_ht_cck_fallback(struct iwm_softc *, struct iwm_node *);
void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_node *);
void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
	    struct ieee80211_channel *, uint8_t, uint8_t);
int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t);
int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
	    uint32_t *);
int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
void	iwm_cmd_done(struct iwm_softc *, int, int, int);
void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
	    struct ieee80211_frame *, struct iwm_tx_cmd *);
int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
int	iwm_flush_tx_path(struct iwm_softc *, int);
void	iwm_led_enable(struct iwm_softc *);
void	iwm_led_disable(struct iwm_softc *);
int	iwm_led_is_enabled(struct iwm_softc *);
void	iwm_led_blink_timeout(void *);
void	iwm_led_blink_start(struct iwm_softc *);
void	iwm_led_blink_stop(struct iwm_softc *);
int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
	    struct iwm_beacon_filter_cmd *);
void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
	    struct iwm_beacon_filter_cmd *);
int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_power_cmd *);
int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
int	iwm_power_update_device(struct iwm_softc *);
int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
int	iwm_disable_beacon_filter(struct iwm_softc *);
int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
int	iwm_add_aux_sta(struct iwm_softc *);
int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
uint16_t iwm_scan_rx_chain(struct iwm_softc *);
uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
	    struct iwm_scan_channel_cfg_lmac *, int, int);
int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
int	iwm_lmac_scan(struct iwm_softc *, int);
int	iwm_config_umac_scan(struct iwm_softc *);
int	iwm_umac_scan(struct iwm_softc *, int);
uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
int	iwm_rval2ridx(int);
void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_ctx_cmd *, uint32_t);
void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_data_sta *, int);
int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
int	iwm_scan(struct iwm_softc *);
int	iwm_bgscan(struct ieee80211com *);
int	iwm_umac_scan_abort(struct iwm_softc *);
int	iwm_lmac_scan_abort(struct iwm_softc *);
int	iwm_scan_abort(struct iwm_softc *);
int	iwm_auth(struct iwm_softc *);
int	iwm_deauth(struct iwm_softc *);
int	iwm_assoc(struct iwm_softc *);
int	iwm_disassoc(struct iwm_softc *);
int	iwm_run(struct iwm_softc *);
int	iwm_run_stop(struct iwm_softc *);
struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
void	iwm_calib_timeout(void *);
void	iwm_setrates_task(void *);
void	iwm_setrates(struct iwm_node *);
int	iwm_media_change(struct ifnet *);
void	iwm_newstate_task(void *);
int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwm_endscan(struct iwm_softc *);
void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwm_sf_config(struct iwm_softc *, int);
int	iwm_send_bt_init_conf(struct iwm_softc *);
int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
void	iwm_free_fw_paging(struct iwm_softc *);
int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_init_hw(struct iwm_softc *);
int	iwm_init(struct ifnet *);
void	iwm_start(struct ifnet *);
void	iwm_stop(struct ifnet *);
void	iwm_watchdog(struct ifnet *);
int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
#ifdef IWM_DEBUG
const char *iwm_desc_lookup(uint32_t);
void	iwm_nic_error(struct iwm_softc *);
void	iwm_nic_umac_error(struct iwm_softc *);
#endif
void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
	    struct mbuf_list *);
void	iwm_notif_intr(struct iwm_softc *);
int	iwm_intr(void *);
int	iwm_intr_msix(void *);
int	iwm_match(struct device *, void *, void *);
int	iwm_preinit(struct iwm_softc *);
void	iwm_attach_hook(struct device *);
void	iwm_attach(struct device *, struct device *, void *);
void	iwm_init_task(void *);
int	iwm_activate(struct device *, int);
int	iwm_resume(struct iwm_softc *);

#if NBPFILTER > 0
void	iwm_radiotap_attach(struct iwm_softc *);
#endif

int
iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
}

int
iwm_is_mimo_mcs(int mcs)
{
	int ridx = iwm_mcs2ridx[mcs];
	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
}

int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

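/*
 * A firmware section TLV payload, as consumed below, is laid out like this
 * (a sketch derived from the code, not from Intel documentation):
 *
 *	bytes 0..3	device load offset (little endian)
 *	bytes 4..	section data
 */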
int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* The rest is data. */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

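/*
 * Parse a firmware image.  After the iwm_tlv_ucode_header, the image is a
 * stream of TLV records which, judging by the parser below, look like:
 *
 *	uint32_t type;		(little endian)
 *	uint32_t length;	(little endian, payload bytes)
 *	uint8_t	 data[];	(payload, padded to a 4-byte boundary)
 */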
int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	uint32_t usniffer_img;
	uint32_t paging_mem_size;
	int err;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_TYPE_INIT)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);

			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
			    DEVNAME(sc), paging_mem_size));
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				printf("%s: Driver only supports up to %u"
				    " bytes for paging image (%u requested)\n",
				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
				    paging_mem_size);
				err = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				printf("%s: Paging: image isn't a multiple "
				    "of %u\n", DEVNAME(sc),
				    IWM_FW_PAGING_SIZE);
				err = EINVAL;
				goto out;
			}

			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
			fw->fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_DBG_DEST:
		case IWM_UCODE_TLV_FW_DBG_CONF:
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

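/* Write a 64-bit periphery register as two 32-bit writes, low word first. */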
void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

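/*
 * Poll until (reg & mask) == (bits & mask), checking in 10 microsecond
 * steps.  'timo' is the timeout in microseconds; returns 1 on success
 * and 0 on timeout.
 */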
int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

int
iwm_nic_lock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}

void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	uint32_t reg = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	if ((reg & IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
		panic("%s: mac clock not ready", DEVNAME(sc));
	if (reg & IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
		panic("%s: mac gone to sleep", DEVNAME(sc));
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}

void
iwm_nic_unlock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}
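
/*
 * Typical usage pattern for the NIC lock (see e.g. iwm_read_mem() above):
 *
 *	if (iwm_nic_lock(sc)) {
 *		... access device-internal registers ...
 *		iwm_nic_unlock(sc);
 *	} else
 *		... handle failure to wake the device ...
 *
 * The lock nests: it may be taken again while held, and the MAC access
 * request bit is only cleared once the counter drops back to zero.
 */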

void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

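/*
 * Allocate a physically contiguous DMA buffer: create the map, allocate a
 * single segment, map it into kernel virtual address space, and load the
 * map.  On success, dma->vaddr is the zeroed CPU mapping and dma->paddr
 * the bus address of the buffer.
 */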
int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}

void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, err;

	ring->cur = 0;

	if (sc->sc_mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->sc_mqrx_supported) {
		size = count * sizeof(uint32_t);
		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (err) {
			printf("%s: could not allocate RX ring DMA memory\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		if (sc->sc_mqrx_supported) {
			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
				    IWM_RXF_DMA_IDLE)
					break;
				DELAY(10);
			}
		} else {
			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
					break;
				DELAY(10);
			}
		}
		iwm_nic_unlock(sc);
	}
}

void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
}

void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	if (sc->sc_mqrx_supported)
		count = IWM_RX_MQ_RING_COUNT;
	else
		count = IWM_RX_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * There is no need to allocate DMA buffers for unused rings.
	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
	 * The command queue is queue 0 (sc->txq[0]), and the 4 mgmt/data
	 * frame queues are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e.
	 * sc->txq[5:8], in order to provide one queue per EDCA category.
	 *
	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd).
	 *
	 * Tx aggregation will require additional queues (one queue per TID
	 * for which aggregation is enabled) but we do not implement this yet.
	 *
	 * Unfortunately, we cannot tell if DQA will be used until the
	 * firmware gets loaded later, so just allocate sufficient rings
	 * in order to satisfy both cases.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	/* 7000 family NICs are locked while commands are in progress. */
	if (ring->qid == sc->cmdqid && ring->queued > 0) {
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			iwm_nic_unlock(sc);
	}
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;
}

void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}

int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * The "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}

void
iwm_enable_fwload_interrupt(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
	}
}

void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	if (!sc->sc_msix) {
		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWM_WRITE(sc, IWM_CSR_INT, ~0);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}

	splx(s);
}

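/*
 * Reset the interrupt cause table (ICT), a 4KB-aligned DMA region which
 * the device fills with interrupt causes, point the hardware at its
 * physical address, and switch the driver to ICT interrupt mode.
 */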
void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

#define IWM_HW_READY_TIMEOUT 50
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}

void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t lctl, cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so the device moves directly
	 *    L0->L1; this costs a negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}

	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_DCSR2);
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
}

1618 /*
1619  * Start up NIC's basic functionality after it has been reset
1620  * e.g. after platform boot or shutdown.
1621  * NOTE:  This does not load uCode nor start the embedded processor
1622  */
1623 int
1624 iwm_apm_init(struct iwm_softc *sc)
1625 {
1626 	int err = 0;
1627 
1628 	/* Disable L0S exit timer (platform NMI workaround) */
1629 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1630 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1631 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1632 
1633 	/*
1634 	 * Disable L0s without affecting L1;
1635 	 *  don't wait for ICH L0s (ICH bug W/A)
1636 	 */
1637 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1638 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1639 
1640 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1641 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1642 
1643 	/*
1644 	 * Enable HAP INTA (interrupt from management bus) to
1645 	 * wake device's PCI Express link L1a -> L0s
1646 	 */
1647 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1648 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1649 
1650 	iwm_apm_config(sc);
1651 
1652 #if 0 /* not for 7k/8k */
1653 	/* Configure analog phase-lock-loop before activating to D0A */
1654 	if (trans->cfg->base_params->pll_cfg_val)
1655 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1656 		    trans->cfg->base_params->pll_cfg_val);
1657 #endif
1658 
1659 	/*
1660 	 * Set "initialization complete" bit to move adapter from
1661 	 * D0U* --> D0A* (powered-up active) state.
1662 	 */
1663 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1664 
1665 	/*
1666 	 * Wait for clock stabilization; once stabilized, access to
1667 	 * device-internal resources is supported, e.g. iwm_write_prph()
1668 	 * and accesses to uCode SRAM.
1669 	 */
1670 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1671 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1672 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1673 		printf("%s: timeout waiting for clock stabilization\n",
1674 		    DEVNAME(sc));
1675 		err = ETIMEDOUT;
1676 		goto out;
1677 	}
1678 
1679 	if (sc->host_interrupt_operation_mode) {
1680 		/*
1681 		 * This is a bit of an abuse: the workaround below is needed
1682 		 * on 7260 / 3160 only, so we gate it on
1683 		 * host_interrupt_operation_mode even though it is unrelated.
1684 		 *
1685 		 * Enable the oscillator to count wake up time for L1 exit. This
1686 		 * consumes slightly more power (100uA) - but allows to be sure
1687 		 * that we wake up from L1 on time.
1688 		 *
1689 		 * This looks weird: read the same register twice, discard the
1690 		 * value, set a bit, and yet again, read that same register
1691 		 * just to discard the value. But that's the way the hardware
1692 		 * seems to like it.
1693 		 */
1694 		if (iwm_nic_lock(sc)) {
1695 			iwm_read_prph(sc, IWM_OSC_CLK);
1696 			iwm_read_prph(sc, IWM_OSC_CLK);
1697 			iwm_nic_unlock(sc);
1698 		}
1699 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1700 		if (iwm_nic_lock(sc)) {
1701 			iwm_read_prph(sc, IWM_OSC_CLK);
1702 			iwm_read_prph(sc, IWM_OSC_CLK);
1703 			iwm_nic_unlock(sc);
1704 		}
1705 	}
1706 
1707 	/*
1708 	 * Enable DMA clock and wait for it to stabilize.
1709 	 *
1710 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1711 	 * do not disable clocks.  This preserves any hardware bits already
1712 	 * set by default in "CLK_CTRL_REG" after reset.
1713 	 */
1714 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1715 		if (iwm_nic_lock(sc)) {
1716 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1717 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1718 			iwm_nic_unlock(sc);
1719 		}
1720 		DELAY(20);
1721 
1722 		/* Disable L1-Active */
1723 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1724 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1725 
1726 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1727 		if (iwm_nic_lock(sc)) {
1728 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1729 			    IWM_APMG_RTC_INT_STT_RFKILL);
1730 			iwm_nic_unlock(sc);
1731 		}
1732 	}
1733  out:
1734 	if (err)
1735 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1736 	return err;
1737 }
1738 
1739 void
1740 iwm_apm_stop(struct iwm_softc *sc)
1741 {
1742 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1743 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1744 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1745 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1746 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1747 	DELAY(1000);
1748 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1749 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1750 	DELAY(5000);
1751 
1752 	/* stop device's busmaster DMA activity */
1753 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1754 
1755 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1756 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1757 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1758 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1759 
1760 	/*
1761 	 * Clear "initialization complete" bit to move adapter from
1762 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1763 	 */
1764 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1765 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1766 }
1767 
1768 void
1769 iwm_init_msix_hw(struct iwm_softc *sc)
1770 {
1771 	iwm_conf_msix_hw(sc, 0);
1772 
1773 	if (!sc->sc_msix)
1774 		return;
1775 
1776 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1777 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1778 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1779 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1780 }
1781 
1782 void
1783 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1784 {
1785 	int vector = 0;
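	/* A single MSI-X vector is used; every interrupt cause below is
	 * mapped to vector 0. */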
1786 
1787 	if (!sc->sc_msix) {
1788 		/* Newer chips default to MSIX. */
1789 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1790 			iwm_write_prph(sc, IWM_UREG_CHICK,
1791 			    IWM_UREG_CHICK_MSI_ENABLE);
1792 			iwm_nic_unlock(sc);
1793 		}
1794 		return;
1795 	}
1796 
1797 	if (!stopped && iwm_nic_lock(sc)) {
1798 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1799 		iwm_nic_unlock(sc);
1800 	}
1801 
1802 	/* Disable all interrupts */
1803 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1804 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1805 
1806 	/* Map fallback-queue (command/mgmt) to a single vector */
1807 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1808 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1809 	/* Map RSS queue (data) to the same vector */
1810 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1811 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1812 
1813 	/* Enable the interrupt causes of the RX queues */
1814 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1815 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1816 
1817 	/* Map non-RX causes to the same vector */
1818 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1819 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1820 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1821 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1822 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1823 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1824 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1825 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1826 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1827 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1828 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1829 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1830 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1831 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1832 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1833 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1834 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1835 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1836 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1837 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1838 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1839 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1840 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1841 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1842 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1843 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1844 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1845 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1846 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1847 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1848 
1849 	/* Enable the non-RX interrupt causes */
1850 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1851 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1852 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1853 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1854 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1855 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1856 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1857 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
1858 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
1859 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
1860 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
1861 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
1862 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
1863 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
1864 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
1865 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
1866 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
1867 }
1868 
1869 int
1870 iwm_start_hw(struct iwm_softc *sc)
1871 {
1872 	int err;
1873 
1874 	err = iwm_prepare_card_hw(sc);
1875 	if (err)
1876 		return err;
1877 
1878 	/* Reset the entire device */
1879 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1880 	DELAY(5000);
1881 
1882 	err = iwm_apm_init(sc);
1883 	if (err)
1884 		return err;
1885 
1886 	iwm_init_msix_hw(sc);
1887 
1888 	iwm_enable_rfkill_int(sc);
1889 	iwm_check_rfkill(sc);
1890 
1891 	return 0;
1892 }
1893 
1894 
1895 void
1896 iwm_stop_device(struct iwm_softc *sc)
1897 {
1898 	int chnl, ntries;
1899 	int qid;
1900 
1901 	iwm_disable_interrupts(sc);
1902 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1903 
1904 	/* Stop all DMA channels. */
1905 	if (iwm_nic_lock(sc)) {
1906 		/* Deactivate TX scheduler. */
1907 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1908 
1909 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1910 			IWM_WRITE(sc,
1911 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1912 			for (ntries = 0; ntries < 200; ntries++) {
1913 				uint32_t r;
1914 
1915 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1916 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1917 				    chnl))
1918 					break;
1919 				DELAY(20);
1920 			}
1921 		}
1922 		iwm_nic_unlock(sc);
1923 	}
1924 	iwm_disable_rx_dma(sc);
1925 
1926 	iwm_reset_rx_ring(sc, &sc->rxq);
1927 
1928 	for (qid = 0; qid < nitems(sc->txq); qid++)
1929 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1930 
1931 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1932 		if (iwm_nic_lock(sc)) {
1933 			/* Power-down device's busmaster DMA clocks */
1934 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1935 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1936 			iwm_nic_unlock(sc);
1937 		}
1938 		DELAY(5);
1939 	}
1940 
1941 	/* Make sure (redundant) we've released our request to stay awake */
1942 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1943 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1944 	if (sc->sc_nic_locks > 0)
1945 		printf("%s: %d active NIC locks forcefully cleared\n",
1946 		    DEVNAME(sc), sc->sc_nic_locks);
1947 	sc->sc_nic_locks = 0;
1948 
1949 	/* Stop the device, and put it in low power state */
1950 	iwm_apm_stop(sc);
1951 
1952 	/* Reset the on-board processor. */
1953 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1954 	DELAY(5000);
1955 
1956 	/*
1957 	 * Upon stop, the IVAR table gets erased, so MSI-X won't
1958 	 * work. This breaks RF-KILL flows, since the interrupt
1959 	 * that enables the radio won't fire on the correct irq, and the
1960 	 * driver won't be able to handle it.
1961 	 * Configure the IVAR table again after reset.
1962 	 */
1963 	iwm_conf_msix_hw(sc, 1);
1964 
1965 	/*
1966 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1967 	 * Clear the interrupt again.
1968 	 */
1969 	iwm_disable_interrupts(sc);
1970 
1971 	/* Even though we stop the HW we still want the RF kill interrupt. */
1972 	iwm_enable_rfkill_int(sc);
1973 	iwm_check_rfkill(sc);
1974 
1975 	iwm_prepare_card_hw(sc);
1976 }
1977 
1978 void
1979 iwm_nic_config(struct iwm_softc *sc)
1980 {
1981 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1982 	uint32_t mask, val, reg_val = 0;
1983 
1984 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1985 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1986 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1987 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1988 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1989 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1990 
1991 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1992 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1993 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1994 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1995 
1996 	/* radio configuration */
1997 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1998 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1999 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2000 
2001 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2002 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2003 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2004 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2005 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2006 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2007 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2008 
2009 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2010 	val &= ~mask;
2011 	val |= reg_val;
2012 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2013 
2014 	/*
2015 	 * Workaround: the NIC is stuck in a reset state after early PCIe
2016 	 * power off (PCIe power is lost before PERST# is asserted), causing
2017 	 * the ME FW to lose ownership and be unable to obtain it back.
2018 	 */
2019 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2020 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2021 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2022 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2023 }
2024 
2025 int
2026 iwm_nic_rx_init(struct iwm_softc *sc)
2027 {
2028 	if (sc->sc_mqrx_supported)
2029 		return iwm_nic_rx_mq_init(sc);
2030 	else
2031 		return iwm_nic_rx_legacy_init(sc);
2032 }
2033 
2034 int
2035 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2036 {
2037 	int enabled;
2038 
2039 	if (!iwm_nic_lock(sc))
2040 		return EBUSY;
2041 
2042 	/* Stop RX DMA. */
2043 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2044 	/* Disable RX used and free queue operation. */
2045 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2046 
2047 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2048 	    sc->rxq.free_desc_dma.paddr);
2049 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2050 	    sc->rxq.used_desc_dma.paddr);
2051 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2052 	    sc->rxq.stat_dma.paddr);
2053 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2054 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2055 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2056 
2057 	/* We configure only queue 0 for now. */
2058 	enabled = ((1 << 0) << 16) | (1 << 0);
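	/*
	 * i.e. ((1 << qid) << 16) | (1 << qid) with qid == 0; the register
	 * appears to keep one per-queue enable bit in each 16-bit half
	 * (an assumption based on this encoding).
	 */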
2059 
2060 	/* Enable RX DMA, 4KB buffer size. */
2061 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2062 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2063 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2064 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2065 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2066 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2067 
2068 	/* Enable RX DMA snooping. */
2069 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2070 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2071 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2072 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2073 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2074 
2075 	/* Enable the configured queue(s). */
2076 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2077 
2078 	iwm_nic_unlock(sc);
2079 
2080 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2081 
2082 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2083 
2084 	return 0;
2085 }
2086 
2087 int
2088 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2089 {
2090 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2091 
2092 	iwm_disable_rx_dma(sc);
2093 
2094 	if (!iwm_nic_lock(sc))
2095 		return EBUSY;
2096 
2097 	/* reset and flush pointers */
2098 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2099 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2100 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2101 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2102 
2103 	/* Set physical address of RX ring (256-byte aligned). */
2104 	IWM_WRITE(sc,
2105 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2106 
2107 	/* Set physical address of RX status (16-byte aligned). */
2108 	IWM_WRITE(sc,
2109 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2110 
2111 	/* Enable RX. */
2112 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2113 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2114 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2115 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2116 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2117 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2118 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2119 
2120 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2121 
2122 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2123 	if (sc->host_interrupt_operation_mode)
2124 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2125 
2126 	iwm_nic_unlock(sc);
2127 
2128 	/*
2129 	 * This value should initially be 0 (before preparing any RBs),
2130 	 * and should be 8 after preparing the first 8 RBs (for example).
2131 	 */
2132 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2133 
2134 	return 0;
2135 }
2136 
2137 int
2138 iwm_nic_tx_init(struct iwm_softc *sc)
2139 {
2140 	int qid;
2141 
2142 	if (!iwm_nic_lock(sc))
2143 		return EBUSY;
2144 
2145 	/* Deactivate TX scheduler. */
2146 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2147 
2148 	/* Set physical address of "keep warm" page (16-byte aligned). */
2149 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2150 
2151 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2152 		struct iwm_tx_ring *txq = &sc->txq[qid];
2153 
2154 		/* Set physical address of TX ring (256-byte aligned). */
2155 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2156 		    txq->desc_dma.paddr >> 8);
2157 	}
2158 
2159 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2160 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2161 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2162 
2163 	iwm_nic_unlock(sc);
2164 
2165 	return 0;
2166 }
2167 
2168 int
2169 iwm_nic_init(struct iwm_softc *sc)
2170 {
2171 	int err;
2172 
2173 	iwm_apm_init(sc);
2174 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2175 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2176 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2177 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2178 
2179 	iwm_nic_config(sc);
2180 
2181 	err = iwm_nic_rx_init(sc);
2182 	if (err)
2183 		return err;
2184 
2185 	err = iwm_nic_tx_init(sc);
2186 	if (err)
2187 		return err;
2188 
2189 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2190 
2191 	return 0;
2192 }
2193 
2194 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2195 const uint8_t iwm_ac_to_tx_fifo[] = {
2196 	IWM_TX_FIFO_BE,
2197 	IWM_TX_FIFO_BK,
2198 	IWM_TX_FIFO_VI,
2199 	IWM_TX_FIFO_VO,
2200 };
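/*
 * i.e. index 0 (EDCA_AC_BE) -> BE FIFO, 1 (EDCA_AC_BK) -> BK FIFO,
 * 2 (EDCA_AC_VI) -> VI FIFO, 3 (EDCA_AC_VO) -> VO FIFO, assuming
 * net80211's EDCA access category numbering.
 */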
2201 
2202 int
2203 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2204 {
2205 	iwm_nic_assert_locked(sc);
2206 
2207 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2208 
2209 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2210 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2211 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2212 
2213 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2214 
2215 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2216 
2217 	iwm_write_mem32(sc,
2218 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2219 
2220 	/* Set scheduler window size and frame limit. */
2221 	iwm_write_mem32(sc,
2222 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2223 	    sizeof(uint32_t),
2224 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2225 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2226 	    ((IWM_FRAME_LIMIT
2227 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2228 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2229 
2230 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2231 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2232 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2233 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2234 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2235 
2236 	if (qid == sc->cmdqid)
2237 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2238 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2239 
2240 	return 0;
2241 }
2242 
2243 int
2244 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
2245 {
2246 	struct iwm_scd_txq_cfg_cmd cmd;
2247 	int err;
2248 
2249 	iwm_nic_assert_locked(sc);
2250 
2251 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2252 
2253 	memset(&cmd, 0, sizeof(cmd));
2254 	cmd.scd_queue = qid;
2255 	cmd.enable = 1;
2256 	cmd.sta_id = sta_id;
2257 	cmd.tx_fifo = fifo;
2258 	cmd.aggregate = 0;
2259 	cmd.window = IWM_FRAME_LIMIT;
2260 
2261 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2262 	    sizeof(cmd), &cmd);
2263 	if (err)
2264 		return err;
2265 
2266 	return 0;
2267 }
2268 
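/*
 * Complete hardware setup after the firmware's "alive" notification:
 * reset the ICT table, clear the scheduler's SRAM context, then enable
 * the command queue and the Tx DMA channels (a summary of the steps
 * below).
 */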
2269 int
2270 iwm_post_alive(struct iwm_softc *sc)
2271 {
2272 	int nwords;
2273 	int err, chnl;
2274 	uint32_t base;
2275 
2276 	if (!iwm_nic_lock(sc))
2277 		return EBUSY;
2278 
2279 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2280 
2281 	iwm_ict_reset(sc);
2282 
2283 	iwm_nic_unlock(sc);
2284 
2285 	/* Clear TX scheduler state in SRAM. */
2286 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2287 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2288 	    / sizeof(uint32_t);
2289 	err = iwm_write_mem(sc,
2290 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2291 	    NULL, nwords);
2292 	if (err)
2293 		return err;
2294 
2295 	if (!iwm_nic_lock(sc))
2296 		return EBUSY;
2297 
2298 	/* Set physical address of TX scheduler rings (1KB aligned). */
2299 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2300 
2301 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2302 
2303 	/* enable command channel */
2304 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2305 	if (err) {
2306 		iwm_nic_unlock(sc);
2307 		return err;
2308 	}
2309 
2310 	/* Activate TX scheduler. */
2311 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2312 
2313 	/* Enable DMA channels. */
2314 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2315 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2316 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2317 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2318 	}
2319 
2320 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2321 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2322 
2323 	iwm_nic_unlock(sc);
2324 
2325 	/* Enable L1-Active */
2326 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
2327 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2328 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2329 
2330 	return err;
2331 }
2332 
2333 struct iwm_phy_db_entry *
2334 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2335 {
2336 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2337 
2338 	if (type >= IWM_PHY_DB_MAX)
2339 		return NULL;
2340 
2341 	switch (type) {
2342 	case IWM_PHY_DB_CFG:
2343 		return &phy_db->cfg;
2344 	case IWM_PHY_DB_CALIB_NCH:
2345 		return &phy_db->calib_nch;
2346 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2347 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2348 			return NULL;
2349 		return &phy_db->calib_ch_group_papd[chg_id];
2350 	case IWM_PHY_DB_CALIB_CHG_TXP:
2351 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2352 			return NULL;
2353 		return &phy_db->calib_ch_group_txp[chg_id];
2354 	default:
2355 		return NULL;
2356 	}
2357 	return NULL;
2358 }
2359 
2360 int
2361 iwm_phy_db_set_section(struct iwm_softc *sc,
2362     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2363 {
2364 	uint16_t type = le16toh(phy_db_notif->type);
2365 	uint16_t size  = le16toh(phy_db_notif->length);
2366 	struct iwm_phy_db_entry *entry;
2367 	uint16_t chg_id = 0;
2368 
2369 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2370 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2371 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2372 
2373 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2374 	if (!entry)
2375 		return EINVAL;
2376 
2377 	if (entry->data)
2378 		free(entry->data, M_DEVBUF, entry->size);
2379 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2380 	if (!entry->data) {
2381 		entry->size = 0;
2382 		return ENOMEM;
2383 	}
2384 	memcpy(entry->data, phy_db_notif->data, size);
2385 	entry->size = size;
2386 
2387 	return 0;
2388 }
2389 
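/*
 * Valid channel numbers: 1-14 (2GHz), 36-64 and 100-140 in steps of
 * four, and 145-165 where the number is one above a multiple of four
 * (145, 149, ..., 165).
 */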
2390 int
2391 iwm_is_valid_channel(uint16_t ch_id)
2392 {
2393 	if (ch_id <= 14 ||
2394 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2395 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2396 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2397 		return 1;
2398 	return 0;
2399 }
2400 
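/*
 * Map a channel number to a contiguous channel index; worked examples
 * of the formulas below: channels 1-14 map to indices 0-13, channel 36
 * to (36 + 20) / 4 = 14, channel 100 to (100 - 12) / 4 = 22, and
 * channel 149 to (149 - 13) / 4 = 34.
 */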
2401 uint8_t
2402 iwm_ch_id_to_ch_index(uint16_t ch_id)
2403 {
2404 	if (!iwm_is_valid_channel(ch_id))
2405 		return 0xff;
2406 
2407 	if (ch_id <= 14)
2408 		return ch_id - 1;
2409 	if (ch_id <= 64)
2410 		return (ch_id + 20) / 4;
2411 	if (ch_id <= 140)
2412 		return (ch_id - 12) / 4;
2413 	return (ch_id - 13) / 4;
2414 }
2415 
2416 
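/*
 * Map a channel to one of the four PAPD calibration channel groups:
 * channels 1-14 are group 0, 36-64 group 1, 100-140 group 2, and the
 * remaining valid 5GHz channels group 3.
 */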
2417 uint16_t
2418 iwm_channel_id_to_papd(uint16_t ch_id)
2419 {
2420 	if (!iwm_is_valid_channel(ch_id))
2421 		return 0xff;
2422 
2423 	if (1 <= ch_id && ch_id <= 14)
2424 		return 0;
2425 	if (36 <= ch_id && ch_id <= 64)
2426 		return 1;
2427 	if (100 <= ch_id && ch_id <= 140)
2428 		return 2;
2429 	return 3;
2430 }
2431 
2432 uint16_t
2433 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2434 {
2435 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2436 	struct iwm_phy_db_chg_txp *txp_chg;
2437 	int i;
2438 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2439 
2440 	if (ch_index == 0xff)
2441 		return 0xff;
2442 
2443 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2444 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2445 		if (!txp_chg)
2446 			return 0xff;
2447 		/*
2448 		 * Looking for the first channel group the max channel
2449 		 * of which is higher than the requested channel.
2450 		 */
2451 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2452 			return i;
2453 	}
2454 	return 0xff;
2455 }
2456 
2457 int
2458 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2459     uint16_t *size, uint16_t ch_id)
2460 {
2461 	struct iwm_phy_db_entry *entry;
2462 	uint16_t ch_group_id = 0;
2463 
2464 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2465 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2466 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2467 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2468 
2469 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2470 	if (!entry)
2471 		return EINVAL;
2472 
2473 	*data = entry->data;
2474 	*size = entry->size;
2475 
2476 	return 0;
2477 }
2478 
2479 int
2480 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2481     void *data)
2482 {
2483 	struct iwm_phy_db_cmd phy_db_cmd;
2484 	struct iwm_host_cmd cmd = {
2485 		.id = IWM_PHY_DB_CMD,
2486 		.flags = IWM_CMD_ASYNC,
2487 	};
2488 
2489 	phy_db_cmd.type = le16toh(type);
2490 	phy_db_cmd.length = le16toh(length);
2491 
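	/* Send as two fragments: the fixed-size command header in data[0],
	 * followed by the variable-length payload in data[1]. */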
2492 	cmd.data[0] = &phy_db_cmd;
2493 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2494 	cmd.data[1] = data;
2495 	cmd.len[1] = length;
2496 
2497 	return iwm_send_cmd(sc, &cmd);
2498 }
2499 
2500 int
2501 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2502     uint8_t max_ch_groups)
2503 {
2504 	uint16_t i;
2505 	int err;
2506 	struct iwm_phy_db_entry *entry;
2507 
2508 	for (i = 0; i < max_ch_groups; i++) {
2509 		entry = iwm_phy_db_get_section(sc, type, i);
2510 		if (!entry)
2511 			return EINVAL;
2512 
2513 		if (!entry->size)
2514 			continue;
2515 
2516 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2517 		if (err)
2518 			return err;
2519 
2520 		DELAY(1000);
2521 	}
2522 
2523 	return 0;
2524 }
2525 
2526 int
2527 iwm_send_phy_db_data(struct iwm_softc *sc)
2528 {
2529 	uint8_t *data = NULL;
2530 	uint16_t size = 0;
2531 	int err;
2532 
2533 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2534 	if (err)
2535 		return err;
2536 
2537 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2538 	if (err)
2539 		return err;
2540 
2541 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2542 	    &data, &size, 0);
2543 	if (err)
2544 		return err;
2545 
2546 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2547 	if (err)
2548 		return err;
2549 
2550 	err = iwm_phy_db_send_all_channel_groups(sc,
2551 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2552 	if (err)
2553 		return err;
2554 
2555 	err = iwm_phy_db_send_all_channel_groups(sc,
2556 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2557 	if (err)
2558 		return err;
2559 
2560 	return 0;
2561 }
2562 
2563 /*
2564  * For the high priority TE use a time event type that has similar priority to
2565  * the FW's action scan priority.
2566  */
2567 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2568 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2569 
2570 int
2571 iwm_send_time_event_cmd(struct iwm_softc *sc,
2572     const struct iwm_time_event_cmd *cmd)
2573 {
2574 	struct iwm_rx_packet *pkt;
2575 	struct iwm_time_event_resp *resp;
2576 	struct iwm_host_cmd hcmd = {
2577 		.id = IWM_TIME_EVENT_CMD,
2578 		.flags = IWM_CMD_WANT_RESP,
2579 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2580 	};
2581 	uint32_t resp_len;
2582 	int err;
2583 
2584 	hcmd.data[0] = cmd;
2585 	hcmd.len[0] = sizeof(*cmd);
2586 	err = iwm_send_cmd(sc, &hcmd);
2587 	if (err)
2588 		return err;
2589 
2590 	pkt = hcmd.resp_pkt;
2591 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2592 		err = EIO;
2593 		goto out;
2594 	}
2595 
2596 	resp_len = iwm_rx_packet_payload_len(pkt);
2597 	if (resp_len != sizeof(*resp)) {
2598 		err = EIO;
2599 		goto out;
2600 	}
2601 
2602 	resp = (void *)pkt->data;
2603 	if (le32toh(resp->status) == 0)
2604 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2605 	else
2606 		err = EIO;
2607 out:
2608 	iwm_free_resp(sc, &hcmd);
2609 	return err;
2610 }
2611 
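/*
 * Schedule a firmware time event which reserves air time on the
 * current channel, so that time-critical frame exchanges such as
 * association can complete without interruption.
 */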
2612 void
2613 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2614     uint32_t duration, uint32_t max_delay)
2615 {
2616 	struct iwm_time_event_cmd time_cmd;
2617 
2618 	/* Do nothing if a time event is already scheduled. */
2619 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2620 		return;
2621 
2622 	memset(&time_cmd, 0, sizeof(time_cmd));
2623 
2624 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2625 	time_cmd.id_and_color =
2626 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2627 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2628 
2629 	time_cmd.apply_time = htole32(0);
2630 
2631 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2632 	time_cmd.max_delay = htole32(max_delay);
2633 	/* TODO: why set interval to the beacon interval if not periodic? */
2634 	time_cmd.interval = htole32(1);
2635 	time_cmd.duration = htole32(duration);
2636 	time_cmd.repeat = 1;
2637 	time_cmd.policy
2638 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2639 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2640 		IWM_T2_V2_START_IMMEDIATELY);
2641 
2642 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2643 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2644 
2645 	DELAY(100);
2646 }
2647 
2648 void
2649 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2650 {
2651 	struct iwm_time_event_cmd time_cmd;
2652 
2653 	/* Do nothing if the time event has already ended. */
2654 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2655 		return;
2656 
2657 	memset(&time_cmd, 0, sizeof(time_cmd));
2658 
2659 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2660 	time_cmd.id_and_color =
2661 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2662 	time_cmd.id = htole32(sc->sc_time_event_uid);
2663 
2664 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2665 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2666 
2667 	DELAY(100);
2668 }
2669 
2670 /*
2671  * NVM read access and content parsing.  We do not support
2672  * external NVM or writing NVM.
2673  */
2674 
2675 /* list of NVM sections we are allowed/need to read */
2676 const int iwm_nvm_to_read[] = {
2677 	IWM_NVM_SECTION_TYPE_HW,
2678 	IWM_NVM_SECTION_TYPE_SW,
2679 	IWM_NVM_SECTION_TYPE_REGULATORY,
2680 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2681 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2682 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2683 	IWM_NVM_SECTION_TYPE_HW_8000,
2684 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2685 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2686 };
2687 
2688 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2689 
2690 #define IWM_NVM_WRITE_OPCODE 1
2691 #define IWM_NVM_READ_OPCODE 0
2692 
2693 int
2694 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2695     uint16_t length, uint8_t *data, uint16_t *len)
2696 {
2697 	offset = 0;
2699 		.offset = htole16(offset),
2700 		.length = htole16(length),
2701 		.type = htole16(section),
2702 		.op_code = IWM_NVM_READ_OPCODE,
2703 	};
2704 	struct iwm_nvm_access_resp *nvm_resp;
2705 	struct iwm_rx_packet *pkt;
2706 	struct iwm_host_cmd cmd = {
2707 		.id = IWM_NVM_ACCESS_CMD,
2708 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2709 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2710 		.data = { &nvm_access_cmd, },
2711 	};
2712 	int err, offset_read;
2713 	size_t bytes_read;
2714 	uint8_t *resp_data;
2715 
2716 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2717 
2718 	err = iwm_send_cmd(sc, &cmd);
2719 	if (err)
2720 		return err;
2721 
2722 	pkt = cmd.resp_pkt;
2723 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2724 		err = EIO;
2725 		goto exit;
2726 	}
2727 
2728 	/* Extract NVM response */
2729 	nvm_resp = (void *)pkt->data;
2730 	if (nvm_resp == NULL)
2731 		return EIO;
2732 
2733 	err = le16toh(nvm_resp->status);
2734 	bytes_read = le16toh(nvm_resp->length);
2735 	offset_read = le16toh(nvm_resp->offset);
2736 	resp_data = nvm_resp->data;
2737 	if (err) {
2738 		err = EINVAL;
2739 		goto exit;
2740 	}
2741 
2742 	if (offset_read != offset) {
2743 		err = EINVAL;
2744 		goto exit;
2745 	}
2746 
2747 	if (bytes_read > length) {
2748 		err = EINVAL;
2749 		goto exit;
2750 	}
2751 
2752 	memcpy(data + offset, resp_data, bytes_read);
2753 	*len = bytes_read;
2754 
2755  exit:
2756 	iwm_free_resp(sc, &cmd);
2757 	return err;
2758 }
2759 
2760 /*
2761  * Reads an NVM section completely.
2762  * NICs prior to the 7000 family don't have a real NVM; they just read
2763  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2764  * by the uCode, we must check manually in this case that we don't
2765  * overflow and read more than the EEPROM size.
2766  */
2767 int
2768 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2769     uint16_t *len, size_t max_len)
2770 {
2771 	uint16_t chunklen, seglen;
2772 	int err = 0;
2773 
2774 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2775 	*len = 0;
2776 
2777 	/* Read NVM chunks until exhausted (reading less than requested) */
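	/*
	 * e.g. a 5000-byte section arrives as 2048 + 2048 + 904 bytes;
	 * the final short read (seglen < chunklen) terminates the loop.
	 */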
2778 	while (seglen == chunklen && *len < max_len) {
2779 		err = iwm_nvm_read_chunk(sc,
2780 		    section, *len, chunklen, data, &seglen);
2781 		if (err)
2782 			return err;
2783 
2784 		*len += seglen;
2785 	}
2786 
2787 	return err;
2788 }
2789 
2790 uint8_t
2791 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2792 {
2793 	uint8_t tx_ant;
2794 
2795 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2796 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2797 
2798 	if (sc->sc_nvm.valid_tx_ant)
2799 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2800 
2801 	return tx_ant;
2802 }
2803 
2804 uint8_t
2805 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2806 {
2807 	uint8_t rx_ant;
2808 
2809 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2810 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2811 
2812 	if (sc->sc_nvm.valid_rx_ant)
2813 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2814 
2815 	return rx_ant;
2816 }
2817 
2818 void
2819 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2820     const uint8_t *nvm_channels, int nchan)
2821 {
2822 	struct ieee80211com *ic = &sc->sc_ic;
2823 	struct iwm_nvm_data *data = &sc->sc_nvm;
2824 	int ch_idx;
2825 	struct ieee80211_channel *channel;
2826 	uint16_t ch_flags;
2827 	int is_5ghz;
2828 	int flags, hw_value;
2829 
2830 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2831 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2832 
2833 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2834 		    !data->sku_cap_band_52GHz_enable)
2835 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2836 
2837 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
2838 			continue;
2839 
2840 		hw_value = nvm_channels[ch_idx];
2841 		channel = &ic->ic_channels[hw_value];
2842 
2843 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2844 		if (!is_5ghz) {
2845 			flags = IEEE80211_CHAN_2GHZ;
2846 			channel->ic_flags
2847 			    = IEEE80211_CHAN_CCK
2848 			    | IEEE80211_CHAN_OFDM
2849 			    | IEEE80211_CHAN_DYN
2850 			    | IEEE80211_CHAN_2GHZ;
2851 		} else {
2852 			flags = IEEE80211_CHAN_5GHZ;
2853 			channel->ic_flags =
2854 			    IEEE80211_CHAN_A;
2855 		}
2856 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2857 
2858 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2859 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2860 
2861 		if (data->sku_cap_11n_enable)
2862 			channel->ic_flags |= IEEE80211_CHAN_HT;
2863 	}
2864 }
2865 
2866 void
2867 iwm_setup_ht_rates(struct iwm_softc *sc)
2868 {
2869 	struct ieee80211com *ic = &sc->sc_ic;
2870 	uint8_t rx_ant;
2871 
2872 	/* TX is supported with the same MCS as RX. */
2873 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2874 
2875 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2876 
2877 	if (sc->sc_nvm.sku_cap_mimo_disable)
2878 		return;
2879 
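	/* With two usable RX chains the device can receive two spatial
	 * streams, so advertise MCS 8-15 as well. */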
2880 	rx_ant = iwm_fw_valid_rx_ant(sc);
2881 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
2882 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
2883 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2884 }
2885 
2886 #define IWM_MAX_RX_BA_SESSIONS 16
2887 
2888 void
2889 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2890     uint16_t ssn, uint16_t winsize, int start)
2891 {
2892 	struct ieee80211com *ic = &sc->sc_ic;
2893 	struct iwm_add_sta_cmd cmd;
2894 	struct iwm_node *in = (void *)ni;
2895 	int err, s;
2896 	uint32_t status;
2897 	size_t cmdsize;
2898 
2899 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2900 		ieee80211_addba_req_refuse(ic, ni, tid);
2901 		return;
2902 	}
2903 
2904 	memset(&cmd, 0, sizeof(cmd));
2905 
2906 	cmd.sta_id = IWM_STATION_ID;
2907 	cmd.mac_id_n_color
2908 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2909 	cmd.add_modify = IWM_STA_MODE_MODIFY;
2910 
2911 	if (start) {
2912 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2913 		cmd.add_immediate_ba_ssn = ssn;
2914 		cmd.rx_ba_window = winsize;
2915 	} else {
2916 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2917 	}
2918 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2919 	    IWM_STA_MODIFY_REMOVE_BA_TID;
2920 
2921 	status = IWM_ADD_STA_SUCCESS;
2922 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
2923 		cmdsize = sizeof(cmd);
2924 	else
2925 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
2926 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
2927 	    &status);
2928 
2929 	s = splnet();
2930 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) == IWM_ADD_STA_SUCCESS) {
2931 		if (start) {
2932 			sc->sc_rx_ba_sessions++;
2933 			ieee80211_addba_req_accept(ic, ni, tid);
2934 		} else if (sc->sc_rx_ba_sessions > 0)
2935 			sc->sc_rx_ba_sessions--;
2936 	} else if (start)
2937 		ieee80211_addba_req_refuse(ic, ni, tid);
2938 
2939 	splx(s);
2940 }
2941 
2942 void
2943 iwm_htprot_task(void *arg)
2944 {
2945 	struct iwm_softc *sc = arg;
2946 	struct ieee80211com *ic = &sc->sc_ic;
2947 	struct iwm_node *in = (void *)ic->ic_bss;
2948 	int err, s = splnet();
2949 
2950 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
2951 		refcnt_rele_wake(&sc->task_refs);
2952 		splx(s);
2953 		return;
2954 	}
2955 
2956 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
2957 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2958 	if (err)
2959 		printf("%s: could not change HT protection: error %d\n",
2960 		    DEVNAME(sc), err);
2961 
2962 	refcnt_rele_wake(&sc->task_refs);
2963 	splx(s);
2964 }
2965 
2966 /*
2967  * This function is called by the upper layer when HT protection settings
2968  * in beacons have changed.
2969  */
2970 void
2971 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2972 {
2973 	struct iwm_softc *sc = ic->ic_softc;
2974 
2975 	/* assumes that ni == ic->ic_bss */
2976 	iwm_add_task(sc, systq, &sc->htprot_task);
2977 }
2978 
2979 void
2980 iwm_ba_task(void *arg)
2981 {
2982 	struct iwm_softc *sc = arg;
2983 	struct ieee80211com *ic = &sc->sc_ic;
2984 	struct ieee80211_node *ni = ic->ic_bss;
2985 	int s = splnet();
2986 
2987 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
2988 		refcnt_rele_wake(&sc->task_refs);
2989 		splx(s);
2990 		return;
2991 	}
2992 
2993 	if (sc->ba_start)
2994 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
2995 		    sc->ba_winsize, 1);
2996 	else
2997 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);
2998 
2999 	refcnt_rele_wake(&sc->task_refs);
3000 	splx(s);
3001 }
3002 
3003 /*
3004  * This function is called by the upper layer when an ADDBA request is
3005  * received from another STA, before the ADDBA response is sent.
3006  */
3007 int
3008 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3009     uint8_t tid)
3010 {
3011 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3012 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3013 
3014 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
3015 		return ENOSPC;
3016 
3017 	sc->ba_start = 1;
3018 	sc->ba_tid = tid;
3019 	sc->ba_ssn = htole16(ba->ba_winstart);
3020 	sc->ba_winsize = htole16(ba->ba_winsize);
3021 	iwm_add_task(sc, systq, &sc->ba_task);
3022 
3023 	return EBUSY;
3024 }
3025 
3026 /*
3027  * This function is called by the upper layer on teardown of an
3028  * HT-immediate Block Ack agreement (e.g. upon receipt of a DELBA frame).
3029  */
3030 void
3031 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3032     uint8_t tid)
3033 {
3034 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3035 
3036 	sc->ba_start = 0;
3037 	sc->ba_tid = tid;
3038 	iwm_add_task(sc, systq, &sc->ba_task);
3039 }
3040 
3041 void
3042 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3043     const uint16_t *mac_override, const uint16_t *nvm_hw)
3044 {
3045 	const uint8_t *hw_addr;
3046 
3047 	if (mac_override) {
3048 		static const uint8_t reserved_mac[] = {
3049 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3050 		};
3051 
3052 		hw_addr = (const uint8_t *)(mac_override +
3053 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3054 
3055 		/*
3056 		 * Store the MAC address from the MAO section.
3057 		 * No byte swapping is required in the MAO section.
3058 		 */
3059 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3060 
3061 		/*
3062 		 * Force the use of the OTP MAC address in case of reserved MAC
3063 		 * address in the NVM, or if address is given but invalid.
3064 		 */
3065 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3066 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3067 		    sizeof(etherbroadcastaddr)) != 0) &&
3068 		    (memcmp(etheranyaddr, data->hw_addr,
3069 		    sizeof(etheranyaddr)) != 0) &&
3070 		    !ETHER_IS_MULTICAST(data->hw_addr))
3071 			return;
3072 	}
3073 
3074 	if (nvm_hw) {
3075 		/* Read the mac address from WFMP registers. */
3076 		uint32_t mac_addr0, mac_addr1;
3077 
3078 		if (!iwm_nic_lock(sc))
3079 			goto out;
3080 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3081 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3082 		iwm_nic_unlock(sc);
3083 
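		/*
		 * MAC bytes 0-3 come from WFMP_MAC_ADDR_0 and bytes 4-5 from
		 * WFMP_MAC_ADDR_1, most significant byte first; e.g. for MAC
		 * 00:11:22:33:44:55 the registers would read 0x00112233 and
		 * 0x00004455 (illustrative values inferred from the byte
		 * picking below).
		 */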
3084 		hw_addr = (const uint8_t *)&mac_addr0;
3085 		data->hw_addr[0] = hw_addr[3];
3086 		data->hw_addr[1] = hw_addr[2];
3087 		data->hw_addr[2] = hw_addr[1];
3088 		data->hw_addr[3] = hw_addr[0];
3089 
3090 		hw_addr = (const uint8_t *)&mac_addr1;
3091 		data->hw_addr[4] = hw_addr[1];
3092 		data->hw_addr[5] = hw_addr[0];
3093 
3094 		return;
3095 	}
3096 out:
3097 	printf("%s: mac address not found\n", DEVNAME(sc));
3098 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3099 }
3100 
3101 int
3102 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3103     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3104     const uint16_t *mac_override, const uint16_t *phy_sku,
3105     const uint16_t *regulatory, int n_regulatory)
3106 {
3107 	struct iwm_nvm_data *data = &sc->sc_nvm;
3108 	uint8_t hw_addr[ETHER_ADDR_LEN];
3109 	uint32_t sku;
3110 	uint16_t lar_config;
3111 
3112 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3113 
3114 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3115 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3116 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3117 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3118 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3119 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3120 
3121 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3122 	} else {
3123 		uint32_t radio_cfg =
3124 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3125 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3126 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3127 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3128 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3129 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3130 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3131 
3132 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3133 	}
3134 
3135 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3136 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3137 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3138 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3139 
3140 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3141 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3142 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3143 				       IWM_NVM_LAR_OFFSET_8000;
3144 
3145 		lar_config = le16_to_cpup(regulatory + lar_offset);
3146 		data->lar_enabled = !!(lar_config &
3147 				       IWM_NVM_LAR_ENABLED_8000);
3148 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3149 	} else
3150 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3151 
3152 
3153 	/* The byte order is little endian 16 bit, meaning 214365 */
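	/* e.g. MAC 00:11:22:33:44:55 is stored as 11:00:33:22:55:44
	 * (illustrative address). */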
3154 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3155 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3156 		data->hw_addr[0] = hw_addr[1];
3157 		data->hw_addr[1] = hw_addr[0];
3158 		data->hw_addr[2] = hw_addr[3];
3159 		data->hw_addr[3] = hw_addr[2];
3160 		data->hw_addr[4] = hw_addr[5];
3161 		data->hw_addr[5] = hw_addr[4];
3162 	} else
3163 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3164 
3165 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3166 		if (sc->nvm_type == IWM_NVM_SDP) {
3167 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3168 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3169 		} else {
3170 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3171 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3172 		}
3173 	} else
3174 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3175 		    iwm_nvm_channels_8000,
3176 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3177 
3178 	data->calib_version = 255;   /* TODO: this value prevents some
3179 					checks from failing; we need to
3180 					check whether this field is still
3181 					needed, and if so, where it lives
3182 					in the NVM */
3183 
3184 	return 0;
3185 }
3186 
3187 int
3188 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3189 {
3190 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3191 	const uint16_t *regulatory = NULL;
3192 	int n_regulatory = 0;
3193 
3194 	/* Checking for required sections */
3195 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3196 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3197 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3198 			return ENOENT;
3199 		}
3200 
3201 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3202 
3203 		if (sc->nvm_type == IWM_NVM_SDP) {
3204 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3205 				return ENOENT;
3206 			regulatory = (const uint16_t *)
3207 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3208 			n_regulatory =
3209 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3210 		}
3211 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3212 		/* SW and REGULATORY sections are mandatory */
3213 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3214 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3215 			return ENOENT;
3216 		}
3217 		/* MAC_OVERRIDE or at least HW section must exist */
3218 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3219 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3220 			return ENOENT;
3221 		}
3222 
3223 		/* PHY_SKU section is mandatory in B0 */
3224 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3225 			return ENOENT;
3226 		}
3227 
3228 		regulatory = (const uint16_t *)
3229 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3230 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
3231 		hw = (const uint16_t *)
3232 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3233 		mac_override =
3234 			(const uint16_t *)
3235 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3236 		phy_sku = (const uint16_t *)
3237 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3238 	} else {
3239 		panic("unknown device family %d\n", sc->sc_device_family);
3240 	}
3241 
3242 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3243 	calib = (const uint16_t *)
3244 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3245 
3246 	/* XXX should pass in the length of every section */
3247 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3248 	    phy_sku, regulatory, n_regulatory);
3249 }
3250 
3251 int
3252 iwm_nvm_init(struct iwm_softc *sc)
3253 {
3254 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3255 	int i, section, err;
3256 	uint16_t len;
3257 	uint8_t *buf;
3258 	const size_t bufsz = sc->sc_nvm_max_section_size;
3259 
3260 	memset(nvm_sections, 0, sizeof(nvm_sections));
3261 
3262 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3263 	if (buf == NULL)
3264 		return ENOMEM;
3265 
3266 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
3267 		section = iwm_nvm_to_read[i];
3268 		KASSERT(section <= nitems(nvm_sections));
3269 
3270 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3271 		if (err) {
3272 			err = 0;
3273 			continue;
3274 		}
3275 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3276 		if (nvm_sections[section].data == NULL) {
3277 			err = ENOMEM;
3278 			break;
3279 		}
3280 		memcpy(nvm_sections[section].data, buf, len);
3281 		nvm_sections[section].length = len;
3282 	}
3283 	free(buf, M_DEVBUF, bufsz);
3284 	if (err == 0)
3285 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3286 
3287 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3288 		if (nvm_sections[i].data != NULL)
3289 			free(nvm_sections[i].data, M_DEVBUF,
3290 			    nvm_sections[i].length);
3291 	}
3292 
3293 	return err;
3294 }
3295 
3296 int
3297 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3298     const uint8_t *section, uint32_t byte_cnt)
3299 {
3300 	int err = EINVAL;
3301 	uint32_t chunk_sz, offset;
3302 
3303 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3304 
3305 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3306 		uint32_t addr, len;
3307 		const uint8_t *data;
3308 
3309 		addr = dst_addr + offset;
3310 		len = MIN(chunk_sz, byte_cnt - offset);
3311 		data = section + offset;
3312 
3313 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3314 		if (err)
3315 			break;
3316 	}
3317 
3318 	return err;
3319 }
3320 
3321 int
3322 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3323     const uint8_t *chunk, uint32_t byte_cnt)
3324 {
3325 	struct iwm_dma_info *dma = &sc->fw_dma;
3326 	int err;
3327 
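	/*
	 * The service DMA channel copies the chunk from host memory into
	 * device memory at dst_addr; the interrupt handler sets
	 * sc_fw_chunk_done once the channel signals completion.
	 */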
3328 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3329 	memcpy(dma->vaddr, chunk, byte_cnt);
3330 	bus_dmamap_sync(sc->sc_dmat,
3331 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
3332 
3333 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3334 	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
3335 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3336 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3337 
3338 	sc->sc_fw_chunk_done = 0;
3339 
3340 	if (!iwm_nic_lock(sc))
3341 		return EBUSY;
3342 
3343 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3344 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3345 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3346 	    dst_addr);
3347 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3348 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3349 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3350 	    (iwm_get_dma_hi_addr(dma->paddr)
3351 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3352 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3353 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3354 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3355 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3356 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3357 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3358 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3359 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3360 
3361 	iwm_nic_unlock(sc);
3362 
3363 	/* Wait for this segment to load. */
3364 	err = 0;
3365 	while (!sc->sc_fw_chunk_done) {
3366 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
3367 		if (err)
3368 			break;
3369 	}
3370 
3371 	if (!sc->sc_fw_chunk_done)
3372 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
3373 		    DEVNAME(sc), dst_addr, byte_cnt);
3374 
3375 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3376 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
3377 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3378 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3379 	}
3380 
3381 	return err;
3382 }
3383 
3384 int
3385 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3386 {
3387 	struct iwm_fw_sects *fws;
3388 	int err, i;
3389 	void *data;
3390 	uint32_t dlen;
3391 	uint32_t offset;
3392 
3393 	fws = &sc->sc_fw.fw_sects[ucode_type];
3394 	for (i = 0; i < fws->fw_count; i++) {
3395 		data = fws->fw_sect[i].fws_data;
3396 		dlen = fws->fw_sect[i].fws_len;
3397 		offset = fws->fw_sect[i].fws_devoff;
3398 		if (dlen > sc->sc_fwdmasegsz) {
3399 			err = EFBIG;
3400 		} else
3401 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3402 		if (err) {
3403 			printf("%s: could not load firmware chunk %u of %u\n",
3404 			    DEVNAME(sc), i, fws->fw_count);
3405 			return err;
3406 		}
3407 	}
3408 
3409 	iwm_enable_interrupts(sc);
3410 
3411 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3412 
3413 	return 0;
3414 }
3415 
3416 int
3417 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3418     int cpu, int *first_ucode_section)
3419 {
3420 	int shift_param;
3421 	int i, err = 0, sec_num = 0x1;
3422 	uint32_t val, last_read_idx = 0;
3423 	void *data;
3424 	uint32_t dlen;
3425 	uint32_t offset;
3426 
3427 	if (cpu == 1) {
3428 		shift_param = 0;
3429 		*first_ucode_section = 0;
3430 	} else {
3431 		shift_param = 16;
3432 		(*first_ucode_section)++;
3433 	}
3434 
3435 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3436 		last_read_idx = i;
3437 		data = fws->fw_sect[i].fws_data;
3438 		dlen = fws->fw_sect[i].fws_len;
3439 		offset = fws->fw_sect[i].fws_devoff;
3440 
3441 		/*
3442 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
3443 		 * CPU1 to CPU2.
3444 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
3445 		 * CPU2 non paged to CPU2 paging sec.
3446 		 */
3447 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3448 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3449 			break;
3450 
3451 		if (dlen > sc->sc_fwdmasegsz) {
3452 			err = EFBIG;
3453 		} else
3454 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3455 		if (err) {
3456 			printf("%s: could not load firmware chunk %d "
3457 			    "(error %d)\n", DEVNAME(sc), i, err);
3458 			return err;
3459 		}
3460 
3461 		/* Notify the ucode of the loaded section number and status */
3462 		if (iwm_nic_lock(sc)) {
3463 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3464 			val = val | (sec_num << shift_param);
3465 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3466 			sec_num = (sec_num << 1) | 0x1;
3467 			iwm_nic_unlock(sc);
3468 		} else {
3469 			err = EBUSY;
3470 			printf("%s: could not load firmware chunk %d "
3471 			    "(error %d)\n", DEVNAME(sc), i, err);
3472 			return err;
3473 		}
3474 	}
3475 
3476 	*first_ucode_section = last_read_idx;
3477 
3478 	if (iwm_nic_lock(sc)) {
3479 		if (cpu == 1)
3480 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3481 		else
3482 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3483 		iwm_nic_unlock(sc);
3484 	} else {
3485 		err = EBUSY;
3486 		printf("%s: could not finalize firmware loading (error %d)\n",
3487 		    DEVNAME(sc), err);
3488 		return err;
3489 	}
3490 
3491 	return 0;
3492 }
3493 
3494 int
3495 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3496 {
3497 	struct iwm_fw_sects *fws;
3498 	int err = 0;
3499 	int first_ucode_section;
3500 
3501 	fws = &sc->sc_fw.fw_sects[ucode_type];
3502 
3503 	/* configure the ucode to be ready to get the secured image */
3504 	/* release CPU reset */
3505 	if (iwm_nic_lock(sc)) {
3506 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3507 		    IWM_RELEASE_CPU_RESET_BIT);
3508 		iwm_nic_unlock(sc);
3509 	}
3510 
3511 	/* Load CPU1's secured binary sections into the firmware. */
3512 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3513 	if (err)
3514 		return err;
3515 
3516 	/* Load CPU2's binary sections into the firmware. */
3517 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3518 	if (err)
3519 		return err;
3520 
3521 	iwm_enable_interrupts(sc);
3522 	return 0;
3523 }
3524 
3525 int
3526 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3527 {
3528 	int err, w;
3529 
3530 	sc->sc_uc.uc_intr = 0;
3531 
3532 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
3533 		err = iwm_load_firmware_8000(sc, ucode_type);
3534 	else
3535 		err = iwm_load_firmware_7000(sc, ucode_type);
3536 
3537 	if (err)
3538 		return err;
3539 
3540 	/* Wait up to one second (10 * 100 ms) for the firmware to come alive. */
3541 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
3542 		err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", MSEC_TO_NSEC(100));
3543 	}
3544 	if (err || !sc->sc_uc.uc_ok)
3545 		printf("%s: could not load firmware\n", DEVNAME(sc));
3546 
3547 	return err;
3548 }
3549 
3550 int
3551 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3552 {
3553 	int err;
3554 
3555 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3556 
3557 	err = iwm_nic_init(sc);
3558 	if (err) {
3559 		printf("%s: unable to init nic\n", DEVNAME(sc));
3560 		return err;
3561 	}
3562 
3563 	/* make sure rfkill handshake bits are cleared */
3564 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3565 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3566 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3567 
3568 	/* clear (again), then enable firmware load interrupt */
3569 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3570 	iwm_enable_fwload_interrupt(sc);
3571 
3572 	/* really make sure rfkill handshake bits are cleared */
3573 	/* maybe we should write a few times more?  just to make sure */
3574 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3575 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3576 
3577 	return iwm_load_firmware(sc, ucode_type);
3578 }
3579 
3580 int
3581 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3582 {
3583 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3584 		.valid = htole32(valid_tx_ant),
3585 	};
3586 
3587 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
3588 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3589 }
3590 
3591 int
3592 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3593 {
3594 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3595 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3596 
3597 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3598 	phy_cfg_cmd.calib_control.event_trigger =
3599 	    sc->sc_default_calib[ucode_type].event_trigger;
3600 	phy_cfg_cmd.calib_control.flow_trigger =
3601 	    sc->sc_default_calib[ucode_type].flow_trigger;
3602 
3603 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3604 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3605 }
3606 
3607 int
3608 iwm_send_dqa_cmd(struct iwm_softc *sc)
3609 {
3610 	struct iwm_dqa_enable_cmd dqa_cmd = {
3611 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
3612 	};
3613 	uint32_t cmd_id;
3614 
3615 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
3616 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3617 }
3618 
3619 int
3620 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
3621 	enum iwm_ucode_type ucode_type)
3622 {
3623 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3624 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
3625 	int err;
3626 
3627 	err = iwm_read_firmware(sc, ucode_type);
3628 	if (err)
3629 		return err;
3630 
3631 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3632 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
3633 	else
3634 		sc->cmdqid = IWM_CMD_QUEUE;
3635 
3636 	sc->sc_uc_current = ucode_type;
3637 	err = iwm_start_fw(sc, ucode_type);
3638 	if (err) {
3639 		sc->sc_uc_current = old_type;
3640 		return err;
3641 	}
3642 
3643 	err = iwm_post_alive(sc);
3644 	if (err)
3645 		return err;
3646 
3647 	/*
3648 	 * Configure and operate the firmware paging mechanism.
3649 	 * The driver configures the paging flow only once; the CPU2 paging
3650 	 * image is included in the IWM_UCODE_INIT image.
3651 	 */
3652 	if (fw->paging_mem_size) {
3653 		err = iwm_save_fw_paging(sc, fw);
3654 		if (err) {
3655 			printf("%s: failed to save the FW paging image\n",
3656 			    DEVNAME(sc));
3657 			return err;
3658 		}
3659 
3660 		err = iwm_send_paging_cmd(sc, fw);
3661 		if (err) {
3662 			printf("%s: failed to send the paging cmd\n",
3663 			    DEVNAME(sc));
3664 			iwm_free_fw_paging(sc);
3665 			return err;
3666 		}
3667 	}
3668 
3669 	return 0;
3670 }
3671 
3672 int
3673 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3674 {
3675 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
3676 	int err;
3677 
3678 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3679 		printf("%s: radio is disabled by hardware switch\n",
3680 		    DEVNAME(sc));
3681 		return EPERM;
3682 	}
3683 
3684 	sc->sc_init_complete = 0;
3685 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3686 	if (err) {
3687 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3688 		return err;
3689 	}
3690 
3691 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
3692 		err = iwm_send_bt_init_conf(sc);
3693 		if (err) {
3694 			printf("%s: could not init bt coex (error %d)\n",
3695 			    DEVNAME(sc), err);
3696 			return err;
3697 		}
3698 	}
3699 
3700 	if (justnvm) {
3701 		err = iwm_nvm_init(sc);
3702 		if (err) {
3703 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3704 			return err;
3705 		}
3706 
3707 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3708 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3709 			    sc->sc_nvm.hw_addr);
3710 
3711 		return 0;
3712 	}
3713 
3714 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3715 	if (err)
3716 		return err;
3717 
3718 	/* Send TX valid antennas before triggering calibrations */
3719 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3720 	if (err)
3721 		return err;
3722 
3723 	/*
3724 	 * Send the PHY configuration command to the init uCode
3725 	 * to start the 16.0 uCode init image's internal calibrations.
3726 	 */
3727 	err = iwm_send_phy_cfg_cmd(sc);
3728 	if (err)
3729 		return err;
3730 
3731 	/*
3732 	 * Nothing to do but wait for the init complete and phy DB
3733 	 * notifications from the firmware.
3734 	 */
3735 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3736 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
3737 		    SEC_TO_NSEC(2));
3738 		if (err)
3739 			break;
3740 	}
3741 
3742 	return err;
3743 }
3744 
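/*
 * Tell the firmware to enable PCIe Latency Tolerance Reporting (LTR),
 * which lets the platform factor the device's latency requirements into
 * its power management decisions.  A no-op unless the bus advertised
 * LTR support during attach (sc_ltr_enabled).
 */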
3745 int
3746 iwm_config_ltr(struct iwm_softc *sc)
3747 {
3748 	struct iwm_ltr_config_cmd cmd = {
3749 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3750 	};
3751 
3752 	if (!sc->sc_ltr_enabled)
3753 		return 0;
3754 
3755 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3756 }
3757 
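/*
 * Attach a fresh mbuf cluster to RX ring slot 'idx' and publish its DMA
 * address through the matching RX descriptor: a 64-bit address on
 * MQ-RX capable hardware, or a 32-bit address shifted right by 8 bits
 * on older devices.
 */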
3758 int
3759 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3760 {
3761 	struct iwm_rx_ring *ring = &sc->rxq;
3762 	struct iwm_rx_data *data = &ring->data[idx];
3763 	struct mbuf *m;
3764 	int err;
3765 	int fatal = 0;
3766 
3767 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3768 	if (m == NULL)
3769 		return ENOBUFS;
3770 
3771 	if (size <= MCLBYTES) {
3772 		MCLGET(m, M_DONTWAIT);
3773 	} else {
3774 		MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
3775 	}
3776 	if ((m->m_flags & M_EXT) == 0) {
3777 		m_freem(m);
3778 		return ENOBUFS;
3779 	}
3780 
3781 	if (data->m != NULL) {
3782 		bus_dmamap_unload(sc->sc_dmat, data->map);
3783 		fatal = 1;
3784 	}
3785 
3786 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3787 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3788 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3789 	if (err) {
3790 		/* XXX */
3791 		if (fatal)
3792 			panic("iwm: could not load RX mbuf");
3793 		m_freem(m);
3794 		return err;
3795 	}
3796 	data->m = m;
3797 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3798 
3799 	/* Update RX descriptor. */
3800 	if (sc->sc_mqrx_supported) {
3801 		((uint64_t *)ring->desc)[idx] =
3802 		    htole64(data->map->dm_segs[0].ds_addr);
3803 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3804 		    idx * sizeof(uint64_t), sizeof(uint64_t),
3805 		    BUS_DMASYNC_PREWRITE);
3806 	} else {
3807 		((uint32_t *)ring->desc)[idx] =
3808 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
3809 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3810 		    idx * sizeof(uint32_t), sizeof(uint32_t),
3811 		    BUS_DMASYNC_PREWRITE);
3812 	}
3813 
3814 	return 0;
3815 }
3816 
3817 /*
3818  * RSSI values are reported by the FW as positive values - we need to negate
3819  * them to obtain their dBm.  Account for missing antennas by replacing 0
3820  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3821  */
3822 int
3823 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3824 {
3825 	int energy_a, energy_b, energy_c, max_energy;
3826 	uint32_t val;
3827 
3828 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3829 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3830 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3831 	energy_a = energy_a ? -energy_a : -256;
3832 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3833 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3834 	energy_b = energy_b ? -energy_b : -256;
3835 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3836 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3837 	energy_c = energy_c ? -energy_c : -256;
3838 	max_energy = MAX(energy_a, energy_b);
3839 	max_energy = MAX(max_energy, energy_c);
3840 
3841 	return max_energy;
3842 }
3843 
3844 int
3845 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3846     struct iwm_rx_mpdu_desc *desc)
3847 {
3848 	int energy_a, energy_b;
3849 
3850 	energy_a = desc->v1.energy_a;
3851 	energy_b = desc->v1.energy_b;
3852 	energy_a = energy_a ? -energy_a : -256;
3853 	energy_b = energy_b ? -energy_b : -256;
3854 	return MAX(energy_a, energy_b);
3855 }
3856 
3857 void
3858 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3859     struct iwm_rx_data *data)
3860 {
3861 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3862 
3863 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3864 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3865 
3866 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3867 }
3868 
3869 /*
3870  * Retrieve the average noise (in dBm) among receivers.
3871  */
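/*
 * For example, beacon silence readings of 40, 42 and 0 (a missing
 * antenna) average to 41 across two antennas: 41 - 107 = -66 dBm.
 */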
3872 int
3873 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3874 {
3875 	int i, total, nbant, noise;
3876 
3877 	total = nbant = noise = 0;
3878 	for (i = 0; i < 3; i++) {
3879 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3880 		if (noise) {
3881 			total += noise;
3882 			nbant++;
3883 		}
3884 	}
3885 
3886 	/* There should be at least one antenna but check anyway. */
3887 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3888 }
3889 
3890 void
3891 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
3892      int is_shortpre, int rate_n_flags, uint32_t device_timestamp,
3893      struct ieee80211_rxinfo *rxi, struct mbuf_list *ml)
3894 {
3895 	struct ieee80211com *ic = &sc->sc_ic;
3896 	struct ieee80211_frame *wh;
3897 	struct ieee80211_node *ni;
3898 	struct ieee80211_channel *bss_chan = NULL;
3899 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3900 
3901 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3902 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3903 
3904 	wh = mtod(m, struct ieee80211_frame *);
3905 	ni = ieee80211_find_rxnode(ic, wh);
3906 	if (ni == ic->ic_bss) {
3907 		/*
3908 		 * We may switch ic_bss's channel during scans.
3909 		 * Record the current channel so we can restore it later.
3910 		 */
3911 		bss_chan = ni->ni_chan;
3912 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3913 	}
3914 	ni->ni_chan = &ic->ic_channels[chanidx];
3915 
3916 #if NBPFILTER > 0
3917 	if (sc->sc_drvbpf != NULL) {
3918 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3919 		uint16_t chan_flags;
3920 
3921 		tap->wr_flags = 0;
3922 		if (is_shortpre)
3923 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3924 		tap->wr_chan_freq =
3925 		    htole16(ic->ic_channels[chanidx].ic_freq);
3926 		chan_flags = ic->ic_channels[chanidx].ic_flags;
3927 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3928 			chan_flags &= ~IEEE80211_CHAN_HT;
3929 		tap->wr_chan_flags = htole16(chan_flags);
3930 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3931 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3932 		tap->wr_tsft = device_timestamp;
3933 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
3934 			uint8_t mcs = (rate_n_flags &
3935 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
3936 			    IWM_RATE_HT_MCS_NSS_MSK));
3937 			tap->wr_rate = (0x80 | mcs);
3938 		} else {
3939 			uint8_t rate = (rate_n_flags &
3940 			    IWM_RATE_LEGACY_RATE_MSK);
3941 			switch (rate) {
3942 			/* CCK rates. */
3943 			case  10: tap->wr_rate =   2; break;
3944 			case  20: tap->wr_rate =   4; break;
3945 			case  55: tap->wr_rate =  11; break;
3946 			case 110: tap->wr_rate =  22; break;
3947 			/* OFDM rates. */
3948 			case 0xd: tap->wr_rate =  12; break;
3949 			case 0xf: tap->wr_rate =  18; break;
3950 			case 0x5: tap->wr_rate =  24; break;
3951 			case 0x7: tap->wr_rate =  36; break;
3952 			case 0x9: tap->wr_rate =  48; break;
3953 			case 0xb: tap->wr_rate =  72; break;
3954 			case 0x1: tap->wr_rate =  96; break;
3955 			case 0x3: tap->wr_rate = 108; break;
3956 			/* Unknown rate: should not happen. */
3957 			default:  tap->wr_rate =   0;
3958 			}
3959 		}
3960 
3961 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
3962 		    m, BPF_DIRECTION_IN);
3963 	}
3964 #endif
3965 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
3966 	/*
3967 	 * ieee80211_inputm() might have changed our BSS.
3968 	 * Restore ic_bss's channel if we are still in the same BSS.
3969 	 */
3970 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
3971 		ni->ni_chan = bss_chan;
3972 	ieee80211_release_node(ic, ni);
3973 }
3974 
3975 void
3976 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
3977     size_t maxlen, struct mbuf_list *ml)
3978 {
3979 	struct ieee80211com *ic = &sc->sc_ic;
3980 	struct ieee80211_rxinfo rxi;
3981 	struct iwm_rx_phy_info *phy_info;
3982 	struct iwm_rx_mpdu_res_start *rx_res;
3983 	int device_timestamp;
3984 	uint16_t phy_flags;
3985 	uint32_t len;
3986 	uint32_t rx_pkt_status;
3987 	int rssi, chanidx, rate_n_flags;
3988 
3989 	phy_info = &sc->sc_last_phy_info;
3990 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
3991 	len = le16toh(rx_res->byte_count);
3992 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
3993 		/* Allow control frames in monitor mode. */
3994 		if (len < sizeof(struct ieee80211_frame_cts)) {
3995 			ic->ic_stats.is_rx_tooshort++;
3996 			IC2IFP(ic)->if_ierrors++;
3997 			m_freem(m);
3998 			return;
3999 		}
4000 	} else if (len < sizeof(struct ieee80211_frame)) {
4001 		ic->ic_stats.is_rx_tooshort++;
4002 		IC2IFP(ic)->if_ierrors++;
4003 		m_freem(m);
4004 		return;
4005 	}
4006 	if (len > maxlen - sizeof(*rx_res)) {
4007 		IC2IFP(ic)->if_ierrors++;
4008 		m_freem(m);
4009 		return;
4010 	}
4011 
4012 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4013 		m_freem(m);
4014 		return;
4015 	}
4016 
4017 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4018 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4019 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4020 		m_freem(m);
4021 		return; /* drop */
4022 	}
4023 
4024 	m->m_data = pktdata + sizeof(*rx_res);
4025 	m->m_pkthdr.len = m->m_len = len;
4026 
4027 	chanidx = letoh32(phy_info->channel);
4028 	device_timestamp = le32toh(phy_info->system_timestamp);
4029 	phy_flags = letoh16(phy_info->phy_flags);
4030 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4031 
4032 	rssi = iwm_get_signal_strength(sc, phy_info);
4033 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4034 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4035 
4036 	memset(&rxi, 0, sizeof(rxi));
4037 	rxi.rxi_rssi = rssi;
4038 	rxi.rxi_tstamp = device_timestamp;
4039 
4040 	iwm_rx_frame(sc, m, chanidx,
4041 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4042 	    rate_n_flags, device_timestamp, &rxi, ml);
4043 }
4044 
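/*
 * RX handler for devices using the multi-queue RX descriptor format
 * (struct iwm_rx_mpdu_desc), which carries the PHY information inline
 * rather than in a preceding RX_PHY_CMD notification.
 */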
4045 void
4046 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4047     size_t maxlen, struct mbuf_list *ml)
4048 {
4049 	struct ieee80211com *ic = &sc->sc_ic;
4050 	struct ieee80211_rxinfo rxi;
4051 	struct iwm_rx_mpdu_desc *desc;
4052 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4053 	int rssi;
4054 	uint8_t chanidx;
4055 	uint16_t phy_info;
4056 
4057 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
4058 
4059 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
4060 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4061 		m_freem(m);
4062 		return; /* drop */
4063 	}
4064 
4065 	len = le16toh(desc->mpdu_len);
4066 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4067 		/* Allow control frames in monitor mode. */
4068 		if (len < sizeof(struct ieee80211_frame_cts)) {
4069 			ic->ic_stats.is_rx_tooshort++;
4070 			IC2IFP(ic)->if_ierrors++;
4071 			m_freem(m);
4072 			return;
4073 		}
4074 	} else if (len < sizeof(struct ieee80211_frame)) {
4075 		ic->ic_stats.is_rx_tooshort++;
4076 		IC2IFP(ic)->if_ierrors++;
4077 		m_freem(m);
4078 		return;
4079 	}
4080 	if (len > maxlen - sizeof(*desc)) {
4081 		IC2IFP(ic)->if_ierrors++;
4082 		m_freem(m);
4083 		return;
4084 	}
4085 
4086 	m->m_data = pktdata + sizeof(*desc);
4087 	m->m_pkthdr.len = m->m_len = len;
4088 
4089 	/* Strip the 2-byte padding the hardware inserted after the header. */
4090 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
4091 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4092 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4093 		if (type == IEEE80211_FC0_TYPE_CTL) {
4094 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4095 			case IEEE80211_FC0_SUBTYPE_CTS:
4096 				hdrlen = sizeof(struct ieee80211_frame_cts);
4097 				break;
4098 			case IEEE80211_FC0_SUBTYPE_ACK:
4099 				hdrlen = sizeof(struct ieee80211_frame_ack);
4100 				break;
4101 			default:
4102 				hdrlen = sizeof(struct ieee80211_frame_min);
4103 				break;
4104 			}
4105 		} else
4106 			hdrlen = ieee80211_get_hdrlen(wh);
4107 		memmove(m->m_data + 2, m->m_data, hdrlen);
4108 		m_adj(m, 2);
4109 	}
4110 
4111 	phy_info = le16toh(desc->phy_info);
4112 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
4113 	chanidx = desc->v1.channel;
4114 	device_timestamp = desc->v1.gp2_on_air_rise;
4115 
4116 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
4117 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4118 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4119 
4120 	memset(&rxi, 0, sizeof(rxi));
4121 	rxi.rxi_rssi = rssi;
4122 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
4123 
4124 	iwm_rx_frame(sc, m, chanidx,
4125 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
4126 	    rate_n_flags, device_timestamp, &rxi, ml);
4127 }
4128 
4129 void
4130 iwm_enable_ht_cck_fallback(struct iwm_softc *sc, struct iwm_node *in)
4131 {
4132 	struct ieee80211com *ic = &sc->sc_ic;
4133 	struct ieee80211_node *ni = &in->in_ni;
4134 	struct ieee80211_rateset *rs = &ni->ni_rates;
4135 	uint8_t rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
4136 	uint8_t min_rval = ieee80211_min_basic_rate(ic);
4137 	int i;
4138 
4139 	/* Are CCK frames forbidden in our BSS? */
4140 	if (IWM_RVAL_IS_OFDM(min_rval))
4141 		return;
4142 
4143 	in->ht_force_cck = 1;
4144 
4145 	ieee80211_mira_cancel_timeouts(&in->in_mn);
4146 	ieee80211_mira_node_init(&in->in_mn);
4147 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
4148 
4149 	/* Choose initial CCK Tx rate. */
4150 	ni->ni_txrate = 0;
4151 	for (i = 0; i < rs->rs_nrates; i++) {
4152 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4153 		if (rval == min_rval) {
4154 			ni->ni_txrate = i;
4155 			break;
4156 		}
4157 	}
4158 }
4159 
4160 void
4161 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4162     struct iwm_node *in)
4163 {
4164 	struct ieee80211com *ic = &sc->sc_ic;
4165 	struct ieee80211_node *ni = &in->in_ni;
4166 	struct ifnet *ifp = IC2IFP(ic);
4167 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4168 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4169 	int txfail;
4170 
4171 	KASSERT(tx_resp->frame_count == 1);
4172 
4173 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
4174 	    status != IWM_TX_STATUS_DIRECT_DONE);
4175 
4176 	/* Update rate control statistics. */
4177 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck) {
4178 		in->in_amn.amn_txcnt++;
4179 		if (in->ht_force_cck) {
4180 			/*
4181 			 * We want to move back to OFDM quickly if possible.
4182 			 * Only show actual Tx failures to AMRR, not retries.
4183 			 */
4184 			if (txfail)
4185 				in->in_amn.amn_retrycnt++;
4186 		} else if (tx_resp->failure_frame > 0)
4187 			in->in_amn.amn_retrycnt++;
4188 	} else if (ic->ic_fixed_mcs == -1) {
4189 		in->in_mn.frames += tx_resp->frame_count;
4190 		in->in_mn.ampdu_size = le16toh(tx_resp->byte_cnt);
4191 		in->in_mn.agglen = tx_resp->frame_count;
4192 		if (tx_resp->failure_frame > 0)
4193 			in->in_mn.retries += tx_resp->failure_frame;
4194 		if (txfail)
4195 			in->in_mn.txfail += tx_resp->frame_count;
4196 		if (ic->ic_state == IEEE80211_S_RUN && !in->ht_force_cck) {
4197 			int best_mcs;
4198 
4199 			ieee80211_mira_choose(&in->in_mn, ic, &in->in_ni);
4200 			/*
4201 			 * If MiRA has chosen a new TX rate we must update
4202 			 * the firmware's LQ rate table from process context.
4203 			 * ni_txmcs may change again before the task runs so
4204 			 * cache the chosen rate in the iwm_node structure.
4205 			 */
4206 			best_mcs = ieee80211_mira_get_best_mcs(&in->in_mn);
4207 			if (best_mcs != in->chosen_txmcs) {
4208 				in->chosen_txmcs = best_mcs;
4209 				iwm_add_task(sc, systq, &sc->setrates_task);
4210 			}
4211 
4212 			/* Fall back to CCK rates if MCS 0 is failing. */
4213 			if (txfail && IEEE80211_IS_CHAN_2GHZ(ni->ni_chan) &&
4214 			    in->chosen_txmcs == 0 && best_mcs == 0)
4215 				iwm_enable_ht_cck_fallback(sc, in);
4216 		}
4217 	}
4218 
4219 	if (txfail)
4220 		ifp->if_oerrors++;
4221 }
4222 
4223 void
4224 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
4225 {
4226 	struct ieee80211com *ic = &sc->sc_ic;
4227 
4228 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4229 	    BUS_DMASYNC_POSTWRITE);
4230 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4231 	m_freem(txd->m);
4232 	txd->m = NULL;
4233 
4234 	KASSERT(txd->in);
4235 	ieee80211_release_node(ic, &txd->in->in_ni);
4236 	txd->in = NULL;
4237 
4238 	KASSERT(txd->done == 0);
4239 	txd->done = 1;
4240 }
4241 
4242 void
4243 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4244     struct iwm_rx_data *data)
4245 {
4246 	struct ieee80211com *ic = &sc->sc_ic;
4247 	struct ifnet *ifp = IC2IFP(ic);
4248 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4249 	int idx = cmd_hdr->idx;
4250 	int qid = cmd_hdr->qid;
4251 	struct iwm_tx_ring *ring = &sc->txq[qid];
4252 	struct iwm_tx_data *txd;
4253 
4254 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4255 	    BUS_DMASYNC_POSTREAD);
4256 
4257 	sc->sc_tx_timer = 0;
4258 
4259 	txd = &ring->data[idx];
4260 	if (txd->done)
4261 		return;
4262 
4263 	iwm_rx_tx_cmd_single(sc, pkt, txd->in);
4264 	iwm_txd_done(sc, txd);
4265 
4266 	/*
4267 	 * XXX Sometimes we miss Tx completion interrupts.
4268 	 * We cannot check Tx success/failure for affected frames; just free
4269 	 * the associated mbuf and release the associated node reference.
4270 	 */
4271 	while (ring->tail != idx) {
4272 		txd = &ring->data[ring->tail];
4273 		if (!txd->done) {
4274 			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
4275 			    __func__, ring->tail, idx));
4276 			iwm_txd_done(sc, txd);
4277 			ring->queued--;
4278 		}
4279 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
4280 	}
4281 
4282 	if (--ring->queued < IWM_TX_RING_LOMARK) {
4283 		sc->qfullmsk &= ~(1 << ring->qid);
4284 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4285 			ifq_clr_oactive(&ifp->if_snd);
4286 			/*
4287 			 * Well, we're in interrupt context, but then again
4288 			 * I guess net80211 does all sorts of stunts in
4289 			 * interrupt context, so maybe this is no biggie.
4290 			 */
4291 			(*ifp->if_start)(ifp);
4292 		}
4293 	}
4294 }
4295 
4296 void
4297 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4298     struct iwm_rx_data *data)
4299 {
4300 	struct ieee80211com *ic = &sc->sc_ic;
4301 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
4302 	uint32_t missed;
4303 
4304 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4305 	    (ic->ic_state != IEEE80211_S_RUN))
4306 		return;
4307 
4308 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4309 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4310 
4311 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4312 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4313 		if (ic->ic_if.if_flags & IFF_DEBUG)
4314 			printf("%s: receiving no beacons from %s; checking if "
4315 			    "this AP is still responding to probe requests\n",
4316 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
4317 		/*
4318 		 * Rather than go directly to scan state, try to send a
4319 		 * directed probe request first. If that fails then the
4320 		 * state machine will drop us into scanning after timing
4321 		 * out waiting for a probe response.
4322 		 */
4323 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4324 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4325 	}
4326 
4327 }
4328 
4329 int
4330 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4331 {
4332 	struct iwm_binding_cmd cmd;
4333 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4334 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4335 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
4336 	uint32_t status;
4337 
4338 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
4339 		panic("binding already added");
4340 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
4341 		panic("binding already removed");
4342 
4343 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
4344 		return EINVAL;
4345 
4346 	memset(&cmd, 0, sizeof(cmd));
4347 
4348 	cmd.id_and_color
4349 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4350 	cmd.action = htole32(action);
4351 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4352 
4353 	cmd.macs[0] = htole32(mac_id);
4354 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4355 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4356 
4357 	status = 0;
4358 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4359 	    sizeof(cmd), &cmd, &status);
4360 	if (err == 0 && status != 0)
4361 		err = EIO;
4362 
4363 	return err;
4364 }
4365 
4366 void
4367 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4368     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4369 {
4370 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4371 
4372 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4373 	    ctxt->color));
4374 	cmd->action = htole32(action);
4375 	cmd->apply_time = htole32(apply_time);
4376 }
4377 
4378 void
4379 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4380     struct ieee80211_channel *chan, uint8_t chains_static,
4381     uint8_t chains_dynamic)
4382 {
4383 	struct ieee80211com *ic = &sc->sc_ic;
4384 	uint8_t active_cnt, idle_cnt;
4385 
4386 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4387 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4388 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4389 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4390 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4391 
4392 	/* Set the Rx chains. */
4393 	idle_cnt = chains_static;
4394 	active_cnt = chains_dynamic;
4395 
4396 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4397 					IWM_PHY_RX_CHAIN_VALID_POS);
4398 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4399 	cmd->rxchain_info |= htole32(active_cnt <<
4400 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4401 
4402 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4403 }
4404 
4405 int
4406 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4407     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4408     uint32_t apply_time)
4409 {
4410 	struct iwm_phy_context_cmd cmd;
4411 
4412 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4413 
4414 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4415 	    chains_static, chains_dynamic);
4416 
4417 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4418 	    sizeof(struct iwm_phy_context_cmd), &cmd);
4419 }
4420 
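/*
 * Submit a host command to the firmware's command queue.  Synchronous
 * callers sleep for up to one second waiting for the acknowledgement;
 * callers passing IWM_CMD_WANT_RESP also receive the response packet
 * and must release it with iwm_free_resp().
 *
 * A minimal usage sketch (IWM_SOME_CMD is a hypothetical command id):
 *
 *	struct iwm_host_cmd hcmd = {
 *		.id = IWM_SOME_CMD,
 *		.flags = IWM_CMD_WANT_RESP,
 *		.resp_pkt_len = IWM_CMD_RESP_MAX,
 *	};
 *	err = iwm_send_cmd(sc, &hcmd);
 *	if (err == 0)
 *		iwm_free_resp(sc, &hcmd);
 */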
4421 int
4422 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4423 {
4424 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
4425 	struct iwm_tfd *desc;
4426 	struct iwm_tx_data *txdata;
4427 	struct iwm_device_cmd *cmd;
4428 	struct mbuf *m;
4429 	bus_addr_t paddr;
4430 	uint32_t addr_lo;
4431 	int err = 0, i, paylen, off, s;
4432 	int idx, code, async, group_id;
4433 	size_t hdrlen, datasz;
4434 	uint8_t *data;
4435 	int generation = sc->sc_generation;
4436 
4437 	code = hcmd->id;
4438 	async = hcmd->flags & IWM_CMD_ASYNC;
4439 	idx = ring->cur;
4440 
4441 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
4442 		paylen += hcmd->len[i];
4443 	}
4444 
4445 	/* If this command waits for a response, allocate response buffer. */
4446 	hcmd->resp_pkt = NULL;
4447 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
4448 		uint8_t *resp_buf;
4449 		KASSERT(!async);
4450 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
4451 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
4452 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
4453 			return ENOSPC;
4454 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
4455 		    M_NOWAIT | M_ZERO);
4456 		if (resp_buf == NULL)
4457 			return ENOMEM;
4458 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
4459 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
4460 	} else {
4461 		sc->sc_cmd_resp_pkt[idx] = NULL;
4462 	}
4463 
4464 	s = splnet();
4465 
4466 	desc = &ring->desc[idx];
4467 	txdata = &ring->data[idx];
4468 
4469 	group_id = iwm_cmd_groupid(code);
4470 	if (group_id != 0) {
4471 		hdrlen = sizeof(cmd->hdr_wide);
4472 		datasz = sizeof(cmd->data_wide);
4473 	} else {
4474 		hdrlen = sizeof(cmd->hdr);
4475 		datasz = sizeof(cmd->data);
4476 	}
4477 
4478 	if (paylen > datasz) {
4479 		/* Command is too large to fit in pre-allocated space. */
4480 		size_t totlen = hdrlen + paylen;
4481 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4482 			printf("%s: firmware command too long (%zd bytes)\n",
4483 			    DEVNAME(sc), totlen);
4484 			err = EINVAL;
4485 			goto out;
4486 		}
4487 		m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
4488 		if (m == NULL) {
4489 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
4490 			    DEVNAME(sc), totlen);
4491 			err = ENOMEM;
4492 			goto out;
4493 		}
4494 		cmd = mtod(m, struct iwm_device_cmd *);
4495 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4496 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4497 		if (err) {
4498 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
4499 			    DEVNAME(sc), totlen);
4500 			m_freem(m);
4501 			goto out;
4502 		}
4503 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
4504 		paddr = txdata->map->dm_segs[0].ds_addr;
4505 	} else {
4506 		cmd = &ring->cmd[idx];
4507 		paddr = txdata->cmd_paddr;
4508 	}
4509 
4510 	if (group_id != 0) {
4511 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4512 		cmd->hdr_wide.group_id = group_id;
4513 		cmd->hdr_wide.qid = ring->qid;
4514 		cmd->hdr_wide.idx = idx;
4515 		cmd->hdr_wide.length = htole16(paylen);
4516 		cmd->hdr_wide.version = iwm_cmd_version(code);
4517 		data = cmd->data_wide;
4518 	} else {
4519 		cmd->hdr.code = code;
4520 		cmd->hdr.flags = 0;
4521 		cmd->hdr.qid = ring->qid;
4522 		cmd->hdr.idx = idx;
4523 		data = cmd->data;
4524 	}
4525 
4526 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
4527 		if (hcmd->len[i] == 0)
4528 			continue;
4529 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4530 		off += hcmd->len[i];
4531 	}
4532 	KASSERT(off == paylen);
4533 
4534 	/* The TB's lo address field is not naturally aligned, so use memcpy. */
4535 	addr_lo = htole32((uint32_t)paddr);
4536 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
4537 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
4538 	    | ((hdrlen + paylen) << 4));
4539 	desc->num_tbs = 1;
4540 
4541 	if (paylen > datasz) {
4542 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
4543 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
4544 	} else {
4545 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4546 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4547 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
4548 	}
4549 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4550 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4551 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
4552 
4553 	/*
4554 	 * Wake up the NIC to make sure that the firmware will see the host
4555 	 * command - we will let the NIC sleep once all the host commands
4556 	 * have returned. This is needed only on 7000 family NICs.
4557 	 */
4558 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
4559 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
4560 			err = EBUSY;
4561 			goto out;
4562 		}
4563 	}
4564 
4565 #if 0
4566 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4567 #endif
4568 	/* Kick command ring. */
4569 	ring->queued++;
4570 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4571 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4572 
4573 	if (!async) {
4574 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
4575 		if (err == 0) {
4576 			/* if hardware is no longer up, return error */
4577 			if (generation != sc->sc_generation) {
4578 				err = ENXIO;
4579 				goto out;
4580 			}
4581 
4582 			/* Response buffer will be freed in iwm_free_resp(). */
4583 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
4584 			sc->sc_cmd_resp_pkt[idx] = NULL;
4585 		} else if (generation == sc->sc_generation) {
4586 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
4587 			    sc->sc_cmd_resp_len[idx]);
4588 			sc->sc_cmd_resp_pkt[idx] = NULL;
4589 		}
4590 	}
4591  out:
4592 	splx(s);
4593 
4594 	return err;
4595 }
4596 
4597 int
4598 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4599     uint16_t len, const void *data)
4600 {
4601 	struct iwm_host_cmd cmd = {
4602 		.id = id,
4603 		.len = { len, },
4604 		.data = { data, },
4605 		.flags = flags,
4606 	};
4607 
4608 	return iwm_send_cmd(sc, &cmd);
4609 }
4610 
4611 int
4612 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4613     uint32_t *status)
4614 {
4615 	struct iwm_rx_packet *pkt;
4616 	struct iwm_cmd_response *resp;
4617 	int err, resp_len;
4618 
4619 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
4620 	cmd->flags |= IWM_CMD_WANT_RESP;
4621 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
4622 
4623 	err = iwm_send_cmd(sc, cmd);
4624 	if (err)
4625 		return err;
4626 
4627 	pkt = cmd->resp_pkt;
4628 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
4629 		return EIO;
4630 
4631 	resp_len = iwm_rx_packet_payload_len(pkt);
4632 	if (resp_len != sizeof(*resp)) {
4633 		iwm_free_resp(sc, cmd);
4634 		return EIO;
4635 	}
4636 
4637 	resp = (void *)pkt->data;
4638 	*status = le32toh(resp->status);
4639 	iwm_free_resp(sc, cmd);
4640 	return err;
4641 }
4642 
4643 int
4644 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4645     const void *data, uint32_t *status)
4646 {
4647 	struct iwm_host_cmd cmd = {
4648 		.id = id,
4649 		.len = { len, },
4650 		.data = { data, },
4651 	};
4652 
4653 	return iwm_send_cmd_status(sc, &cmd, status);
4654 }
4655 
4656 void
4657 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4658 {
4659 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
4660 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
4661 	hcmd->resp_pkt = NULL;
4662 }
4663 
4664 void
4665 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
4666 {
4667 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
4668 	struct iwm_tx_data *data;
4669 
4670 	if (qid != sc->cmdqid) {
4671 		return;	/* Not a command ack. */
4672 	}
4673 
4674 	data = &ring->data[idx];
4675 
4676 	if (data->m != NULL) {
4677 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4678 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4679 		bus_dmamap_unload(sc->sc_dmat, data->map);
4680 		m_freem(data->m);
4681 		data->m = NULL;
4682 	}
4683 	wakeup(&ring->desc[idx]);
4684 
4685 	if (ring->queued == 0) {
4686 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
4687 		    DEVNAME(sc), code));
4688 	} else if (--ring->queued == 0) {
4689 		/*
4690 		 * 7000 family NICs are locked while commands are in progress.
4691 		 * All commands are now done so we may unlock the NIC again.
4692 		 */
4693 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4694 			iwm_nic_unlock(sc);
4695 	}
4696 }
4697 
4698 #if 0
4699 /*
4700  * necessary only for block ack mode
4701  */
4702 void
4703 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4704     uint16_t len)
4705 {
4706 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4707 	uint16_t w_val;
4708 
4709 	scd_bc_tbl = sc->sched_dma.vaddr;
4710 
4711 	len += 8; /* magic numbers came naturally from paris */
4712 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4713 		len = roundup(len, 4) / 4;
4714 
4715 	w_val = htole16(sta_id << 12 | len);
4716 
4717 	/* Update TX scheduler. */
4718 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4719 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4720 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] - (char *)(void *)sc->sched_dma.vaddr,
4721 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4722 
4723 	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the
	 * end of the table so the hardware can read byte counts near the
	 * start of the ring without wrapping.
	 */
4724 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4725 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4726 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4727 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4728 		    (char *)(void *)sc->sched_dma.vaddr,
4729 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4730 	}
4731 }
4732 #endif
4733 
4734 /*
4735  * Fill in various bits for management frames, and leave them
4736  * unfilled for data frames (firmware takes care of that).
4737  * Return the selected TX rate.
4738  */
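/*
 * Rate selection order, as implemented below: the lowest basic rate for
 * multicast and non-data frames; a fixed rate if the user configured one;
 * for HT nodes, the MiRA-chosen MCS while probing, or a CCK rate if CCK
 * fallback is active; otherwise the firmware's rateset retry table.
 */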
4739 const struct iwm_rate *
4740 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4741     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4742 {
4743 	struct ieee80211com *ic = &sc->sc_ic;
4744 	struct ieee80211_node *ni = &in->in_ni;
4745 	struct ieee80211_rateset *rs = &ni->ni_rates;
4746 	const struct iwm_rate *rinfo;
4747 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4748 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
4749 	int ridx, rate_flags;
4750 
4751 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4752 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
4753 
4754 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4755 	    type != IEEE80211_FC0_TYPE_DATA) {
4756 		/* for non-data, use the lowest supported rate */
4757 		ridx = min_ridx;
4758 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4759 	} else if (ic->ic_fixed_mcs != -1) {
4760 		ridx = sc->sc_fixed_ridx;
4761 	} else if (ic->ic_fixed_rate != -1) {
4762 		ridx = sc->sc_fixed_ridx;
4763 	} else if ((ni->ni_flags & IEEE80211_NODE_HT) && !in->ht_force_cck &&
4764 	    ieee80211_mira_is_probing(&in->in_mn)) {
4765 		/* Keep Tx rate constant while mira is probing. */
4766 		ridx = iwm_mcs2ridx[ni->ni_txmcs];
4767 	} else if ((ni->ni_flags & IEEE80211_NODE_HT) && in->ht_force_cck) {
4768 		uint8_t rval;
4769 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
4770 		ridx = iwm_rval2ridx(rval);
4771 		if (ridx < min_ridx)
4772 			ridx = min_ridx;
4773 	} else {
4774 		int i;
4775 		/* Use firmware rateset retry table. */
4776 		tx->initial_rate_index = 0;
4777 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4778 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4779 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4780 			return &iwm_rates[ridx];
4781 		}
4782 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4783 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4784 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
4785 			if (iwm_rates[i].rate == (ni->ni_txrate &
4786 			    IEEE80211_RATE_VAL)) {
4787 				ridx = i;
4788 				break;
4789 			}
4790 		}
4791 		return &iwm_rates[ridx];
4792 	}
4793 
4794 	rinfo = &iwm_rates[ridx];
4795 	if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
4796 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
4797 	else
4798 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
4799 	if (IWM_RIDX_IS_CCK(ridx))
4800 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4801 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4802 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4803 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4804 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4805 	} else
4806 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4807 
4808 	return rinfo;
4809 }
4810 
4811 #define TB0_SIZE 16
4812 int
4813 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4814 {
4815 	struct ieee80211com *ic = &sc->sc_ic;
4816 	struct iwm_node *in = (void *)ni;
4817 	struct iwm_tx_ring *ring;
4818 	struct iwm_tx_data *data;
4819 	struct iwm_tfd *desc;
4820 	struct iwm_device_cmd *cmd;
4821 	struct iwm_tx_cmd *tx;
4822 	struct ieee80211_frame *wh;
4823 	struct ieee80211_key *k = NULL;
4824 	const struct iwm_rate *rinfo;
4825 	uint32_t flags;
4826 	u_int hdrlen;
4827 	bus_dma_segment_t *seg;
4828 	uint8_t tid, type;
4829 	int i, totlen, err, pad;
4830 	int hdrlen2, rtsthres = ic->ic_rtsthreshold;
4831 
4832 	wh = mtod(m, struct ieee80211_frame *);
4833 	hdrlen = ieee80211_get_hdrlen(wh);
4834 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4835 
4836 	hdrlen2 = (ieee80211_has_qos(wh)) ?
4837 	    sizeof (struct ieee80211_qosframe) :
4838 	    sizeof (struct ieee80211_frame);
4839 
4840 	tid = 0;
4841 
4842 	/*
4843 	 * Map EDCA categories to Tx data queues.
4844 	 *
4845 	 * We use static data queue assignments even in DQA mode. We do not
4846 	 * need to share Tx queues between stations because we only implement
4847 	 * client mode; the firmware's station table contains only one entry
4848 	 * which represents our access point.
4849 	 *
4850 	 * Tx aggregation will require additional queues (one queue per TID
4851 	 * for which aggregation is enabled) but we do not implement this yet.
4852 	 */
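	/*
	 * For example, with DQA-capable firmware a frame in access category
	 * 'ac' goes to sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac]; with older
	 * firmware it goes to sc->txq[ac] directly.
	 */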
4853 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4854 		ring = &sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac];
4855 	else
4856 		ring = &sc->txq[ac];
4857 	desc = &ring->desc[ring->cur];
4858 	memset(desc, 0, sizeof(*desc));
4859 	data = &ring->data[ring->cur];
4860 
4861 	cmd = &ring->cmd[ring->cur];
4862 	cmd->hdr.code = IWM_TX_CMD;
4863 	cmd->hdr.flags = 0;
4864 	cmd->hdr.qid = ring->qid;
4865 	cmd->hdr.idx = ring->cur;
4866 
4867 	tx = (void *)cmd->data;
4868 	memset(tx, 0, sizeof(*tx));
4869 
4870 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4871 
4872 #if NBPFILTER > 0
4873 	if (sc->sc_drvbpf != NULL) {
4874 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4875 		uint16_t chan_flags;
4876 
4877 		tap->wt_flags = 0;
4878 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4879 		chan_flags = ni->ni_chan->ic_flags;
4880 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4881 			chan_flags &= ~IEEE80211_CHAN_HT;
4882 		tap->wt_chan_flags = htole16(chan_flags);
4883 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4884 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4885 		    type == IEEE80211_FC0_TYPE_DATA &&
4886 		    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4887 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4888 		} else
4889 			tap->wt_rate = rinfo->rate;
4890 		tap->wt_hwqueue = ac;
4891 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
4892 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4893 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4894 
4895 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
4896 		    m, BPF_DIRECTION_OUT);
4897 	}
4898 #endif
4899 
4900 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4901 		k = ieee80211_get_txkey(ic, wh, ni);
4902 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
4903 			return ENOBUFS;
4904 		/* 802.11 header may have moved. */
4905 		wh = mtod(m, struct ieee80211_frame *);
4906 	}
4907 	totlen = m->m_pkthdr.len;
4908 
4909 	flags = 0;
4910 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4911 		flags |= IWM_TX_CMD_FLG_ACK;
4912 	}
4913 
4914 	if (ni->ni_flags & IEEE80211_NODE_HT)
4915 		rtsthres = ieee80211_mira_get_rts_threshold(&in->in_mn, ic, ni,
4916 		    totlen + IEEE80211_CRC_LEN);
4917 
4918 	if (type == IEEE80211_FC0_TYPE_DATA &&
4919 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4920 	    (totlen + IEEE80211_CRC_LEN > rtsthres ||
4921 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
4922 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4923 
4924 	tx->sta_id = IWM_STATION_ID;
4925 
4926 	if (type == IEEE80211_FC0_TYPE_MGT) {
4927 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4928 
4929 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4930 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4931 			tx->pm_frame_timeout = htole16(3);
4932 		else
4933 			tx->pm_frame_timeout = htole16(2);
4934 	} else {
4935 		tx->pm_frame_timeout = htole16(0);
4936 	}
4937 
4938 	if (hdrlen & 3) {
4939 		/* First segment length must be a multiple of 4. */
4940 		flags |= IWM_TX_CMD_FLG_MH_PAD;
4941 		pad = 4 - (hdrlen & 3);
4942 	} else
4943 		pad = 0;
4944 
4945 	tx->driver_txop = 0;
4946 	tx->next_frame_len = 0;
4947 
4948 	tx->len = htole16(totlen);
4949 	tx->tid_tspec = tid;
4950 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4951 
4952 	/* Set physical address of "scratch area". */
4953 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4954 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4955 
4956 	/* Copy 802.11 header in TX command. */
4957 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
4958 
4959 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4960 
4961 	tx->sec_ctl = 0;
4962 	tx->tx_flags |= htole32(flags);
4963 
4964 	/* Trim 802.11 header. */
4965 	m_adj(m, hdrlen);
4966 
4967 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4968 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4969 	if (err && err != EFBIG) {
4970 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
4971 		m_freem(m);
4972 		return err;
4973 	}
4974 	if (err) {
4975 		/* Too many DMA segments, linearize mbuf. */
4976 		if (m_defrag(m, M_DONTWAIT)) {
4977 			m_freem(m);
4978 			return ENOBUFS;
4979 		}
4980 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4981 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4982 		if (err) {
4983 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
4984 			    err);
4985 			m_freem(m);
4986 			return err;
4987 		}
4988 	}
4989 	data->m = m;
4990 	data->in = in;
4991 	data->done = 0;
4992 
4993 	/*
	 * Fill TX descriptor: TB0 and TB1 cover the Tx command plus the
	 * 802.11 header; the remaining TBs map the frame payload.
	 */
4994 	desc->num_tbs = 2 + data->map->dm_nsegs;
4995 
4996 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4997 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
4998 	    (TB0_SIZE << 4));
4999 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
5000 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
5001 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
5002 	      + hdrlen + pad - TB0_SIZE) << 4));
5003 
5004 	/* Other DMA segments are for data payload. */
5005 	seg = data->map->dm_segs;
5006 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5007 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
5008 		desc->tbs[i+2].hi_n_len = \
5009 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
5010 		    | ((seg->ds_len) << 4));
5011 	}
5012 
5013 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5014 	    BUS_DMASYNC_PREWRITE);
5015 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5016 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5017 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5018 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5019 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5020 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5021 
5022 #if 0
5023 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
5024 #endif
5025 
5026 	/* Kick TX ring. */
5027 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
5028 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5029 
5030 	/* Mark TX ring as full if we reach a certain threshold. */
5031 	if (++ring->queued > IWM_TX_RING_HIMARK) {
5032 		sc->qfullmsk |= 1 << ring->qid;
5033 	}
5034 
5035 	return 0;
5036 }
5037 
5038 int
5039 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
5040 {
5041 	struct iwm_tx_path_flush_cmd flush_cmd = {
5042 		.queues_ctl = htole32(tfd_queue_msk),
5043 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
5044 	};
5045 	int err;
5046 
5047 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
5048 	    sizeof(flush_cmd), &flush_cmd);
5049 	if (err)
5050 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
5051 	return err;
5052 }
5053 
5054 void
5055 iwm_led_enable(struct iwm_softc *sc)
5056 {
5057 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
5058 }
5059 
5060 void
5061 iwm_led_disable(struct iwm_softc *sc)
5062 {
5063 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
5064 }
5065 
5066 int
5067 iwm_led_is_enabled(struct iwm_softc *sc)
5068 {
5069 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
5070 }
5071 
5072 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
5073 
5074 void
5075 iwm_led_blink_timeout(void *arg)
5076 {
5077 	struct iwm_softc *sc = arg;
5078 
5079 	if (iwm_led_is_enabled(sc))
5080 		iwm_led_disable(sc);
5081 	else
5082 		iwm_led_enable(sc);
5083 
5084 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
5085 }
5086 
5087 void
5088 iwm_led_blink_start(struct iwm_softc *sc)
5089 {
5090 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
5091 	iwm_led_enable(sc);
5092 }
5093 
5094 void
5095 iwm_led_blink_stop(struct iwm_softc *sc)
5096 {
5097 	timeout_del(&sc->sc_led_blink_to);
5098 	iwm_led_disable(sc);
5099 }
5100 
5101 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
5102 
5103 int
5104 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
5105     struct iwm_beacon_filter_cmd *cmd)
5106 {
5107 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
5108 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
5109 }
5110 
5111 void
5112 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
5113     struct iwm_beacon_filter_cmd *cmd)
5114 {
5115 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
5116 }
5117 
5118 int
5119 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
5120 {
5121 	struct iwm_beacon_filter_cmd cmd = {
5122 		IWM_BF_CMD_CONFIG_DEFAULTS,
5123 		.bf_enable_beacon_filter = htole32(1),
5124 		.ba_enable_beacon_abort = htole32(enable),
5125 	};
5126 
5127 	if (!sc->sc_bf.bf_enabled)
5128 		return 0;
5129 
5130 	sc->sc_bf.ba_enabled = enable;
5131 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5132 	return iwm_beacon_filter_send_cmd(sc, &cmd);
5133 }
5134 
5135 void
5136 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
5137     struct iwm_mac_power_cmd *cmd)
5138 {
5139 	struct ieee80211com *ic = &sc->sc_ic;
5140 	struct ieee80211_node *ni = &in->in_ni;
5141 	int dtim_period, dtim_msec, keep_alive;
5142 
5143 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5144 	    in->in_color));
5145 	if (ni->ni_dtimperiod)
5146 		dtim_period = ni->ni_dtimperiod;
5147 	else
5148 		dtim_period = 1;
5149 
5150 	/*
5151 	 * Regardless of power management state the driver must set
5152 	 * keep alive period. FW will use it for sending keep alive NDPs
5153 	 * immediately after association. Check that keep alive period
5154 	 * is at least 3 * DTIM.
5155 	 */
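	/*
	 * For example, with a DTIM period of 1 and a beacon interval of 100,
	 * 3 * DTIM is 300 ms; the 25 second minimum wins and the command's
	 * keep_alive_seconds field becomes 25.
	 */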
5156 	dtim_msec = dtim_period * ni->ni_intval;
5157 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
5158 	keep_alive = roundup(keep_alive, 1000) / 1000;
5159 	cmd->keep_alive_seconds = htole16(keep_alive);
5160 
5161 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5162 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5163 }
5164 
5165 int
5166 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
5167 {
5168 	int err;
5169 	int ba_enable;
5170 	struct iwm_mac_power_cmd cmd;
5171 
5172 	memset(&cmd, 0, sizeof(cmd));
5173 
5174 	iwm_power_build_cmd(sc, in, &cmd);
5175 
5176 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
5177 	    sizeof(cmd), &cmd);
5178 	if (err != 0)
5179 		return err;
5180 
5181 	ba_enable = !!(cmd.flags &
5182 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5183 	return iwm_update_beacon_abort(sc, in, ba_enable);
5184 }
5185 
5186 int
5187 iwm_power_update_device(struct iwm_softc *sc)
5188 {
5189 	struct iwm_device_power_cmd cmd = { };
5190 	struct ieee80211com *ic = &sc->sc_ic;
5191 
5192 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5193 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5194 
5195 	return iwm_send_cmd_pdu(sc,
5196 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5197 }
5198 
5199 int
5200 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
5201 {
5202 	struct iwm_beacon_filter_cmd cmd = {
5203 		IWM_BF_CMD_CONFIG_DEFAULTS,
5204 		.bf_enable_beacon_filter = htole32(1),
5205 	};
5206 	int err;
5207 
5208 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5209 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5210 
5211 	if (err == 0)
5212 		sc->sc_bf.bf_enabled = 1;
5213 
5214 	return err;
5215 }
5216 
5217 int
5218 iwm_disable_beacon_filter(struct iwm_softc *sc)
5219 {
5220 	struct iwm_beacon_filter_cmd cmd;
5221 	int err;
5222 
5223 	memset(&cmd, 0, sizeof(cmd));
5224 
5225 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5226 	if (err == 0)
5227 		sc->sc_bf.bf_enabled = 0;
5228 
5229 	return err;
5230 }
5231 
5232 int
5233 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5234 {
5235 	struct iwm_add_sta_cmd add_sta_cmd;
5236 	int err;
5237 	uint32_t status;
5238 	size_t cmdsize;
5239 	struct ieee80211com *ic = &sc->sc_ic;
5240 
5241 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
5242 		panic("STA already added");
5243 
5244 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5245 
5246 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5247 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
5248 	else
5249 		add_sta_cmd.sta_id = IWM_STATION_ID;
5250 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
5251 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
5252 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
5253 		else
5254 			add_sta_cmd.station_type = IWM_STA_LINK;
5255 	}
5256 	add_sta_cmd.mac_id_n_color
5257 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5258 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5259 		int qid;
5260 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
5261 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
5262 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
5263 		else
5264 			qid = IWM_AUX_QUEUE;
5265 		add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
5266 	} else if (!update) {
5267 		int ac;
5268 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
5269 			int qid = ac;
5270 			if (isset(sc->sc_enabled_capa,
5271 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
5272 				qid += IWM_DQA_MIN_MGMT_QUEUE;
5273 			add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
5274 		}
5275 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5276 	}
5277 	add_sta_cmd.add_modify = update ? 1 : 0;
5278 	add_sta_cmd.station_flags_msk
5279 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5280 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
5281 	if (update)
5282 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5283 
5284 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5285 		add_sta_cmd.station_flags_msk
5286 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5287 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5288 
5289 		add_sta_cmd.station_flags
5290 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5291 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5292 		case IEEE80211_AMPDU_PARAM_SS_2:
5293 			add_sta_cmd.station_flags
5294 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5295 			break;
5296 		case IEEE80211_AMPDU_PARAM_SS_4:
5297 			add_sta_cmd.station_flags
5298 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5299 			break;
5300 		case IEEE80211_AMPDU_PARAM_SS_8:
5301 			add_sta_cmd.station_flags
5302 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5303 			break;
5304 		case IEEE80211_AMPDU_PARAM_SS_16:
5305 			add_sta_cmd.station_flags
5306 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5307 			break;
5308 		default:
5309 			break;
5310 		}
5311 	}
5312 
5313 	status = IWM_ADD_STA_SUCCESS;
5314 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
5315 		cmdsize = sizeof(add_sta_cmd);
5316 	else
5317 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
5318 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
5319 	    &add_sta_cmd, &status);
5320 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
5321 		err = EIO;
5322 
5323 	return err;
5324 }
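
/*
 * Illustration (not part of the driver): sketch of the TFD queue mask
 * built in iwm_add_sta_cmd() above. With DQA-capable firmware the four
 * EDCA ACs map to consecutive management queues starting at
 * IWM_DQA_MIN_MGMT_QUEUE (call it q):
 *
 *	tfd_queue_msk = (1 << q) | (1 << (q + 1)) |
 *	    (1 << (q + 2)) | (1 << (q + 3));
 *
 * Without DQA support the ACs use queues 0-3 and the mask is simply
 * 0xf.
 */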
5325 
5326 int
5327 iwm_add_aux_sta(struct iwm_softc *sc)
5328 {
5329 	struct iwm_add_sta_cmd cmd;
5330 	int err, qid;
5331 	uint32_t status;
5332 	size_t cmdsize;
5333 
5334 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
5335 		qid = IWM_DQA_AUX_QUEUE;
5336 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
5337 		    IWM_TX_FIFO_MCAST);
5338 	} else {
5339 		qid = IWM_AUX_QUEUE;
5340 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
5341 	}
5342 	if (err)
5343 		return err;
5344 
5345 	memset(&cmd, 0, sizeof(cmd));
5346 	cmd.sta_id = IWM_AUX_STA_ID;
5347 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
5348 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
5349 	cmd.mac_id_n_color =
5350 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5351 	cmd.tfd_queue_msk = htole32(1 << qid);
5352 	cmd.tid_disable_tx = htole16(0xffff);
5353 
5354 	status = IWM_ADD_STA_SUCCESS;
5355 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
5356 		cmdsize = sizeof(cmd);
5357 	else
5358 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
5359 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
5360 	    &status);
5361 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
5362 		err = EIO;
5363 
5364 	return err;
5365 }
5366 
5367 int
5368 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
5369 {
5370 	struct ieee80211com *ic = &sc->sc_ic;
5371 	struct iwm_rm_sta_cmd rm_sta_cmd;
5372 	int err;
5373 
5374 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
5375 		panic("sta already removed");
5376 
5377 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
5378 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5379 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
5380 	else
5381 		rm_sta_cmd.sta_id = IWM_STATION_ID;
5382 
5383 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
5384 	    &rm_sta_cmd);
5385 
5386 	return err;
5387 }
5388 
5389 uint16_t
5390 iwm_scan_rx_chain(struct iwm_softc *sc)
5391 {
5392 	uint16_t rx_chain;
5393 	uint8_t rx_ant;
5394 
5395 	rx_ant = iwm_fw_valid_rx_ant(sc);
5396 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5397 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5398 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5399 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5400 	return htole16(rx_chain);
5401 }
5402 
5403 uint32_t
5404 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5405 {
5406 	uint32_t tx_ant;
5407 	int i, ind;
5408 
5409 	for (i = 0, ind = sc->sc_scan_last_antenna;
5410 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
5411 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5412 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5413 			sc->sc_scan_last_antenna = ind;
5414 			break;
5415 		}
5416 	}
5417 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5418 
5419 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5420 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5421 				   tx_ant);
5422 	else
5423 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
5424 }
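
/*
 * Illustration (not part of the driver): the loop in
 * iwm_scan_rate_n_flags() round-robins scan TX among the antennas
 * whose bit is set in iwm_fw_valid_tx_ant(). With antennas A and B
 * valid (bitmap 0x3) and sc_scan_last_antenna == 0, the next call
 * selects antenna 1, the call after that antenna 0 again, and so on;
 * antennas whose bit is clear are skipped.
 */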
5425 
5426 uint8_t
5427 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5428     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
5429 {
5430 	struct ieee80211com *ic = &sc->sc_ic;
5431 	struct ieee80211_channel *c;
5432 	uint8_t nchan;
5433 
5434 	for (nchan = 0, c = &ic->ic_channels[1];
5435 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5436 	    nchan < sc->sc_capa_n_scan_channels;
5437 	    c++) {
5438 		if (c->ic_flags == 0)
5439 			continue;
5440 
5441 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5442 		chan->iter_count = htole16(1);
5443 		chan->iter_interval = 0;
5444 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5445 		if (n_ssids != 0 && !bgscan)
5446 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
5447 		chan++;
5448 		nchan++;
5449 	}
5450 
5451 	return nchan;
5452 }
5453 
5454 uint8_t
5455 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5456     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
5457 {
5458 	struct ieee80211com *ic = &sc->sc_ic;
5459 	struct ieee80211_channel *c;
5460 	uint8_t nchan;
5461 
5462 	for (nchan = 0, c = &ic->ic_channels[1];
5463 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5464 	    nchan < sc->sc_capa_n_scan_channels;
5465 	    c++) {
5466 		if (c->ic_flags == 0)
5467 			continue;
5468 
5469 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5470 		chan->iter_count = 1;
5471 		chan->iter_interval = htole16(0);
5472 		if (n_ssids != 0 && !bgscan)
5473 			chan->flags = htole32(1 << 0); /* select SSID 0 */
5474 		chan++;
5475 		nchan++;
5476 	}
5477 
5478 	return nchan;
5479 }
5480 
5481 int
5482 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
5483 {
5484 	struct iwm_scan_probe_req preq2;
5485 	int err, i;
5486 
5487 	err = iwm_fill_probe_req(sc, &preq2);
5488 	if (err)
5489 		return err;
5490 
5491 	preq1->mac_header = preq2.mac_header;
5492 	for (i = 0; i < nitems(preq1->band_data); i++)
5493 		preq1->band_data[i] = preq2.band_data[i];
5494 	preq1->common_data = preq2.common_data;
5495 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
5496 	return 0;
5497 }
5498 
5499 int
5500 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
5501 {
5502 	struct ieee80211com *ic = &sc->sc_ic;
5503 	struct ifnet *ifp = IC2IFP(ic);
5504 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5505 	struct ieee80211_rateset *rs;
5506 	size_t remain = sizeof(preq->buf);
5507 	uint8_t *frm, *pos;
5508 
5509 	memset(preq, 0, sizeof(*preq));
5510 
5511 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5512 		return ENOBUFS;
5513 
5514 	/*
5515 	 * Build a probe request frame.  Most of the following code is a
5516 	 * copy & paste of what is done in net80211.
5517 	 */
5518 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5519 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5520 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5521 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
5522 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5523 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5524 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5525 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5526 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5527 
5528 	frm = (uint8_t *)(wh + 1);
5529 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5530 
5531 	/* Tell the firmware where the MAC header is. */
5532 	preq->mac_header.offset = 0;
5533 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5534 	remain -= frm - (uint8_t *)wh;
5535 
5536 	/* Fill in 2GHz IEs and tell firmware where they are. */
5537 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5538 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5539 		if (remain < 4 + rs->rs_nrates)
5540 			return ENOBUFS;
5541 	} else if (remain < 2 + rs->rs_nrates)
5542 		return ENOBUFS;
5543 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5544 	pos = frm;
5545 	frm = ieee80211_add_rates(frm, rs);
5546 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5547 		frm = ieee80211_add_xrates(frm, rs);
5548 	preq->band_data[0].len = htole16(frm - pos);
5549 	remain -= frm - pos;
5550 
5551 	if (isset(sc->sc_enabled_capa,
5552 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5553 		if (remain < 3)
5554 			return ENOBUFS;
5555 		*frm++ = IEEE80211_ELEMID_DSPARMS;
5556 		*frm++ = 1;
5557 		*frm++ = 0;
5558 		remain -= 3;
5559 	}
5560 
5561 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5562 		/* Fill in 5GHz IEs. */
5563 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5564 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5565 			if (remain < 4 + rs->rs_nrates)
5566 				return ENOBUFS;
5567 		} else if (remain < 2 + rs->rs_nrates)
5568 			return ENOBUFS;
5569 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5570 		pos = frm;
5571 		frm = ieee80211_add_rates(frm, rs);
5572 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5573 			frm = ieee80211_add_xrates(frm, rs);
5574 		preq->band_data[1].len = htole16(frm - pos);
5575 		remain -= frm - pos;
5576 	}
5577 
5578 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
5579 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5580 	pos = frm;
5581 	if (ic->ic_flags & IEEE80211_F_HTON) {
5582 		if (remain < 28)
5583 			return ENOBUFS;
5584 		frm = ieee80211_add_htcaps(frm, ic);
5585 		/* XXX add WME info? */
5586 	}
5587 	preq->common_data.len = htole16(frm - pos);
5588 
5589 	return 0;
5590 }
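
/*
 * Illustration (not part of the driver): layout of the probe request
 * template built by iwm_fill_probe_req(), with offsets relative to the
 * 802.11 header at the start of preq->buf:
 *
 *	[802.11 header][SSID IE]           mac_header.offset = 0
 *	[2 GHz rates (+ extended rates)]   band_data[0].offset/len
 *	[DS params IE, if supported]
 *	[5 GHz rates (+ extended rates)]   band_data[1].offset/len
 *	[HT caps IE, sent on both bands]   common_data.offset/len
 *
 * The firmware assembles the per-band probe request from these pieces.
 */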
5591 
5592 int
5593 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
5594 {
5595 	struct ieee80211com *ic = &sc->sc_ic;
5596 	struct iwm_host_cmd hcmd = {
5597 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5598 		.len = { 0, },
5599 		.data = { NULL, },
5600 		.flags = 0,
5601 	};
5602 	struct iwm_scan_req_lmac *req;
5603 	struct iwm_scan_probe_req_v1 *preq;
5604 	size_t req_len;
5605 	int err, async = bgscan;
5606 
5607 	req_len = sizeof(struct iwm_scan_req_lmac) +
5608 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5609 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
5610 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5611 		return ENOMEM;
5612 	req = malloc(req_len, M_DEVBUF,
5613 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
5614 	if (req == NULL)
5615 		return ENOMEM;
5616 
5617 	hcmd.len[0] = (uint16_t)req_len;
5618 	hcmd.data[0] = (void *)req;
5619 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
5620 
5621 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5622 	req->active_dwell = 10;
5623 	req->passive_dwell = 110;
5624 	req->fragmented_dwell = 44;
5625 	req->extended_dwell = 90;
5626 	if (bgscan) {
5627 		req->max_out_time = htole32(120);
5628 		req->suspend_time = htole32(120);
5629 	} else {
5630 		req->max_out_time = htole32(0);
5631 		req->suspend_time = htole32(0);
5632 	}
5633 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5634 	req->rx_chain_select = iwm_scan_rx_chain(sc);
5635 	req->iter_num = htole32(1);
5636 	req->delay = 0;
5637 
5638 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5639 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5640 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5641 	if (ic->ic_des_esslen == 0)
5642 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5643 	else
5644 		req->scan_flags |=
5645 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5646 	if (isset(sc->sc_enabled_capa,
5647 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5648 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5649 
5650 	req->flags = htole32(IWM_PHY_BAND_24);
5651 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5652 		req->flags |= htole32(IWM_PHY_BAND_5);
5653 	req->filter_flags =
5654 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5655 
5656 	/* Tx flags 2 GHz. */
5657 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5658 	    IWM_TX_CMD_FLG_BT_DIS);
5659 	req->tx_cmd[0].rate_n_flags =
5660 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5661 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5662 
5663 	/* Tx flags 5 GHz. */
5664 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5665 	    IWM_TX_CMD_FLG_BT_DIS);
5666 	req->tx_cmd[1].rate_n_flags =
5667 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5668 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5669 
5670 	/* Check if we're doing an active directed scan. */
5671 	if (ic->ic_des_esslen != 0) {
5672 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5673 		req->direct_scan[0].len = ic->ic_des_esslen;
5674 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5675 		    ic->ic_des_esslen);
5676 	}
5677 
5678 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
5679 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
5680 	    ic->ic_des_esslen != 0, bgscan);
5681 
5682 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
5683 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5684 	    sc->sc_capa_n_scan_channels));
5685 	err = iwm_fill_probe_req_v1(sc, preq);
5686 	if (err) {
5687 		free(req, M_DEVBUF, req_len);
5688 		return err;
5689 	}
5690 
5691 	/* Specify the scan plan: We'll do one iteration. */
5692 	req->schedule[0].iterations = 1;
5693 	req->schedule[0].full_scan_mul = 1;
5694 
5695 	/* Disable EBS. */
5696 	req->channel_opt[0].non_ebs_ratio = 1;
5697 	req->channel_opt[1].non_ebs_ratio = 1;
5698 
5699 	err = iwm_send_cmd(sc, &hcmd);
5700 	free(req, M_DEVBUF, req_len);
5701 	return err;
5702 }
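
/*
 * Illustration (not part of the driver): the LMAC scan request sent by
 * iwm_lmac_scan() is a single allocation laid out as
 *
 *	struct iwm_scan_req_lmac
 *	struct iwm_scan_channel_cfg_lmac [sc->sc_capa_n_scan_channels]
 *	struct iwm_scan_probe_req_v1
 *
 * which is why req_len sums those three sizes and why the probe
 * request pointer is derived from req->data plus the size of the
 * channel config array.
 */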
5703 
5704 int
5705 iwm_config_umac_scan(struct iwm_softc *sc)
5706 {
5707 	struct ieee80211com *ic = &sc->sc_ic;
5708 	struct iwm_scan_config *scan_config;
5709 	int err, nchan;
5710 	size_t cmd_size;
5711 	struct ieee80211_channel *c;
5712 	struct iwm_host_cmd hcmd = {
5713 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
5714 		.flags = 0,
5715 	};
5716 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5717 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5718 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5719 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5720 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5721 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5722 	    IWM_SCAN_CONFIG_RATE_54M);
5723 
5724 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5725 
5726 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
5727 	if (scan_config == NULL)
5728 		return ENOMEM;
5729 
5730 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5731 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5732 	scan_config->legacy_rates = htole32(rates |
5733 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5734 
5735 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5736 	scan_config->dwell_active = 10;
5737 	scan_config->dwell_passive = 110;
5738 	scan_config->dwell_fragmented = 44;
5739 	scan_config->dwell_extended = 90;
5740 	scan_config->out_of_channel_time = htole32(0);
5741 	scan_config->suspend_time = htole32(0);
5742 
5743 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5744 
5745 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5746 	scan_config->channel_flags = 0;
5747 
5748 	for (c = &ic->ic_channels[1], nchan = 0;
5749 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5750 	    nchan < sc->sc_capa_n_scan_channels; c++) {
5751 		if (c->ic_flags == 0)
5752 			continue;
5753 		scan_config->channel_array[nchan++] =
5754 		    ieee80211_mhz2ieee(c->ic_freq, 0);
5755 	}
5756 
5757 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5758 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5759 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5760 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5761 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5762 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5763 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5764 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5765 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5766 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5767 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5768 
5769 	hcmd.data[0] = scan_config;
5770 	hcmd.len[0] = cmd_size;
5771 
5772 	err = iwm_send_cmd(sc, &hcmd);
5773 	free(scan_config, M_DEVBUF, cmd_size);
5774 	return err;
5775 }
5776 
5777 int
5778 iwm_umac_scan_size(struct iwm_softc *sc)
5779 {
5780 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
5781 	int tail_size;
5782 
5783 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5784 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
5785 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
5786 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
5787 #ifdef notyet
5788 	else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
5789 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V6;
5790 #endif
5791 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
5792 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
5793 	else
5794 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
5795 
5796 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
5797 	    sc->sc_capa_n_scan_channels + tail_size;
5798 }
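
/*
 * Illustration (not part of the driver): the UMAC scan request size is
 * version-dependent. For firmware advertising ADAPTIVE_DWELL_V2 and
 * the extended channel version, for example, the command occupies
 * IWM_SCAN_REQ_UMAC_SIZE_V8 bytes of fixed header, followed by one
 * struct iwm_scan_channel_cfg_umac per supported scan channel and a
 * struct iwm_scan_req_umac_tail_v2.
 */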
5799 
5800 struct iwm_scan_umac_chan_param *
5801 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
5802     struct iwm_scan_req_umac *req)
5803 {
5804 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5805 		return &req->v8.channel;
5806 
5807 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
5808 		return &req->v7.channel;
5809 #ifdef notyet
5810 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
5811 		return &req->v6.channel;
5812 #endif
5813 	return &req->v1.channel;
5814 }
5815 
5816 void *
5817 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
5818 {
5819 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5820 		return (void *)&req->v8.data;
5821 
5822 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
5823 		return (void *)&req->v7.data;
5824 #ifdef notyet
5825 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
5826 		return (void *)&req->v6.data;
5827 #endif
5828 	return (void *)&req->v1.data;
5829 
5830 }
5831 
5832 /* adaptive dwell max budget time [TU] for full scan */
5833 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
5834 /* adaptive dwell max budget time [TU] for directed scan */
5835 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
5836 /* adaptive dwell default high band APs number */
5837 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
5838 /* adaptive dwell default low band APs number */
5839 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
5840 /* adaptive dwell default APs number in social channels (1, 6, 11) */
5841 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
5842 
5843 int
5844 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
5845 {
5846 	struct ieee80211com *ic = &sc->sc_ic;
5847 	struct iwm_host_cmd hcmd = {
5848 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
5849 		.len = { 0, },
5850 		.data = { NULL, },
5851 		.flags = 0,
5852 	};
5853 	struct iwm_scan_req_umac *req;
5854 	void *cmd_data, *tail_data;
5855 	struct iwm_scan_req_umac_tail_v2 *tail;
5856 	struct iwm_scan_req_umac_tail_v1 *tailv1;
5857 	struct iwm_scan_umac_chan_param *chanparam;
5858 	size_t req_len;
5859 	int err, async = bgscan;
5860 
5861 	req_len = iwm_umac_scan_size(sc);
5862 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
5863 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
5864 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5865 		return ERANGE;
5866 	req = malloc(req_len, M_DEVBUF,
5867 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
5868 	if (req == NULL)
5869 		return ENOMEM;
5870 
5871 	hcmd.len[0] = (uint16_t)req_len;
5872 	hcmd.data[0] = (void *)req;
5873 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
5874 
5875 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5876 		req->v7.adwell_default_n_aps_social =
5877 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
5878 		req->v7.adwell_default_n_aps =
5879 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
5880 
5881 		if (ic->ic_des_esslen != 0)
5882 			req->v7.adwell_max_budget =
5883 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
5884 		else
5885 			req->v7.adwell_max_budget =
5886 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
5887 
5888 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5889 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
5890 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
5891 
5892 		if (isset(sc->sc_ucode_api,
5893 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
5894 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
5895 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
5896 		} else {
5897 			req->v7.active_dwell = 10;
5898 			req->v7.passive_dwell = 110;
5899 			req->v7.fragmented_dwell = 44;
5900 		}
5901 	} else {
5902 		/* These timings correspond to iwlwifi's UNASSOC scan. */
5903 		req->v1.active_dwell = 10;
5904 		req->v1.passive_dwell = 110;
5905 		req->v1.fragmented_dwell = 44;
5906 		req->v1.extended_dwell = 90;
5907 	}
5908 
5909 	if (bgscan) {
5910 		const uint32_t timeout = htole32(120);
5911 		if (isset(sc->sc_ucode_api,
5912 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
5913 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
5914 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
5915 		} else if (isset(sc->sc_ucode_api,
5916 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5917 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
5918 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
5919 		} else {
5920 			req->v1.max_out_time = timeout;
5921 			req->v1.suspend_time = timeout;
5922 		}
5923 	}
5924 
5925 	req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5926 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5927 
5928 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
5929 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
5930 	chanparam->count = iwm_umac_scan_fill_channels(sc,
5931 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
5932 	    ic->ic_des_esslen != 0, bgscan);
5933 	chanparam->flags = 0;
5934 
5935 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
5936 	    sc->sc_capa_n_scan_channels;
5937 	tail = tail_data;
5938 	/* tail v1 layout differs in preq and direct_scan member fields. */
5939 	tailv1 = tail_data;
5940 
5941 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5942 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
5943 
5944 	/* Check if we're doing an active directed scan. */
5945 	if (ic->ic_des_esslen != 0) {
5946 		if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
5947 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5948 			tail->direct_scan[0].len = ic->ic_des_esslen;
5949 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5950 			    ic->ic_des_esslen);
5951 		} else {
5952 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5953 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
5954 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
5955 			    ic->ic_des_esslen);
5956 		}
5957 		req->general_flags |=
5958 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5959 	} else
5960 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5961 
5962 	if (isset(sc->sc_enabled_capa,
5963 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5964 		req->general_flags |=
5965 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5966 
5967 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5968 		req->general_flags |=
5969 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
5970 	} else {
5971 		req->general_flags |=
5972 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5973 	}
5974 
5975 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
5976 		err = iwm_fill_probe_req(sc, &tail->preq);
5977 	else
5978 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
5979 	if (err) {
5980 		free(req, M_DEVBUF, req_len);
5981 		return err;
5982 	}
5983 
5984 	/* Specify the scan plan: We'll do one iteration. */
5985 	tail->schedule[0].interval = 0;
5986 	tail->schedule[0].iter_count = 1;
5987 
5988 	err = iwm_send_cmd(sc, &hcmd);
5989 	free(req, M_DEVBUF, req_len);
5990 	return err;
5991 }
5992 
5993 uint8_t
5994 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5995 {
5996 	int i;
5997 	uint8_t rval;
5998 
5999 	for (i = 0; i < rs->rs_nrates; i++) {
6000 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6001 		if (rval == iwm_rates[ridx].rate)
6002 			return rs->rs_rates[i];
6003 	}
6004 
6005 	return 0;
6006 }
6007 
6008 int
6009 iwm_rval2ridx(int rval)
6010 {
6011 	int ridx;
6012 
6013 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
6014 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
6015 			continue;
6016 		if (rval == iwm_rates[ridx].rate)
6017 			break;
6018 	}
6019 
6020 	return ridx;
6021 }
6022 
6023 void
6024 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
6025     int *ofdm_rates)
6026 {
6027 	struct ieee80211_node *ni = &in->in_ni;
6028 	struct ieee80211_rateset *rs = &ni->ni_rates;
6029 	int lowest_present_ofdm = -1;
6030 	int lowest_present_cck = -1;
6031 	uint8_t cck = 0;
6032 	uint8_t ofdm = 0;
6033 	int i;
6034 
6035 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
6036 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
6037 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
6038 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6039 				continue;
6040 			cck |= (1 << i);
6041 			if (lowest_present_cck == -1 || lowest_present_cck > i)
6042 				lowest_present_cck = i;
6043 		}
6044 	}
6045 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
6046 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6047 			continue;
6048 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
6049 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
6050 			lowest_present_ofdm = i;
6051 	}
6052 
6053 	/*
6054 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
6055 	 * variables. This isn't sufficient though, as there might not
6056 	 * be all the right rates in the bitmap. E.g. if the only basic
6057 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
6058 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
6059 	 *
6060 	 *    [...] a STA responding to a received frame shall transmit
6061 	 *    its Control Response frame [...] at the highest rate in the
6062 	 *    BSSBasicRateSet parameter that is less than or equal to the
6063 	 *    rate of the immediately previous frame in the frame exchange
6064 	 *    sequence ([...]) and that is of the same modulation class
6065 	 *    ([...]) as the received frame. If no rate contained in the
6066 	 *    BSSBasicRateSet parameter meets these conditions, then the
6067 	 *    control frame sent in response to a received frame shall be
6068 	 *    transmitted at the highest mandatory rate of the PHY that is
6069 	 *    less than or equal to the rate of the received frame, and
6070 	 *    that is of the same modulation class as the received frame.
6071 	 *
6072 	 * As a consequence, we need to add all mandatory rates that are
6073 	 * lower than all of the basic rates to these bitmaps.
6074 	 */
6075 
6076 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
6077 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
6078 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
6079 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
6080 	/* 6M already there or needed so always add */
6081 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
6082 
6083 	/*
6084 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
6085 	 * Note, however:
6086 	 *  - if no CCK rates are basic, it must be ERP since there must
6087 	 *    be some basic rates at all, so they're OFDM => ERP PHY
6088 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
6089 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
6090 	 *  - if 5.5M is basic, 1M and 2M are mandatory
6091 	 *  - if 2M is basic, 1M is mandatory
6092 	 *  - if 1M is basic, that's the only valid ACK rate.
6093 	 * As a consequence, it's not as complicated as it sounds, just add
6094 	 * any lower rates to the ACK rate bitmap.
6095 	 */
6096 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
6097 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
6098 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
6099 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
6100 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
6101 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
6102 	/* 1M already there or needed so always add */
6103 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
6104 
6105 	*cck_rates = cck;
6106 	*ofdm_rates = ofdm;
6107 }
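
/*
 * Illustration (not part of the driver): if the BSSBasicRateSet
 * contains only 5.5 and 11 Mbps, the loops in iwm_ack_rates() yield a
 * cck bitmap with the 5.5M and 11M bits set and lowest_present_cck
 * pointing at 5.5M. The mandatory-rate fixups then add 2M (the 2M
 * index is below the 5.5M index) and 1M (always added), so the CCK ACK
 * bitmap handed to the firmware covers 1, 2, 5.5 and 11 Mbps, as
 * required by 802.11-2007 9.6.
 */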
6108 
6109 void
6110 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
6111     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
6112 {
6113 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6114 	struct ieee80211com *ic = &sc->sc_ic;
6115 	struct ieee80211_node *ni = ic->ic_bss;
6116 	int cck_ack_rates, ofdm_ack_rates;
6117 	int i;
6118 
6119 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
6120 	    in->in_color));
6121 	cmd->action = htole32(action);
6122 
6123 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6124 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
6125 	else if (ic->ic_opmode == IEEE80211_M_STA)
6126 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
6127 	else
6128 		panic("unsupported operating mode %d\n", ic->ic_opmode);
6129 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
6130 
6131 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
6132 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6133 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6134 		return;
6135 	}
6136 
6137 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
6138 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6139 	cmd->cck_rates = htole32(cck_ack_rates);
6140 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6141 
6142 	cmd->cck_short_preamble
6143 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6144 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
6145 	cmd->short_slot
6146 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6147 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
6148 
6149 	for (i = 0; i < EDCA_NUM_AC; i++) {
6150 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6151 		int txf = iwm_ac_to_tx_fifo[i];
6152 
6153 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
6154 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
6155 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6156 		cmd->ac[txf].fifos_mask = (1 << txf);
6157 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6158 	}
6159 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6160 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
6161 
6162 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6163 		enum ieee80211_htprot htprot =
6164 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6165 		switch (htprot) {
6166 		case IEEE80211_HTPROT_NONE:
6167 			break;
6168 		case IEEE80211_HTPROT_NONMEMBER:
6169 		case IEEE80211_HTPROT_NONHT_MIXED:
6170 			cmd->protection_flags |=
6171 			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
6172 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
6173 				cmd->protection_flags |=
6174 				    htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
6175 			break;
6176 		case IEEE80211_HTPROT_20MHZ:
6177 			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
6178 				/* XXX ... and if our channel is 40 MHz ... */
6179 				cmd->protection_flags |=
6180 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
6181 				    IWM_MAC_PROT_FLG_FAT_PROT);
6182 				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
6183 					cmd->protection_flags |= htole32(
6184 					    IWM_MAC_PROT_FLG_SELF_CTS_EN);
6185 			}
6186 			break;
6187 		default:
6188 			break;
6189 		}
6190 
6191 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
6192 	}
6193 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6194 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
6195 
6196 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
6197 #undef IWM_EXP2
6198 }
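
/*
 * Illustration (not part of the driver): IWM_EXP2() converts the
 * 802.11 exponent form of the contention window into the absolute form
 * the firmware expects, e.g. ECWmin = 4 gives CWmin = 2^4 - 1 = 15.
 * Similarly, edca_txop converts the EDCA TXOP limit from its 32
 * microsecond units into microseconds.
 */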
6199 
6200 void
6201 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
6202     struct iwm_mac_data_sta *sta, int assoc)
6203 {
6204 	struct ieee80211_node *ni = &in->in_ni;
6205 	uint32_t dtim_off;
6206 	uint64_t tsf;
6207 
6208 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
6209 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
6210 	tsf = letoh64(tsf);
6211 
6212 	sta->is_assoc = htole32(assoc);
6213 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
6214 	sta->dtim_tsf = htole64(tsf + dtim_off);
6215 	sta->bi = htole32(ni->ni_intval);
6216 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
6217 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
6218 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
6219 	sta->listen_interval = htole32(10);
6220 	sta->assoc_id = htole32(ni->ni_associd);
6221 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
6222 }
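
/*
 * Illustration (not part of the driver): dtim_off above is the
 * distance from the last received beacon to the next DTIM beacon, in
 * microseconds. With ni_dtimcount = 2 and ni_intval = 100:
 *
 *	dtim_off = 2 * 100 * IEEE80211_DUR_TU = 2 * 100 * 1024 = 204800
 *
 * so dtim_time and dtim_tsf are the beacon's arrival time and TSF
 * shifted forward by two beacon intervals.
 */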
6223 
6224 int
6225 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
6226     int assoc)
6227 {
6228 	struct ieee80211com *ic = &sc->sc_ic;
6229 	struct ieee80211_node *ni = &in->in_ni;
6230 	struct iwm_mac_ctx_cmd cmd;
6231 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
6232 
6233 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
6234 		panic("MAC already added");
6235 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6236 		panic("MAC already removed");
6237 
6238 	memset(&cmd, 0, sizeof(cmd));
6239 
6240 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
6241 
6242 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6243 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
6244 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
6245 		    IWM_MAC_FILTER_ACCEPT_GRP |
6246 		    IWM_MAC_FILTER_IN_BEACON |
6247 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
6248 		    IWM_MAC_FILTER_IN_CRC32);
6249 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
6250 		/*
6251 		 * Allow beacons to pass through as long as we are not
6252 		 * associated or we do not have DTIM period information.
6253 		 */
6254 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
6255 	else
6256 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6257 
6258 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6259 }
6260 
6261 int
6262 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
6263 {
6264 	struct iwm_time_quota_cmd cmd;
6265 	int i, idx, num_active_macs, quota, quota_rem;
6266 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
6267 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
6268 	uint16_t id;
6269 
6270 	memset(&cmd, 0, sizeof(cmd));
6271 
6272 	/* currently, PHY ID == binding ID */
6273 	if (in && in->in_phyctxt) {
6274 		id = in->in_phyctxt->id;
6275 		KASSERT(id < IWM_MAX_BINDINGS);
6276 		colors[id] = in->in_phyctxt->color;
6277 		if (running)
6278 			n_ifs[id] = 1;
6279 	}
6280 
6281 	/*
6282 	 * The FW's scheduling session consists of
6283 	 * IWM_MAX_QUOTA fragments. Divide these fragments
6284 	 * equally among all the bindings that require quota.
6285 	 */
6286 	num_active_macs = 0;
6287 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
6288 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
6289 		num_active_macs += n_ifs[i];
6290 	}
6291 
6292 	quota = 0;
6293 	quota_rem = 0;
6294 	if (num_active_macs) {
6295 		quota = IWM_MAX_QUOTA / num_active_macs;
6296 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
6297 	}
6298 
6299 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
6300 		if (colors[i] < 0)
6301 			continue;
6302 
6303 		cmd.quotas[idx].id_and_color =
6304 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
6305 
6306 		if (n_ifs[i] <= 0) {
6307 			cmd.quotas[idx].quota = htole32(0);
6308 			cmd.quotas[idx].max_duration = htole32(0);
6309 		} else {
6310 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
6311 			cmd.quotas[idx].max_duration = htole32(0);
6312 		}
6313 		idx++;
6314 	}
6315 
6316 	/* Give the remainder of the session to the first binding */
6317 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
6318 
6319 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
6320 	    sizeof(cmd), &cmd);
6321 }
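
/*
 * Illustration (not part of the driver): with a single active MAC the
 * division above yields quota = IWM_MAX_QUOTA and quota_rem = 0, i.e.
 * the binding gets the entire scheduling session. With two active
 * bindings each would get IWM_MAX_QUOTA / 2, and the first binding
 * would also receive the remainder, so the fragments always sum to
 * IWM_MAX_QUOTA.
 */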
6322 
6323 void
6324 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
6325 {
6326 	int s = splnet();
6327 
6328 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
6329 		splx(s);
6330 		return;
6331 	}
6332 
6333 	refcnt_take(&sc->task_refs);
6334 	if (!task_add(taskq, task))
6335 		refcnt_rele_wake(&sc->task_refs);
6336 	splx(s);
6337 }
6338 
6339 void
6340 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
6341 {
6342 	if (task_del(taskq, task))
6343 		refcnt_rele(&sc->task_refs);
6344 }
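
/*
 * Illustration (not part of the driver): iwm_add_task() and
 * iwm_del_task() keep sc->task_refs balanced. Every queued task body
 * must drop the reference taken on its behalf, typically as soon as it
 * detects a shutdown:
 *
 *	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
 *		refcnt_rele_wake(&sc->task_refs);
 *		splx(s);
 *		return;
 *	}
 *
 * This lets iwm_stop() wait for all in-flight tasks to drain before
 * tearing down state (see iwm_newstate_task() and iwm_setrates_task()).
 */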
6345 
6346 int
6347 iwm_scan(struct iwm_softc *sc)
6348 {
6349 	struct ieee80211com *ic = &sc->sc_ic;
6350 	struct ifnet *ifp = IC2IFP(ic);
6351 	int err;
6352 
6353 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
6354 		err = iwm_scan_abort(sc);
6355 		if (err) {
6356 			printf("%s: could not abort background scan\n",
6357 			    DEVNAME(sc));
6358 			return err;
6359 		}
6360 	}
6361 
6362 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6363 		err = iwm_umac_scan(sc, 0);
6364 	else
6365 		err = iwm_lmac_scan(sc, 0);
6366 	if (err) {
6367 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6368 		return err;
6369 	}
6370 
6371 	/*
6372 	 * The current mode might have been fixed during association.
6373 	 * Ensure all channels get scanned.
6374 	 */
6375 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
6376 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
6377 
6378 	sc->sc_flags |= IWM_FLAG_SCANNING;
6379 	if (ifp->if_flags & IFF_DEBUG)
6380 		printf("%s: %s -> %s\n", ifp->if_xname,
6381 		    ieee80211_state_name[ic->ic_state],
6382 		    ieee80211_state_name[IEEE80211_S_SCAN]);
6383 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
6384 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
6385 		ieee80211_node_cleanup(ic, ic->ic_bss);
6386 	}
6387 	ic->ic_state = IEEE80211_S_SCAN;
6388 	iwm_led_blink_start(sc);
6389 	wakeup(&ic->ic_state); /* wake iwm_init() */
6390 
6391 	return 0;
6392 }
6393 
6394 int
6395 iwm_bgscan(struct ieee80211com *ic)
6396 {
6397 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6398 	int err;
6399 
6400 	if (sc->sc_flags & IWM_FLAG_SCANNING)
6401 		return 0;
6402 
6403 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6404 		err = iwm_umac_scan(sc, 1);
6405 	else
6406 		err = iwm_lmac_scan(sc, 1);
6407 	if (err) {
6408 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6409 		return err;
6410 	}
6411 
6412 	sc->sc_flags |= IWM_FLAG_BGSCAN;
6413 	return 0;
6414 }
6415 
6416 int
6417 iwm_umac_scan_abort(struct iwm_softc *sc)
6418 {
6419 	struct iwm_umac_scan_abort cmd = { 0 };
6420 
6421 	return iwm_send_cmd_pdu(sc,
6422 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
6423 	    0, sizeof(cmd), &cmd);
6424 }
6425 
6426 int
6427 iwm_lmac_scan_abort(struct iwm_softc *sc)
6428 {
6429 	struct iwm_host_cmd cmd = {
6430 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
6431 	};
6432 	int err, status;
6433 
6434 	err = iwm_send_cmd_status(sc, &cmd, &status);
6435 	if (err)
6436 		return err;
6437 
6438 	if (status != IWM_CAN_ABORT_STATUS) {
6439 		/*
6440 		 * The scan abort will return 1 for success or
6441 		 * 2 for "failure".  A failure condition can be
6442 		 * due to simply not being in an active scan, which
6443 		 * can occur if we send the scan abort before the
6444 		 * microcode has notified us that a scan has completed.
6445 		 */
6446 		return EBUSY;
6447 	}
6448 
6449 	return 0;
6450 }
6451 
6452 int
6453 iwm_scan_abort(struct iwm_softc *sc)
6454 {
6455 	int err;
6456 
6457 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6458 		err = iwm_umac_scan_abort(sc);
6459 	else
6460 		err = iwm_lmac_scan_abort(sc);
6461 
6462 	if (err == 0)
6463 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
6464 	return err;
6465 }
6466 
6467 int
6468 iwm_auth(struct iwm_softc *sc)
6469 {
6470 	struct ieee80211com *ic = &sc->sc_ic;
6471 	struct iwm_node *in = (void *)ic->ic_bss;
6472 	uint32_t duration;
6473 	int generation = sc->sc_generation, err;
6474 
6475 	splassert(IPL_NET);
6476 
6477 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6478 		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
6479 	else
6480 		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
6481 	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
6482 	    IWM_FW_CTXT_ACTION_MODIFY, 0);
6483 	if (err) {
6484 		printf("%s: could not update PHY context (error %d)\n",
6485 		    DEVNAME(sc), err);
6486 		return err;
6487 	}
6488 	in->in_phyctxt = &sc->sc_phyctxt[0];
6489 
6490 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
6491 	if (err) {
6492 		printf("%s: could not add MAC context (error %d)\n",
6493 		    DEVNAME(sc), err);
6494 		return err;
6495 	}
6496 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
6497 
6498 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
6499 	if (err) {
6500 		printf("%s: could not add binding (error %d)\n",
6501 		    DEVNAME(sc), err);
6502 		goto rm_mac_ctxt;
6503 	}
6504 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
6505 
6506 	err = iwm_add_sta_cmd(sc, in, 0);
6507 	if (err) {
6508 		printf("%s: could not add sta (error %d)\n",
6509 		    DEVNAME(sc), err);
6510 		goto rm_binding;
6511 	}
6512 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
6513 
6514 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6515 		return 0;
6516 
6517 	/*
6518 	 * Prevent the FW from wandering off channel during association
6519 	 * by "protecting" the session with a time event.
6520 	 */
6521 	if (in->in_ni.ni_intval)
6522 		duration = in->in_ni.ni_intval * 2;
6523 	else
6524 		duration = IEEE80211_DUR_TU;
6525 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
6526 
6527 	return 0;
6528 
6529 rm_binding:
6530 	if (generation == sc->sc_generation) {
6531 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
6532 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
6533 	}
6534 rm_mac_ctxt:
6535 	if (generation == sc->sc_generation) {
6536 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
6537 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
6538 	}
6539 	return err;
6540 }
6541 
6542 int
6543 iwm_deauth(struct iwm_softc *sc)
6544 {
6545 	struct ieee80211com *ic = &sc->sc_ic;
6546 	struct iwm_node *in = (void *)ic->ic_bss;
6547 	int ac, tfd_queue_msk, err;
6548 
6549 	splassert(IPL_NET);
6550 
6551 	iwm_unprotect_session(sc, in);
6552 
6553 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
6554 		err = iwm_rm_sta_cmd(sc, in);
6555 		if (err) {
6556 			printf("%s: could not remove STA (error %d)\n",
6557 			    DEVNAME(sc), err);
6558 			return err;
6559 		}
6560 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
6561 	}
6562 
6563 	tfd_queue_msk = 0;
6564 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6565 		int qid = ac;
6566 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6567 			qid += IWM_DQA_MIN_MGMT_QUEUE;
6568 		tfd_queue_msk |= htole32(1 << qid);
6569 	}
6570 
6571 	err = iwm_flush_tx_path(sc, tfd_queue_msk);
6572 	if (err) {
6573 		printf("%s: could not flush Tx path (error %d)\n",
6574 		    DEVNAME(sc), err);
6575 		return err;
6576 	}
6577 
6578 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
6579 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
6580 		if (err) {
6581 			printf("%s: could not remove binding (error %d)\n",
6582 			    DEVNAME(sc), err);
6583 			return err;
6584 		}
6585 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
6586 	}
6587 
6588 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
6589 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
6590 		if (err) {
6591 			printf("%s: could not remove MAC context (error %d)\n",
6592 			    DEVNAME(sc), err);
6593 			return err;
6594 		}
6595 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
6596 	}
6597 
6598 	return 0;
6599 }
6600 
6601 int
6602 iwm_assoc(struct iwm_softc *sc)
6603 {
6604 	struct ieee80211com *ic = &sc->sc_ic;
6605 	struct iwm_node *in = (void *)ic->ic_bss;
6606 	int update_sta = (sc->sc_flags & IWM_FLAG_STA_ACTIVE);
6607 	int err;
6608 
6609 	splassert(IPL_NET);
6610 
6611 	err = iwm_add_sta_cmd(sc, in, update_sta);
6612 	if (err) {
6613 		printf("%s: could not %s STA (error %d)\n",
6614 		    DEVNAME(sc), update_sta ? "update" : "add", err);
6615 		return err;
6616 	}
6617 
6618 	return 0;
6619 }
6620 
6621 int
6622 iwm_disassoc(struct iwm_softc *sc)
6623 {
6624 	struct ieee80211com *ic = &sc->sc_ic;
6625 	struct iwm_node *in = (void *)ic->ic_bss;
6626 	int err;
6627 
6628 	splassert(IPL_NET);
6629 
6630 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
6631 		err = iwm_rm_sta_cmd(sc, in);
6632 		if (err) {
6633 			printf("%s: could not remove STA (error %d)\n",
6634 			    DEVNAME(sc), err);
6635 			return err;
6636 		}
6637 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
6638 	}
6639 
6640 	return 0;
6641 }
6642 
6643 int
6644 iwm_run(struct iwm_softc *sc)
6645 {
6646 	struct ieee80211com *ic = &sc->sc_ic;
6647 	struct iwm_node *in = (void *)ic->ic_bss;
6648 	int err;
6649 
6650 	splassert(IPL_NET);
6651 
6652 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6653 		/* Add a MAC context and a sniffing STA. */
6654 		err = iwm_auth(sc);
6655 		if (err)
6656 			return err;
6657 	}
6658 
6659 	/* Configure Rx chains for MIMO. */
6660 	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
6661 	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
6662 	    !sc->sc_nvm.sku_cap_mimo_disable) {
6663 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
6664 		    2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
6665 		if (err) {
6666 			printf("%s: failed to update PHY\n",
6667 			    DEVNAME(sc));
6668 			return err;
6669 		}
6670 	}
6671 
6672 	/* We have now been assigned an associd by the AP. */
6673 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
6674 	if (err) {
6675 		printf("%s: failed to update MAC\n", DEVNAME(sc));
6676 		return err;
6677 	}
6678 
6679 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
6680 	if (err) {
6681 		printf("%s: could not set sf full on (error %d)\n",
6682 		    DEVNAME(sc), err);
6683 		return err;
6684 	}
6685 
6686 	err = iwm_allow_mcast(sc);
6687 	if (err) {
6688 		printf("%s: could not allow mcast (error %d)\n",
6689 		    DEVNAME(sc), err);
6690 		return err;
6691 	}
6692 
6693 	err = iwm_power_update_device(sc);
6694 	if (err) {
6695 		printf("%s: could not send power command (error %d)\n",
6696 		    DEVNAME(sc), err);
6697 		return err;
6698 	}
6699 #ifdef notyet
6700 	/*
6701 	 * Disabled for now. Default beacon filter settings
6702 	 * prevent net80211 from getting ERP and HT protection
6703 	 * updates from beacons.
6704 	 */
6705 	err = iwm_enable_beacon_filter(sc, in);
6706 	if (err) {
6707 		printf("%s: could not enable beacon filter\n",
6708 		    DEVNAME(sc));
6709 		return err;
6710 	}
6711 #endif
6712 	err = iwm_power_mac_update_mode(sc, in);
6713 	if (err) {
6714 		printf("%s: could not update MAC power (error %d)\n",
6715 		    DEVNAME(sc), err);
6716 		return err;
6717 	}
6718 
6719 	err = iwm_update_quotas(sc, in, 1);
6720 	if (err) {
6721 		printf("%s: could not update quotas (error %d)\n",
6722 		    DEVNAME(sc), err);
6723 		return err;
6724 	}
6725 
6726 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
6727 	ieee80211_mira_node_init(&in->in_mn);
6728 
6729 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6730 		iwm_led_blink_start(sc);
6731 		return 0;
6732 	}
6733 
6734 	/* Start at the lowest available bit-rate; AMRR will raise it. */
6735 	in->in_ni.ni_txrate = 0;
6736 	in->in_ni.ni_txmcs = 0;
6737 	in->chosen_txrate = 0;
6738 	in->chosen_txmcs = 0;
6739 	iwm_setrates(in);
6740 
6741 	timeout_add_msec(&sc->sc_calib_to, 500);
6742 	iwm_led_enable(sc);
6743 
6744 	return 0;
6745 }
6746 
6747 int
6748 iwm_run_stop(struct iwm_softc *sc)
6749 {
6750 	struct ieee80211com *ic = &sc->sc_ic;
6751 	struct iwm_node *in = (void *)ic->ic_bss;
6752 	int err;
6753 
6754 	splassert(IPL_NET);
6755 
6756 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6757 		iwm_led_blink_stop(sc);
6758 
6759 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
6760 	if (err)
6761 		return err;
6762 
6763 	iwm_disable_beacon_filter(sc);
6764 
6765 	err = iwm_update_quotas(sc, in, 0);
6766 	if (err) {
6767 		printf("%s: could not update quotas (error %d)\n",
6768 		    DEVNAME(sc), err);
6769 		return err;
6770 	}
6771 
6772 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
6773 	if (err) {
6774 		printf("%s: failed to update MAC\n", DEVNAME(sc));
6775 		return err;
6776 	}
6777 
6778 	/* Reset Tx chains in case MIMO was enabled. */
6779 	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
6780 	    !sc->sc_nvm.sku_cap_mimo_disable) {
6781 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
6782 		    IWM_FW_CTXT_ACTION_MODIFY, 0);
6783 		if (err) {
6784 			printf("%s: failed to update PHY\n", DEVNAME(sc));
6785 			return err;
6786 		}
6787 	}
6788 
6789 	return 0;
6790 }
6791 
6792 struct ieee80211_node *
6793 iwm_node_alloc(struct ieee80211com *ic)
6794 {
6795 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
6796 }
6797 
6798 void
6799 iwm_calib_timeout(void *arg)
6800 {
6801 	struct iwm_softc *sc = arg;
6802 	struct ieee80211com *ic = &sc->sc_ic;
6803 	struct iwm_node *in = (void *)ic->ic_bss;
6804 	struct ieee80211_node *ni = &in->in_ni;
6805 	int s;
6806 
6807 	s = splnet();
6808 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
6809 	    ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck) &&
6810 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
6811 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
6812 		/*
6813 		 * If AMRR has chosen a new TX rate we must update
6814 		 * the firmware's LQ rate table from process context.
6815 		 * ni_txrate may change again before the task runs, so
6816 		 * cache the chosen rate in the iwm_node structure.
6817 		 */
6818 		if (ni->ni_txrate != in->chosen_txrate) {
6819 			in->chosen_txrate = ni->ni_txrate;
6820 			iwm_add_task(sc, systq, &sc->setrates_task);
6821 		}
6822 		if (in->ht_force_cck) {
6823 			struct ieee80211_rateset *rs = &ni->ni_rates;
6824 			uint8_t rv;
6825 			rv = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6826 			if (IWM_RVAL_IS_OFDM(rv))
6827 				in->ht_force_cck = 0;
6828 		}
6829 	}
6830 
6831 	splx(s);
6832 
6833 	timeout_add_msec(&sc->sc_calib_to, 500);
6834 }
6835 
6836 void
6837 iwm_setrates_task(void *arg)
6838 {
6839 	struct iwm_softc *sc = arg;
6840 	struct ieee80211com *ic = &sc->sc_ic;
6841 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6842 	int s = splnet();
6843 
6844 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
6845 		refcnt_rele_wake(&sc->task_refs);
6846 		splx(s);
6847 		return;
6848 	}
6849 
6850 	/* Update the rate table based on the new TX rate chosen by AMRR. */
6851 	iwm_setrates(in);
6852 	refcnt_rele_wake(&sc->task_refs);
6853 	splx(s);
6854 }
6855 
6856 void
6857 iwm_setrates(struct iwm_node *in)
6858 {
6859 	struct ieee80211_node *ni = &in->in_ni;
6860 	struct ieee80211com *ic = ni->ni_ic;
6861 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6862 	struct iwm_lq_cmd lqcmd;
6863 	struct ieee80211_rateset *rs = &ni->ni_rates;
6864 	int i, ridx, ridx_min, ridx_max, j, sgi_ok = 0, mimo, tab = 0;
6865 	struct iwm_host_cmd cmd = {
6866 		.id = IWM_LQ_CMD,
6867 		.len = { sizeof(lqcmd), },
6868 	};
6869 
6870 	memset(&lqcmd, 0, sizeof(lqcmd));
6871 	lqcmd.sta_id = IWM_STATION_ID;
6872 
6873 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6874 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
6875 
6876 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6877 	    ieee80211_node_supports_ht_sgi20(ni)) {
6878 		ni->ni_flags |= IEEE80211_NODE_HT_SGI20;
6879 		sgi_ok = 1;
6880 	}
6881 
6882 	/*
6883 	 * Fill the LQ rate selection table with legacy and/or HT rates
6884 	 * in descending order, i.e. with the node's current TX rate first.
6885 	 * In cases where throughput of an HT rate corresponds to a legacy
6886 	 * rate it makes no sense to add both. We rely on the fact that
6887 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
6888 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
6889 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
6890 	 */
6891 	j = 0;
6892 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6893 	mimo = iwm_is_mimo_mcs(in->chosen_txmcs);
6894 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
6895 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
6896 		uint8_t plcp = iwm_rates[ridx].plcp;
6897 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
6898 
6899 		if (j >= nitems(lqcmd.rs_table))
6900 			break;
6901 		tab = 0;
6902 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6903 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
6904 				continue;
6905 			/* Do not mix SISO and MIMO HT rates. */
6906 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
6907 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
6908 				continue;
6909 			for (i = in->chosen_txmcs; i >= 0; i--) {
6910 				if (isclr(ni->ni_rxmcs, i))
6911 					continue;
6912 				if (ridx == iwm_mcs2ridx[i]) {
6913 					tab = ht_plcp;
6914 					tab |= IWM_RATE_MCS_HT_MSK;
6915 					if (sgi_ok)
6916 						tab |= IWM_RATE_MCS_SGI_MSK;
6917 					break;
6918 				}
6919 			}
6920 		} else if (plcp != IWM_RATE_INVM_PLCP) {
6921 			for (i = in->chosen_txrate; i >= 0; i--) {
6922 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
6923 				    IEEE80211_RATE_VAL)) {
6924 					tab = plcp;
6925 					break;
6926 				}
6927 			}
6928 		}
6929 
6930 		if (tab == 0)
6931 			continue;
6932 
6933 		if (iwm_is_mimo_ht_plcp(ht_plcp))
6934 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
6935 		else
6936 			tab |= IWM_RATE_MCS_ANT_A_MSK;
6937 
6938 		if (IWM_RIDX_IS_CCK(ridx))
6939 			tab |= IWM_RATE_MCS_CCK_MSK;
6940 		lqcmd.rs_table[j++] = htole32(tab);
6941 	}
6942 
6943 	lqcmd.mimo_delim = (mimo ? j : 0);
6944 
6945 	/* Fill the rest with the lowest possible rate */
6946 	while (j < nitems(lqcmd.rs_table)) {
6947 		tab = iwm_rates[ridx_min].plcp;
6948 		if (IWM_RIDX_IS_CCK(ridx_min))
6949 			tab |= IWM_RATE_MCS_CCK_MSK;
6950 		tab |= IWM_RATE_MCS_ANT_A_MSK;
6951 		lqcmd.rs_table[j++] = htole32(tab);
6952 	}
6953 
6954 	lqcmd.single_stream_ant_msk = IWM_ANT_A;
6955 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
6956 
6957 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
6958 	lqcmd.agg_disable_start_th = 3;
6959 #ifdef notyet
6960 	lqcmd.agg_frame_cnt_limit = 0x3f;
6961 #else
6962 	lqcmd.agg_frame_cnt_limit = 1; /* tx agg disabled */
6963 #endif
6964 
6965 	cmd.data[0] = &lqcmd;
6966 	iwm_send_cmd(sc, &cmd);
6967 }
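
/*
 * Illustration (not part of the driver): for a non-HT node supporting
 * the full 11g rate set with a current AMRR rate of 24 Mbps, the loop
 * in iwm_setrates() fills lqcmd.rs_table with the 24, 18, 12, 11, 9,
 * 6, 5.5, 2 and 1 Mbps entries (rates at or below the current one),
 * and pads any remaining slots with the lowest basic rate. The
 * firmware walks down this table as retransmissions fail.
 */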
6968 
6969 int
6970 iwm_media_change(struct ifnet *ifp)
6971 {
6972 	struct iwm_softc *sc = ifp->if_softc;
6973 	struct ieee80211com *ic = &sc->sc_ic;
6974 	uint8_t rate, ridx;
6975 	int err;
6976 
6977 	err = ieee80211_media_change(ifp);
6978 	if (err != ENETRESET)
6979 		return err;
6980 
6981 	if (ic->ic_fixed_mcs != -1)
6982 		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
6983 	else if (ic->ic_fixed_rate != -1) {
6984 		rate = ic->ic_sup_rates[ic->ic_curmode].
6985 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6986 		/* Map 802.11 rate to HW rate index. */
6987 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
6988 			if (iwm_rates[ridx].rate == rate)
6989 				break;
6990 		sc->sc_fixed_ridx = ridx;
6991 	}
6992 
6993 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6994 	    (IFF_UP | IFF_RUNNING)) {
6995 		iwm_stop(ifp);
6996 		err = iwm_init(ifp);
6997 	}
6998 	return err;
6999 }
7000 
7001 void
7002 iwm_newstate_task(void *psc)
7003 {
7004 	struct iwm_softc *sc = (struct iwm_softc *)psc;
7005 	struct ieee80211com *ic = &sc->sc_ic;
7006 	enum ieee80211_state nstate = sc->ns_nstate;
7007 	enum ieee80211_state ostate = ic->ic_state;
7008 	int arg = sc->ns_arg;
7009 	int err = 0, s = splnet();
7010 
7011 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7012 		/* iwm_stop() is waiting for us. */
7013 		refcnt_rele_wake(&sc->task_refs);
7014 		splx(s);
7015 		return;
7016 	}
7017 
7018 	if (ostate == IEEE80211_S_SCAN) {
7019 		if (nstate == ostate) {
7020 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
7021 				refcnt_rele_wake(&sc->task_refs);
7022 				splx(s);
7023 				return;
7024 			}
7025 			/* Firmware is no longer scanning. Do another scan. */
7026 			goto next_scan;
7027 		} else
7028 			iwm_led_blink_stop(sc);
7029 	}
7030 
7031 	if (nstate <= ostate) {
7032 		switch (ostate) {
7033 		case IEEE80211_S_RUN:
7034 			err = iwm_run_stop(sc);
7035 			if (err)
7036 				goto out;
7037 			/* FALLTHROUGH */
7038 		case IEEE80211_S_ASSOC:
7039 			if (nstate <= IEEE80211_S_ASSOC) {
7040 				err = iwm_disassoc(sc);
7041 				if (err)
7042 					goto out;
7043 			}
7044 			/* FALLTHROUGH */
7045 		case IEEE80211_S_AUTH:
7046 			if (nstate <= IEEE80211_S_AUTH) {
7047 				err = iwm_deauth(sc);
7048 				if (err)
7049 					goto out;
7050 			}
7051 			/* FALLTHROUGH */
7052 		case IEEE80211_S_SCAN:
7053 		case IEEE80211_S_INIT:
7054 			break;
7055 		}
7056 
7057 		/* Die now if iwm_stop() was called while we were sleeping. */
7058 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7059 			refcnt_rele_wake(&sc->task_refs);
7060 			splx(s);
7061 			return;
7062 		}
7063 	}
7064 
7065 	switch (nstate) {
7066 	case IEEE80211_S_INIT:
7067 		break;
7068 
7069 	case IEEE80211_S_SCAN:
7070 next_scan:
7071 		err = iwm_scan(sc);
7072 		if (err)
7073 			break;
7074 		refcnt_rele_wake(&sc->task_refs);
7075 		splx(s);
7076 		return;
7077 
7078 	case IEEE80211_S_AUTH:
7079 		err = iwm_auth(sc);
7080 		break;
7081 
7082 	case IEEE80211_S_ASSOC:
7083 		err = iwm_assoc(sc);
7084 		break;
7085 
7086 	case IEEE80211_S_RUN:
7087 		err = iwm_run(sc);
7088 		break;
7089 	}
7090 
7091 out:
7092 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
7093 		if (err)
7094 			task_add(systq, &sc->init_task);
7095 		else
7096 			sc->sc_newstate(ic, nstate, arg);
7097 	}
7098 	refcnt_rele_wake(&sc->task_refs);
7099 	splx(s);
7100 }
7101 
7102 int
7103 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
7104 {
7105 	struct ifnet *ifp = IC2IFP(ic);
7106 	struct iwm_softc *sc = ifp->if_softc;
7107 	struct iwm_node *in = (void *)ic->ic_bss;
7108 
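
	/*
	 * When leaving RUN state, cancel timeouts and tasks which
	 * reference the current BSS node; it may become invalid during
	 * the state transition.
	 */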
7109 	if (ic->ic_state == IEEE80211_S_RUN) {
7110 		timeout_del(&sc->sc_calib_to);
7111 		ieee80211_mira_cancel_timeouts(&in->in_mn);
7112 		iwm_del_task(sc, systq, &sc->ba_task);
7113 		iwm_del_task(sc, systq, &sc->htprot_task);
7114 		iwm_del_task(sc, systq, &sc->setrates_task);
7115 	}
7116 
7117 	sc->ns_nstate = nstate;
7118 	sc->ns_arg = arg;
7119 
7120 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
7121 
7122 	return 0;
7123 }
7124 
7125 void
7126 iwm_endscan(struct iwm_softc *sc)
7127 {
7128 	struct ieee80211com *ic = &sc->sc_ic;
7129 
7130 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
7131 		return;
7132 
7133 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
7134 	ieee80211_end_scan(&ic->ic_if);
7135 }
7136 
7137 /*
7138  * Aging and idle timeouts for the different possible scenarios
7139  * in the default configuration.
7140  */
7141 static const uint32_t
7142 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
7143 	{
7144 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
7145 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
7146 	},
7147 	{
7148 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
7149 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
7150 	},
7151 	{
7152 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
7153 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
7154 	},
7155 	{
7156 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
7157 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
7158 	},
7159 	{
7160 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
7161 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
7162 	},
7163 };
7164 
7165 /*
7166  * Aging and idle timeouts for the different possible scenarios
7167  * in single BSS MAC configuration.
7168  */
7169 static const uint32_t
7170 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
7171 	{
7172 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
7173 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
7174 	},
7175 	{
7176 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
7177 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
7178 	},
7179 	{
7180 		htole32(IWM_SF_MCAST_AGING_TIMER),
7181 		htole32(IWM_SF_MCAST_IDLE_TIMER)
7182 	},
7183 	{
7184 		htole32(IWM_SF_BA_AGING_TIMER),
7185 		htole32(IWM_SF_BA_IDLE_TIMER)
7186 	},
7187 	{
7188 		htole32(IWM_SF_TX_RE_AGING_TIMER),
7189 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
7190 	},
7191 };
7192 
7193 void
7194 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
7195     struct ieee80211_node *ni)
7196 {
7197 	int i, j, watermark;
7198 
7199 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
7200 
7201 	/*
7202 	 * If we are in the association flow, check the antenna configuration
7203 	 * capabilities of the AP station and choose the watermark accordingly.
7204 	 */
7205 	if (ni) {
7206 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7207 			if (ni->ni_rxmcs[1] != 0)
7208 				watermark = IWM_SF_W_MARK_MIMO2;
7209 			else
7210 				watermark = IWM_SF_W_MARK_SISO;
7211 		} else {
7212 			watermark = IWM_SF_W_MARK_LEGACY;
7213 		}
7214 	/* default watermark value for unassociated mode. */
7215 	} else {
7216 		watermark = IWM_SF_W_MARK_MIMO2;
7217 	}
7218 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
7219 
7220 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
7221 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
7222 			sf_cmd->long_delay_timeouts[i][j] =
7223 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
7224 		}
7225 	}
7226 
7227 	if (ni) {
7228 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
7229 		       sizeof(iwm_sf_full_timeout));
7230 	} else {
7231 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
7232 		       sizeof(iwm_sf_full_timeout_def));
7233 	}
7235 }
7236 
7237 int
7238 iwm_sf_config(struct iwm_softc *sc, int new_state)
7239 {
7240 	struct ieee80211com *ic = &sc->sc_ic;
7241 	struct iwm_sf_cfg_cmd sf_cmd = {
7242 		.state = htole32(new_state),
7243 	};
7244 	int err = 0;
7245 
7246 #if 0	/* only used for models with sdio interface, in iwlwifi */
7247 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7248 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
7249 #endif
7250 
7251 	switch (new_state) {
7252 	case IWM_SF_UNINIT:
7253 	case IWM_SF_INIT_OFF:
7254 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
7255 		break;
7256 	case IWM_SF_FULL_ON:
7257 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7258 		break;
7259 	default:
7260 		return EINVAL;
7261 	}
7262 
7263 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
7264 				   sizeof(sf_cmd), &sf_cmd);
7265 	return err;
7266 }
7267 
7268 int
7269 iwm_send_bt_init_conf(struct iwm_softc *sc)
7270 {
7271 	struct iwm_bt_coex_cmd bt_cmd;
7272 
7273 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
7274 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
7275 
7276 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
7277 	    &bt_cmd);
7278 }
7279 
7280 int
7281 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
7282 {
7283 	struct iwm_mcc_update_cmd mcc_cmd;
7284 	struct iwm_host_cmd hcmd = {
7285 		.id = IWM_MCC_UPDATE_CMD,
7286 		.flags = IWM_CMD_WANT_RESP,
7287 		.data = { &mcc_cmd },
7288 	};
7289 	int err;
7290 	int resp_v2 = isset(sc->sc_enabled_capa,
7291 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
7292 
7293 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
7294 	    !sc->sc_nvm.lar_enabled) {
7295 		return 0;
7296 	}
7297 
7298 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
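	/* Pack the two-letter ISO country code into a 16-bit MCC value. */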
7299 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
7300 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
7301 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
7302 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
7303 	else
7304 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
7305 
7306 	if (resp_v2) {
7307 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
7308 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
7309 		    sizeof(struct iwm_mcc_update_resp);
7310 	} else {
7311 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
7312 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
7313 		    sizeof(struct iwm_mcc_update_resp_v1);
7314 	}
7315 
7316 	err = iwm_send_cmd(sc, &hcmd);
7317 	if (err)
7318 		return err;
7319 
7320 	iwm_free_resp(sc, &hcmd);
7321 
7322 	return 0;
7323 }
7324 
7325 void
7326 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
7327 {
7328 	struct iwm_host_cmd cmd = {
7329 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
7330 		.len = { sizeof(uint32_t), },
7331 		.data = { &backoff, },
7332 	};
7333 
7334 	iwm_send_cmd(sc, &cmd);
7335 }
7336 
7337 void
7338 iwm_free_fw_paging(struct iwm_softc *sc)
7339 {
7340 	int i;
7341 
7342 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
7343 		return;
7344 
7345 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
7346 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
7347 	}
7348 
7349 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
7350 }
7351 
7352 int
7353 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
7354 {
7355 	int sec_idx, idx;
7356 	uint32_t offset = 0;
7357 
7358 	/*
7359 	 * Find the paging image start point.
7360 	 * If CPU2 exists and is in paging format, the image looks like:
7361 	 * CPU1 sections (2 or more)
7362 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
7363 	 * CPU2 sections (not paged)
7364 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
7365 	 * CPU2 sections from the CPU2 paging section
7366 	 * CPU2 paging CSS
7367 	 * CPU2 paging image (including instructions and data)
7368 	 */
7369 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
7370 		if (image->fw_sect[sec_idx].fws_devoff ==
7371 		    IWM_PAGING_SEPARATOR_SECTION) {
7372 			sec_idx++;
7373 			break;
7374 		}
7375 	}
7376 
7377 	/*
7378 	 * If paging is enabled there should be at least 2 more sections left
7379 	 * (one for the CSS and one for the paging data).
7380 	 */
7381 	if (sec_idx >= nitems(image->fw_sect) - 1) {
7382 		printf("%s: Paging: Missing CSS and/or paging sections\n",
7383 		    DEVNAME(sc));
7384 		iwm_free_fw_paging(sc);
7385 		return EINVAL;
7386 	}
7387 
7388 	/* Copy the CSS block to DRAM. */
7389 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
7390 	    DEVNAME(sc), sec_idx));
7391 
7392 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
7393 	    image->fw_sect[sec_idx].fws_data,
7394 	    sc->fw_paging_db[0].fw_paging_size);
7395 
7396 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
7397 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
7398 
7399 	sec_idx++;
7400 
7401 	/*
7402 	 * Copy the paging blocks to DRAM.
7403 	 * The loop index starts at 1 since the CSS block (index 0) was
7404 	 * already copied above. The loop stops before num_of_paging_blk
7405 	 * since the last block may not be full; it is copied below.
7406 	 */
7407 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
7408 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
7409 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
7410 		    sc->fw_paging_db[idx].fw_paging_size);
7411 
7412 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
7413 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
7414 
7415 		offset += sc->fw_paging_db[idx].fw_paging_size;
7416 	}
7417 
7418 	/* copy the last paging block */
7419 	if (sc->num_of_pages_in_last_blk > 0) {
7420 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
7421 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
7422 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
7423 
7424 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
7425 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
7426 	}
7427 
7428 	return 0;
7429 }
7430 
7431 int
7432 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
7433 {
7434 	int blk_idx = 0;
7435 	int error, num_of_pages;
7436 
7437 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
7438 		int i;
7439 		/* The device was reset; set up firmware paging again. */
7440 		bus_dmamap_sync(sc->sc_dmat,
7441 		    sc->fw_paging_db[0].fw_paging_block.map,
7442 		    0, IWM_FW_PAGING_SIZE,
7443 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
7444 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
7445 			bus_dmamap_sync(sc->sc_dmat,
7446 			    sc->fw_paging_db[i].fw_paging_block.map,
7447 			    0, IWM_PAGING_BLOCK_SIZE,
7448 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
7449 		}
7450 		return 0;
7451 	}
7452 
7453 	/* Ensure IWM_PAGING_BLOCK_SIZE is (1 << IWM_BLOCK_2_EXP_SIZE). */
7454 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
7455 #error IWM_BLOCK_2_EXP_SIZE must be the base-2 logarithm of IWM_PAGING_BLOCK_SIZE
7456 #endif
7457 
7458 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
7459 	sc->num_of_paging_blk =
7460 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
7461 
7462 	sc->num_of_pages_in_last_blk =
7463 		num_of_pages -
7464 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
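	/*
	 * Worked example (hypothetical numbers): a 340KB paging image
	 * holds 85 4KB pages, so (85 - 1) / 8 + 1 = 11 blocks of up to
	 * 8 pages are needed, and the last block holds 85 - 8 * 10 = 5
	 * pages.
	 */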
7465 
7466 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
7467 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
7468 	    sc->num_of_paging_blk,
7469 	    sc->num_of_pages_in_last_blk));
7470 
7471 	/* Allocate a 4KB block for the paging CSS. */
7472 	error = iwm_dma_contig_alloc(sc->sc_dmat,
7473 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
7474 	    4096);
7475 	if (error) {
7476 		/* free all the previous pages since we failed */
7477 		iwm_free_fw_paging(sc);
7478 		return ENOMEM;
7479 	}
7480 
7481 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
7482 
7483 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
7484 	    DEVNAME(sc)));
7485 
7486 	/*
7487 	 * Allocate the paging blocks in DRAM.
7488 	 * The CSS lives in fw_paging_db[0], so the loop starts at index 1.
7489 	 */
7490 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
7491 		/* Allocate a block of IWM_PAGING_BLOCK_SIZE (32KB). */
7493 		error = iwm_dma_contig_alloc(sc->sc_dmat,
7494 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
7495 		    IWM_PAGING_BLOCK_SIZE, 4096);
7496 		if (error) {
7497 			/* free all the previous pages since we failed */
7498 			iwm_free_fw_paging(sc);
7499 			return ENOMEM;
7500 		}
7501 
7502 		sc->fw_paging_db[blk_idx].fw_paging_size =
7503 		    IWM_PAGING_BLOCK_SIZE;
7504 
7505 		DPRINTF((
7506 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
7507 		    DEVNAME(sc)));
7508 	}
7509 
7510 	return 0;
7511 }
7512 
7513 int
7514 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
7515 {
7516 	int ret;
7517 
7518 	ret = iwm_alloc_fw_paging_mem(sc, fw);
7519 	if (ret)
7520 		return ret;
7521 
7522 	return iwm_fill_paging_mem(sc, fw);
7523 }
7524 
7525 /* Send the paging command to firmware in case CPU2 has a paging image. */
7526 int
7527 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
7528 {
7529 	int blk_idx;
7530 	uint32_t dev_phy_addr;
7531 	struct iwm_fw_paging_cmd fw_paging_cmd = {
7532 		.flags =
7533 			htole32(IWM_PAGING_CMD_IS_SECURED |
7534 				IWM_PAGING_CMD_IS_ENABLED |
7535 				(sc->num_of_pages_in_last_blk <<
7536 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
7537 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
7538 		.block_num = htole32(sc->num_of_paging_blk),
7539 	};
7540 
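	/*
	 * The firmware takes page-aligned physical addresses; only the
	 * page frame number (paddr >> IWM_PAGE_2_EXP_SIZE) is sent.
	 */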
7541 	/* Loop over all paging blocks + the CSS block. */
7542 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
7543 		dev_phy_addr = htole32(
7544 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
7545 		    IWM_PAGE_2_EXP_SIZE);
7546 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
7547 		bus_dmamap_sync(sc->sc_dmat,
7548 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
7549 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
7550 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
7551 	}
7552 
7553 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
7554 					       IWM_LONG_GROUP, 0),
7555 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
7556 }
7557 
7558 int
7559 iwm_init_hw(struct iwm_softc *sc)
7560 {
7561 	struct ieee80211com *ic = &sc->sc_ic;
7562 	int err, i, ac, qid;
7563 
7564 	err = iwm_preinit(sc);
7565 	if (err)
7566 		return err;
7567 
7568 	err = iwm_start_hw(sc);
7569 	if (err) {
7570 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7571 		return err;
7572 	}
7573 
7574 	err = iwm_run_init_mvm_ucode(sc, 0);
7575 	if (err)
7576 		return err;
7577 
7578 	/* Stop and restart the hardware since the INIT image has just run. */
7579 	iwm_stop_device(sc);
7580 	err = iwm_start_hw(sc);
7581 	if (err) {
7582 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7583 		return err;
7584 	}
7585 
7586 	/* Restart, this time with the regular firmware */
7587 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
7588 	if (err) {
7589 		printf("%s: could not load firmware\n", DEVNAME(sc));
7590 		goto err;
7591 	}
7592 
7593 	if (!iwm_nic_lock(sc))
7594 		return EBUSY;
7595 
7596 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
7597 	if (err) {
7598 		printf("%s: could not init tx ant config (error %d)\n",
7599 		    DEVNAME(sc), err);
7600 		goto err;
7601 	}
7602 
7603 	err = iwm_send_phy_db_data(sc);
7604 	if (err) {
7605 		printf("%s: could not init phy db (error %d)\n",
7606 		    DEVNAME(sc), err);
7607 		goto err;
7608 	}
7609 
7610 	err = iwm_send_phy_cfg_cmd(sc);
7611 	if (err) {
7612 		printf("%s: could not send phy config (error %d)\n",
7613 		    DEVNAME(sc), err);
7614 		goto err;
7615 	}
7616 
7617 	err = iwm_send_bt_init_conf(sc);
7618 	if (err) {
7619 		printf("%s: could not init bt coex (error %d)\n",
7620 		    DEVNAME(sc), err);
7621 		return err;
7622 	}
7623 
7624 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7625 		err = iwm_send_dqa_cmd(sc);
7626 		if (err)
7627 			return err;
7628 	}
7629 
7630 	/* Add auxiliary station for scanning */
7631 	err = iwm_add_aux_sta(sc);
7632 	if (err) {
7633 		printf("%s: could not add aux station (error %d)\n",
7634 		    DEVNAME(sc), err);
7635 		goto err;
7636 	}
7637 
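	/*
	 * This driver currently uses only a single PHY context, hence
	 * the single-iteration loop.
	 */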
7638 	for (i = 0; i < 1; i++) {
7639 		/*
7640 		 * The channel used here isn't relevant as it's
7641 		 * going to be overwritten in the other flows.
7642 		 * For now use the first channel we have.
7643 		 */
7644 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
7645 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
7646 		    IWM_FW_CTXT_ACTION_ADD, 0);
7647 		if (err) {
7648 			printf("%s: could not add phy context %d (error %d)\n",
7649 			    DEVNAME(sc), i, err);
7650 			goto err;
7651 		}
7652 	}
7653 
7654 	/* Initialize tx backoffs to the minimum. */
7655 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
7656 		iwm_tt_tx_backoff(sc, 0);
7657 
7659 	err = iwm_config_ltr(sc);
7660 	if (err) {
7661 		printf("%s: PCIe LTR configuration failed (error %d)\n",
7662 		    DEVNAME(sc), err);
7663 	}
7664 
7665 	err = iwm_power_update_device(sc);
7666 	if (err) {
7667 		printf("%s: could not send power command (error %d)\n",
7668 		    DEVNAME(sc), err);
7669 		goto err;
7670 	}
7671 
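	/*
	 * "ZZ" is a placeholder country code which asks the firmware
	 * for its current (world-wide default) regulatory domain.
	 */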
7672 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
7673 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
7674 		if (err) {
7675 			printf("%s: could not init LAR (error %d)\n",
7676 			    DEVNAME(sc), err);
7677 			goto err;
7678 		}
7679 	}
7680 
7681 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
7682 		err = iwm_config_umac_scan(sc);
7683 		if (err) {
7684 			printf("%s: could not configure scan (error %d)\n",
7685 			    DEVNAME(sc), err);
7686 			goto err;
7687 		}
7688 	}
7689 
7690 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7691 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7692 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7693 		else
7694 			qid = IWM_AUX_QUEUE;
7695 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
7696 		    iwm_ac_to_tx_fifo[EDCA_AC_BE]);
7697 		if (err) {
7698 			printf("%s: could not enable monitor inject Tx queue "
7699 			    "(error %d)\n", DEVNAME(sc), err);
7700 			goto err;
7701 		}
7702 	} else {
7703 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7704 			if (isset(sc->sc_enabled_capa,
7705 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7706 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
7707 			else
7708 				qid = ac;
7709 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
7710 			    iwm_ac_to_tx_fifo[ac]);
7711 			if (err) {
7712 				printf("%s: could not enable Tx queue %d "
7713 				    "(error %d)\n", DEVNAME(sc), ac, err);
7714 				goto err;
7715 			}
7716 		}
7717 	}
7718 
7719 	err = iwm_disable_beacon_filter(sc);
7720 	if (err) {
7721 		printf("%s: could not disable beacon filter (error %d)\n",
7722 		    DEVNAME(sc), err);
7723 		goto err;
7724 	}
7725 
7726 err:
7727 	iwm_nic_unlock(sc);
7728 	return err;
7729 }
7730 
7731 /* Allow multicast from our BSSID. */
7732 int
7733 iwm_allow_mcast(struct iwm_softc *sc)
7734 {
7735 	struct ieee80211com *ic = &sc->sc_ic;
7736 	struct ieee80211_node *ni = ic->ic_bss;
7737 	struct iwm_mcast_filter_cmd *cmd;
7738 	size_t size;
7739 	int err;
7740 
7741 	size = roundup(sizeof(*cmd), 4);
7742 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
7743 	if (cmd == NULL)
7744 		return ENOMEM;
7745 	cmd->filter_own = 1;
7746 	cmd->port_id = 0;
7747 	cmd->count = 0;
7748 	cmd->pass_all = 1;
7749 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
7750 
7751 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
7752 	    0, size, cmd);
7753 	free(cmd, M_DEVBUF, size);
7754 	return err;
7755 }
7756 
7757 int
7758 iwm_init(struct ifnet *ifp)
7759 {
7760 	struct iwm_softc *sc = ifp->if_softc;
7761 	struct ieee80211com *ic = &sc->sc_ic;
7762 	int err, generation;
7763 
7764 	rw_assert_wrlock(&sc->ioctl_rwl);
7765 
7766 	generation = ++sc->sc_generation;
7767 
7768 	KASSERT(sc->task_refs.refs == 0);
7769 	refcnt_init(&sc->task_refs);
7770 
7771 	err = iwm_init_hw(sc);
7772 	if (err) {
7773 		if (generation == sc->sc_generation)
7774 			iwm_stop(ifp);
7775 		return err;
7776 	}
7777 
7778 	ifq_clr_oactive(&ifp->if_snd);
7779 	ifp->if_flags |= IFF_RUNNING;
7780 
7781 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7782 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
7783 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
7784 		return 0;
7785 	}
7786 
7787 	ieee80211_begin_scan(ifp);
7788 
7789 	/*
7790 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
7791 	 * Wait until the transition to SCAN state has completed.
7792 	 */
7793 	do {
7794 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
7795 		    SEC_TO_NSEC(1));
7796 		if (generation != sc->sc_generation)
7797 			return ENXIO;
7798 		if (err)
7799 			return err;
7800 	} while (ic->ic_state != IEEE80211_S_SCAN);
7801 
7802 	return 0;
7803 }
7804 
7805 void
7806 iwm_start(struct ifnet *ifp)
7807 {
7808 	struct iwm_softc *sc = ifp->if_softc;
7809 	struct ieee80211com *ic = &sc->sc_ic;
7810 	struct ieee80211_node *ni;
7811 	struct ether_header *eh;
7812 	struct mbuf *m;
7813 	int ac = EDCA_AC_BE; /* XXX */
7814 
7815 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
7816 		return;
7817 
7818 	for (;;) {
7819 		/* why isn't this done per-queue? */
7820 		if (sc->qfullmsk != 0) {
7821 			ifq_set_oactive(&ifp->if_snd);
7822 			break;
7823 		}
7824 
7825 		/* need to send management frames even if we're not RUNning */
7826 		m = mq_dequeue(&ic->ic_mgtq);
7827 		if (m) {
7828 			ni = m->m_pkthdr.ph_cookie;
7829 			goto sendit;
7830 		}
7831 
7832 		if (ic->ic_state != IEEE80211_S_RUN ||
7833 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
7834 			break;
7835 
7836 		IFQ_DEQUEUE(&ifp->if_snd, m);
7837 		if (!m)
7838 			break;
7839 		if (m->m_len < sizeof (*eh) &&
7840 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
7841 			ifp->if_oerrors++;
7842 			continue;
7843 		}
7844 #if NBPFILTER > 0
7845 		if (ifp->if_bpf != NULL)
7846 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
7847 #endif
7848 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
7849 			ifp->if_oerrors++;
7850 			continue;
7851 		}
7852 
7853  sendit:
7854 #if NBPFILTER > 0
7855 		if (ic->ic_rawbpf != NULL)
7856 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
7857 #endif
7858 		if (iwm_tx(sc, m, ni, ac) != 0) {
7859 			ieee80211_release_node(ic, ni);
7860 			ifp->if_oerrors++;
7861 			continue;
7862 		}
7863 
7864 		if (ifp->if_flags & IFF_UP) {
7865 			sc->sc_tx_timer = 15;
7866 			ifp->if_timer = 1;
7867 		}
7868 	}
7871 }
7872 
7873 void
7874 iwm_stop(struct ifnet *ifp)
7875 {
7876 	struct iwm_softc *sc = ifp->if_softc;
7877 	struct ieee80211com *ic = &sc->sc_ic;
7878 	struct iwm_node *in = (void *)ic->ic_bss;
7879 	int i, s = splnet();
7880 
7881 	rw_assert_wrlock(&sc->ioctl_rwl);
7882 
7883 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
7884 
7885 	/* Cancel scheduled tasks and let any stale tasks finish up. */
7886 	task_del(systq, &sc->init_task);
7887 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
7888 	iwm_del_task(sc, systq, &sc->setrates_task);
7889 	iwm_del_task(sc, systq, &sc->ba_task);
7890 	iwm_del_task(sc, systq, &sc->htprot_task);
7891 	KASSERT(sc->task_refs.refs >= 1);
7892 	refcnt_finalize(&sc->task_refs, "iwmstop");
7893 
7894 	iwm_stop_device(sc);
7895 
7896 	/* Reset soft state. */
7897 
7898 	sc->sc_generation++;
7899 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
7900 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
7901 		sc->sc_cmd_resp_pkt[i] = NULL;
7902 		sc->sc_cmd_resp_len[i] = 0;
7903 	}
7904 	ifp->if_flags &= ~IFF_RUNNING;
7905 	ifq_clr_oactive(&ifp->if_snd);
7906 
7907 	in->in_phyctxt = NULL;
7908 	if (ic->ic_state == IEEE80211_S_RUN)
7909 		ieee80211_mira_cancel_timeouts(&in->in_mn); /* XXX refcount? */
7910 
7911 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
7912 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
7913 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
7914 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
7915 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
7916 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
7917 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
7918 
7919 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
7920 
7921 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
7922 	iwm_led_blink_stop(sc);
7923 	ifp->if_timer = sc->sc_tx_timer = 0;
7924 
7925 	splx(s);
7926 }
7927 
7928 void
7929 iwm_watchdog(struct ifnet *ifp)
7930 {
7931 	struct iwm_softc *sc = ifp->if_softc;
7932 
7933 	ifp->if_timer = 0;
7934 	if (sc->sc_tx_timer > 0) {
7935 		if (--sc->sc_tx_timer == 0) {
7936 			printf("%s: device timeout\n", DEVNAME(sc));
7937 #ifdef IWM_DEBUG
7938 			iwm_nic_error(sc);
7939 #endif
7940 			if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
7941 				task_add(systq, &sc->init_task);
7942 			ifp->if_oerrors++;
7943 			return;
7944 		}
7945 		ifp->if_timer = 1;
7946 	}
7947 
7948 	ieee80211_watchdog(ifp);
7949 }
7950 
7951 int
7952 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
7953 {
7954 	struct iwm_softc *sc = ifp->if_softc;
7955 	int s, err = 0, generation = sc->sc_generation;
7956 
7957 	/*
7958 	 * Prevent processes from entering this function while another
7959 	 * process is tsleep'ing in it.
7960 	 */
7961 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
7962 	if (err == 0 && generation != sc->sc_generation) {
7963 		rw_exit(&sc->ioctl_rwl);
7964 		return ENXIO;
7965 	}
7966 	if (err)
7967 		return err;
7968 	s = splnet();
7969 
7970 	switch (cmd) {
7971 	case SIOCSIFADDR:
7972 		ifp->if_flags |= IFF_UP;
7973 		/* FALLTHROUGH */
7974 	case SIOCSIFFLAGS:
7975 		if (ifp->if_flags & IFF_UP) {
7976 			if (!(ifp->if_flags & IFF_RUNNING)) {
7977 				err = iwm_init(ifp);
7978 			}
7979 		} else {
7980 			if (ifp->if_flags & IFF_RUNNING)
7981 				iwm_stop(ifp);
7982 		}
7983 		break;
7984 
7985 	default:
7986 		err = ieee80211_ioctl(ifp, cmd, data);
7987 	}
7988 
7989 	if (err == ENETRESET) {
7990 		err = 0;
7991 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7992 		    (IFF_UP | IFF_RUNNING)) {
7993 			iwm_stop(ifp);
7994 			err = iwm_init(ifp);
7995 		}
7996 	}
7997 
7998 	splx(s);
7999 	rw_exit(&sc->ioctl_rwl);
8000 
8001 	return err;
8002 }
8003 
8004 #ifdef IWM_DEBUG
8005 /*
8006  * Note: This structure is read from the device with IO accesses,
8007  * and the reading already does the endian conversion. As it is
8008  * read with uint32_t-sized accesses, any members with a different size
8009  * need to be ordered correctly though!
8010  */
8011 struct iwm_error_event_table {
8012 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8013 	uint32_t error_id;		/* type of error */
8014 	uint32_t trm_hw_status0;	/* TRM HW status */
8015 	uint32_t trm_hw_status1;	/* TRM HW status */
8016 	uint32_t blink2;		/* branch link */
8017 	uint32_t ilink1;		/* interrupt link */
8018 	uint32_t ilink2;		/* interrupt link */
8019 	uint32_t data1;		/* error-specific data */
8020 	uint32_t data2;		/* error-specific data */
8021 	uint32_t data3;		/* error-specific data */
8022 	uint32_t bcon_time;		/* beacon timer */
8023 	uint32_t tsf_low;		/* network timestamp function timer */
8024 	uint32_t tsf_hi;		/* network timestamp function timer */
8025 	uint32_t gp1;		/* GP1 timer register */
8026 	uint32_t gp2;		/* GP2 timer register */
8027 	uint32_t fw_rev_type;	/* firmware revision type */
8028 	uint32_t major;		/* uCode version major */
8029 	uint32_t minor;		/* uCode version minor */
8030 	uint32_t hw_ver;		/* HW Silicon version */
8031 	uint32_t brd_ver;		/* HW board version */
8032 	uint32_t log_pc;		/* log program counter */
8033 	uint32_t frame_ptr;		/* frame pointer */
8034 	uint32_t stack_ptr;		/* stack pointer */
8035 	uint32_t hcmd;		/* last host command header */
8036 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8037 				 * rxtx_flag */
8038 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8039 				 * host_flag */
8040 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8041 				 * enc_flag */
8042 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8043 				 * time_flag */
8044 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8045 				 * wico interrupt */
8046 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8047 	uint32_t wait_event;		/* wait event() caller address */
8048 	uint32_t l2p_control;	/* L2pControlField */
8049 	uint32_t l2p_duration;	/* L2pDurationField */
8050 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8051 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8052 	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
8053 				 * (LMPM_PMG_SEL) */
8054 	uint32_t u_timestamp;	/* date and time of the firmware
8055 				 * compilation */
8056 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8057 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8058 
8059 /*
8060  * UMAC error struct - relevant starting from family 8000 chip.
8061  * Note: This structure is read from the device with IO accesses,
8062  * and the reading already does the endian conversion. As it is
8063  * read with u32-sized accesses, any members with a different size
8064  * need to be ordered correctly though!
8065  */
8066 struct iwm_umac_error_event_table {
8067 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8068 	uint32_t error_id;	/* type of error */
8069 	uint32_t blink1;	/* branch link */
8070 	uint32_t blink2;	/* branch link */
8071 	uint32_t ilink1;	/* interrupt link */
8072 	uint32_t ilink2;	/* interrupt link */
8073 	uint32_t data1;		/* error-specific data */
8074 	uint32_t data2;		/* error-specific data */
8075 	uint32_t data3;		/* error-specific data */
8076 	uint32_t umac_major;
8077 	uint32_t umac_minor;
8078 	uint32_t frame_pointer;	/* core register 27 */
8079 	uint32_t stack_pointer;	/* core register 28 */
8080 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8081 	uint32_t nic_isr_pref;	/* ISR status register */
8082 } __packed;
8083 
8084 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
8085 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8086 
8087 void
8088 iwm_nic_umac_error(struct iwm_softc *sc)
8089 {
8090 	struct iwm_umac_error_event_table table;
8091 	uint32_t base;
8092 
8093 	base = sc->sc_uc.uc_umac_error_event_table;
8094 
8095 	if (base < 0x800000) {
8096 		printf("%s: Invalid error log pointer 0x%08x\n",
8097 		    DEVNAME(sc), base);
8098 		return;
8099 	}
8100 
8101 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8102 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8103 		return;
8104 	}
8105 
8106 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8107 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
8108 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8109 			sc->sc_flags, table.valid);
8110 	}
8111 
8112 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
8113 		iwm_desc_lookup(table.error_id));
8114 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
8115 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
8116 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
8117 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
8118 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
8119 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
8120 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
8121 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
8122 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
8123 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
8124 	    table.frame_pointer);
8125 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
8126 	    table.stack_pointer);
8127 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
8128 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
8129 	    table.nic_isr_pref);
8130 }
8131 
8132 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
8133 static struct {
8134 	const char *name;
8135 	uint8_t num;
8136 } advanced_lookup[] = {
8137 	{ "NMI_INTERRUPT_WDG", 0x34 },
8138 	{ "SYSASSERT", 0x35 },
8139 	{ "UCODE_VERSION_MISMATCH", 0x37 },
8140 	{ "BAD_COMMAND", 0x38 },
8141 	{ "BAD_COMMAND", 0x39 },
8142 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
8143 	{ "FATAL_ERROR", 0x3D },
8144 	{ "NMI_TRM_HW_ERR", 0x46 },
8145 	{ "NMI_INTERRUPT_TRM", 0x4C },
8146 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
8147 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
8148 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
8149 	{ "NMI_INTERRUPT_HOST", 0x66 },
8150 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
8151 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
8152 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
8153 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
8154 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
8155 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
8156 	{ "ADVANCED_SYSASSERT", 0 },
8157 };
8158 
8159 const char *
8160 iwm_desc_lookup(uint32_t num)
8161 {
8162 	int i;
8163 
8164 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8165 		if (advanced_lookup[i].num ==
8166 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
8167 			return advanced_lookup[i].name;
8168 
8169 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8170 	return advanced_lookup[i].name;
8171 }
8172 
8173 /*
8174  * Support for dumping the error log seemed like a good idea ...
8175  * but it's mostly hex junk and the only sensible thing is the
8176  * hw/ucode revision (which we know anyway).  Since it's here,
8177  * I'll just leave it in, just in case e.g. the Intel guys want to
8178  * help us decipher some "ADVANCED_SYSASSERT" later.
8179  */
8180 void
8181 iwm_nic_error(struct iwm_softc *sc)
8182 {
8183 	struct iwm_error_event_table table;
8184 	uint32_t base;
8185 
8186 	printf("%s: dumping device error log\n", DEVNAME(sc));
8187 	base = sc->sc_uc.uc_error_event_table;
8188 	if (base < 0x800000) {
8189 		printf("%s: Invalid error log pointer 0x%08x\n",
8190 		    DEVNAME(sc), base);
8191 		return;
8192 	}
8193 
8194 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8195 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8196 		return;
8197 	}
8198 
8199 	if (!table.valid) {
8200 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8201 		return;
8202 	}
8203 
8204 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8205 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8206 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8207 		    sc->sc_flags, table.valid);
8208 	}
8209 
8210 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8211 	    iwm_desc_lookup(table.error_id));
8212 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8213 	    table.trm_hw_status0);
8214 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8215 	    table.trm_hw_status1);
8216 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8217 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8218 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8219 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8220 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8221 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8222 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8223 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8224 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8225 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8226 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8227 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8228 	    table.fw_rev_type);
8229 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8230 	    table.major);
8231 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8232 	    table.minor);
8233 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8234 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8235 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8236 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8237 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8238 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8239 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8240 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8241 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8242 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8243 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8244 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8245 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8246 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8247 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8248 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8249 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8250 
8251 	if (sc->sc_uc.uc_umac_error_event_table)
8252 		iwm_nic_umac_error(sc);
8253 }
8254 #endif
8255 
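/*
 * Sync the DMA map for the response payload which follows the packet
 * header, then point the given variable at that payload.
 */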
8256 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
8257 do {									\
8258 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8259 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
8260 	_var_ = (void *)((_pkt_)+1);					\
8261 } while (/*CONSTCOND*/0)
8262 
8263 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
8264 do {									\
8265 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8266 	    (_len_), BUS_DMASYNC_POSTREAD);				\
8267 	_ptr_ = (void *)((_pkt_)+1);					\
8268 } while (/*CONSTCOND*/0)
8269 
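/* Note: this macro relies on a local variable named 'count'. */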
8270 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
8271 
8272 int
8273 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
8274 {
8275 	int qid, idx, code;
8276 
8277 	qid = pkt->hdr.qid & ~0x80;
8278 	idx = pkt->hdr.idx;
8279 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8280 
8281 	return (!(qid == 0 && idx == 0 && code == 0) &&
8282 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
8283 }
8284 
8285 void
8286 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
8287 {
8288 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8289 	struct iwm_rx_packet *pkt, *nextpkt;
8290 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8291 	struct mbuf *m0, *m;
8292 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8293 	size_t remain = IWM_RBUF_SIZE;
8294 	int qid, idx, code, handled = 1;
8295 
8296 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
8297 	    BUS_DMASYNC_POSTREAD);
8298 
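	/*
	 * A single RX buffer may contain several packets stored back to
	 * back, each aligned to IWM_FH_RSCSR_FRAME_ALIGN; walk them all.
	 */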
8299 	m0 = data->m;
8300 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
8301 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
8302 		qid = pkt->hdr.qid;
8303 		idx = pkt->hdr.idx;
8304 
8305 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8306 
8307 		if (!iwm_rx_pkt_valid(pkt))
8308 			break;
8309 
8310 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
8311 		if (len < sizeof(pkt->hdr) ||
8312 		    len > (IWM_RBUF_SIZE - offset - minsz))
8313 			break;
8314 
8315 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
8316 			/* Take mbuf m0 off the RX ring. */
8317 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
8318 				ifp->if_ierrors++;
8319 				break;
8320 			}
8321 			KASSERT(data->m != m0);
8322 		}
8323 
8324 		switch (code) {
8325 		case IWM_REPLY_RX_PHY_CMD:
8326 			iwm_rx_rx_phy_cmd(sc, pkt, data);
8327 			break;
8328 
8329 		case IWM_REPLY_RX_MPDU_CMD: {
8330 			size_t maxlen = remain - minsz;
8331 			nextoff = offset +
8332 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
8333 			nextpkt = (struct iwm_rx_packet *)
8334 			    (m0->m_data + nextoff);
8335 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
8336 			    !iwm_rx_pkt_valid(nextpkt)) {
8337 				/* No need to copy last frame in buffer. */
8338 				if (offset > 0)
8339 					m_adj(m0, offset);
8340 				if (sc->sc_mqrx_supported)
8341 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
8342 					    maxlen, ml);
8343 				else
8344 					iwm_rx_mpdu(sc, m0, pkt->data,
8345 					    maxlen, ml);
8346 				m0 = NULL; /* stack owns m0 now; abort loop */
8347 			} else {
8348 				/*
8349 				 * Create an mbuf which points to the current
8350 				 * packet. Always copy from offset zero to
8351 				 * preserve m_pkthdr.
8352 				 */
8353 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
8354 				if (m == NULL) {
8355 					ifp->if_ierrors++;
8356 					m_freem(m0);
8357 					m0 = NULL;
8358 					break;
8359 				}
8360 				m_adj(m, offset);
8361 				if (sc->sc_mqrx_supported)
8362 					iwm_rx_mpdu_mq(sc, m, pkt->data,
8363 					    maxlen, ml);
8364 				else
8365 					iwm_rx_mpdu(sc, m, pkt->data,
8366 					    maxlen, ml);
8367 			}
8368 
8369 			if (offset + minsz < remain)
8370 				remain -= offset;
8371 			else
8372 				remain = minsz;
8373 			break;
8374 		}
8375 
8376 		case IWM_TX_CMD:
8377 			iwm_rx_tx_cmd(sc, pkt, data);
8378 			break;
8379 
8380 		case IWM_MISSED_BEACONS_NOTIFICATION:
8381 			iwm_rx_bmiss(sc, pkt, data);
8382 			break;
8383 
8384 		case IWM_MFUART_LOAD_NOTIFICATION:
8385 			break;
8386 
8387 		case IWM_ALIVE: {
8388 			struct iwm_alive_resp_v1 *resp1;
8389 			struct iwm_alive_resp_v2 *resp2;
8390 			struct iwm_alive_resp_v3 *resp3;
8391 
8392 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
8393 				SYNC_RESP_STRUCT(resp1, pkt);
8394 				sc->sc_uc.uc_error_event_table
8395 				    = le32toh(resp1->error_event_table_ptr);
8396 				sc->sc_uc.uc_log_event_table
8397 				    = le32toh(resp1->log_event_table_ptr);
8398 				sc->sched_base = le32toh(resp1->scd_base_ptr);
8399 				if (resp1->status == IWM_ALIVE_STATUS_OK)
8400 					sc->sc_uc.uc_ok = 1;
8401 				else
8402 					sc->sc_uc.uc_ok = 0;
8403 			}
8404 
8405 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
8406 				SYNC_RESP_STRUCT(resp2, pkt);
8407 				sc->sc_uc.uc_error_event_table
8408 				    = le32toh(resp2->error_event_table_ptr);
8409 				sc->sc_uc.uc_log_event_table
8410 				    = le32toh(resp2->log_event_table_ptr);
8411 				sc->sched_base = le32toh(resp2->scd_base_ptr);
8412 				sc->sc_uc.uc_umac_error_event_table
8413 				    = le32toh(resp2->error_info_addr);
8414 				if (resp2->status == IWM_ALIVE_STATUS_OK)
8415 					sc->sc_uc.uc_ok = 1;
8416 				else
8417 					sc->sc_uc.uc_ok = 0;
8418 			}
8419 
8420 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
8421 				SYNC_RESP_STRUCT(resp3, pkt);
8422 				sc->sc_uc.uc_error_event_table
8423 				    = le32toh(resp3->error_event_table_ptr);
8424 				sc->sc_uc.uc_log_event_table
8425 				    = le32toh(resp3->log_event_table_ptr);
8426 				sc->sched_base = le32toh(resp3->scd_base_ptr);
8427 				sc->sc_uc.uc_umac_error_event_table
8428 				    = le32toh(resp3->error_info_addr);
8429 				if (resp3->status == IWM_ALIVE_STATUS_OK)
8430 					sc->sc_uc.uc_ok = 1;
8431 				else
8432 					sc->sc_uc.uc_ok = 0;
8433 			}
8434 
8435 			sc->sc_uc.uc_intr = 1;
8436 			wakeup(&sc->sc_uc);
8437 			break;
8438 		}
8439 
8440 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
8441 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
8442 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
8443 			iwm_phy_db_set_section(sc, phy_db_notif);
8444 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
8445 			wakeup(&sc->sc_init_complete);
8446 			break;
8447 		}
8448 
8449 		case IWM_STATISTICS_NOTIFICATION: {
8450 			struct iwm_notif_statistics *stats;
8451 			SYNC_RESP_STRUCT(stats, pkt);
8452 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
8453 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
8454 			break;
8455 		}
8456 
8457 		case IWM_MCC_CHUB_UPDATE_CMD: {
8458 			struct iwm_mcc_chub_notif *notif;
8459 			SYNC_RESP_STRUCT(notif, pkt);
8460 
8461 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
8462 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
8463 			sc->sc_fw_mcc[2] = '\0';
			break;
8464 		}
8465 
8466 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
8467 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
8468 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
8469 			break;
8470 
8471 		case IWM_PHY_CONFIGURATION_CMD:
8472 		case IWM_TX_ANT_CONFIGURATION_CMD:
8473 		case IWM_ADD_STA:
8474 		case IWM_MAC_CONTEXT_CMD:
8475 		case IWM_REPLY_SF_CFG_CMD:
8476 		case IWM_POWER_TABLE_CMD:
8477 		case IWM_LTR_CONFIG:
8478 		case IWM_PHY_CONTEXT_CMD:
8479 		case IWM_BINDING_CONTEXT_CMD:
8480 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
8481 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
8482 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
8483 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
8484 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
8485 		case IWM_REPLY_BEACON_FILTERING_CMD:
8486 		case IWM_MAC_PM_POWER_TABLE:
8487 		case IWM_TIME_QUOTA_CMD:
8488 		case IWM_REMOVE_STA:
8489 		case IWM_TXPATH_FLUSH:
8490 		case IWM_LQ_CMD:
8491 		case IWM_WIDE_ID(IWM_LONG_GROUP,
8492 				 IWM_FW_PAGING_BLOCK_CMD):
8493 		case IWM_BT_CONFIG:
8494 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
8495 		case IWM_NVM_ACCESS_CMD:
8496 		case IWM_MCC_UPDATE_CMD:
8497 		case IWM_TIME_EVENT_CMD: {
8498 			size_t pkt_len;
8499 
8500 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
8501 				break;
8502 
8503 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
8504 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8505 
8506 			pkt_len = sizeof(pkt->len_n_flags) +
8507 			    iwm_rx_packet_len(pkt);
8508 
8509 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
8510 			    pkt_len < sizeof(*pkt) ||
8511 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
8512 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
8513 				    sc->sc_cmd_resp_len[idx]);
8514 				sc->sc_cmd_resp_pkt[idx] = NULL;
8515 				break;
8516 			}
8517 
8518 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
8519 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8520 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
8521 			break;
8522 		}
8523 
8524 		/* ignore */
8525 		case IWM_PHY_DB_CMD:
8526 			break;
8527 
8528 		case IWM_INIT_COMPLETE_NOTIF:
8529 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
8530 			wakeup(&sc->sc_init_complete);
8531 			break;
8532 
8533 		case IWM_SCAN_OFFLOAD_COMPLETE: {
8534 			struct iwm_periodic_scan_complete *notif;
8535 			SYNC_RESP_STRUCT(notif, pkt);
8536 			break;
8537 		}
8538 
8539 		case IWM_SCAN_ITERATION_COMPLETE: {
8540 			struct iwm_lmac_scan_complete_notif *notif;
8541 			SYNC_RESP_STRUCT(notif, pkt);
8542 			iwm_endscan(sc);
8543 			break;
8544 		}
8545 
8546 		case IWM_SCAN_COMPLETE_UMAC: {
8547 			struct iwm_umac_scan_complete *notif;
8548 			SYNC_RESP_STRUCT(notif, pkt);
8549 			iwm_endscan(sc);
8550 			break;
8551 		}
8552 
8553 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
8554 			struct iwm_umac_scan_iter_complete_notif *notif;
8555 			SYNC_RESP_STRUCT(notif, pkt);
8556 			iwm_endscan(sc);
8557 			break;
8558 		}
8559 
8560 		case IWM_REPLY_ERROR: {
8561 			struct iwm_error_resp *resp;
8562 			SYNC_RESP_STRUCT(resp, pkt);
8563 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
8564 				DEVNAME(sc), le32toh(resp->error_type),
8565 				resp->cmd_id);
8566 			break;
8567 		}
8568 
8569 		case IWM_TIME_EVENT_NOTIFICATION: {
8570 			struct iwm_time_event_notif *notif;
8571 			uint32_t action;
8572 			SYNC_RESP_STRUCT(notif, pkt);
8573 
8574 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
8575 				break;
8576 			action = le32toh(notif->action);
8577 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
8578 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
8579 			break;
8580 		}
8581 
8582 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
8583 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
8584 			break;
8585 
8586 		/*
8587 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
8588 		 * messages. Just ignore them for now.
8589 		 */
8590 		case IWM_DEBUG_LOG_MSG:
8591 			break;
8592 
8593 		case IWM_MCAST_FILTER_CMD:
8594 			break;
8595 
8596 		case IWM_SCD_QUEUE_CFG: {
8597 			struct iwm_scd_txq_cfg_rsp *rsp;
8598 			SYNC_RESP_STRUCT(rsp, pkt);
8599 
8600 			break;
8601 		}
8602 
8603 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
8604 			break;
8605 
8606 		default:
8607 			handled = 0;
8608 			printf("%s: unhandled firmware response 0x%x/0x%x "
8609 			    "rx ring %d[%d]\n",
8610 			    DEVNAME(sc), code, pkt->len_n_flags,
8611 			    (qid & ~0x80), idx);
8612 			break;
8613 		}
8614 
8615 		/*
8616 		 * uCode sets bit 0x80 when it originates the notification,
8617 		 * i.e. when the notification is not a direct response to a
8618 		 * command sent by the driver.
8619 		 * For example, uCode issues IWM_REPLY_RX when it sends a
8620 		 * received frame to the driver.
8621 		 */
8622 		if (handled && !(qid & (1 << 7))) {
8623 			iwm_cmd_done(sc, qid, idx, code);
8624 		}
8625 
8626 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
8627 	}
8628 
8629 	if (m0 && m0 != data->m)
8630 		m_freem(m0);
8631 }
8632 
8633 void
8634 iwm_notif_intr(struct iwm_softc *sc)
8635 {
8636 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
8637 	uint32_t wreg;
8638 	uint16_t hw;
8639 	int count;
8640 
8641 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
8642 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
8643 
8644 	if (sc->sc_mqrx_supported) {
8645 		count = IWM_RX_MQ_RING_COUNT;
8646 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
8647 	} else {
8648 		count = IWM_RX_RING_COUNT;
8649 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
8650 	}
8651 
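	/*
	 * closed_rb_num is the index of the receive buffer the hardware
	 * filled most recently; process entries up to that index.
	 */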
8652 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
8653 	hw &= (count - 1);
8654 	while (sc->rxq.cur != hw) {
8655 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
8656 		iwm_rx_pkt(sc, data, &ml);
8657 		ADVANCE_RXQ(sc);
8658 	}
8659 	if_input(&sc->sc_ic.ic_if, &ml);
8660 
8661 	/*
8662 	 * Tell the firmware what we have processed.
8663 	 * Seems like the hardware gets upset unless we align the write by 8??
8664 	 */
8665 	hw = (hw == 0) ? count - 1 : hw - 1;
8666 	IWM_WRITE(sc, wreg, hw & ~7);
8667 }
8668 
8669 int
8670 iwm_intr(void *arg)
8671 {
8672 	struct iwm_softc *sc = arg;
8673 	int handled = 0;
8674 	int rv = 0;
8675 	uint32_t r1, r2;
8676 
8677 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
8678 
8679 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
8680 		uint32_t *ict = sc->ict_dma.vaddr;
8681 		int tmp;
8682 
8683 		tmp = htole32(ict[sc->ict_cur]);
8684 		if (!tmp)
8685 			goto out_ena;
8686 
8687 		/*
8688 		 * ok, there was something.  keep plowing until we have all.
8689 		 */
8690 		r1 = r2 = 0;
8691 		while (tmp) {
8692 			r1 |= tmp;
8693 			ict[sc->ict_cur] = 0;
8694 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
8695 			tmp = htole32(ict[sc->ict_cur]);
8696 		}
8697 
8698 		/* this is where the fun begins.  don't ask */
8699 		if (r1 == 0xffffffff)
8700 			r1 = 0;
8701 
8702 		/*
8703 		 * Workaround for hardware bug where bits are falsely cleared
8704 		 * when using interrupt coalescing.  Bit 15 should be set if
8705 		 * bits 18 and 19 are set.
8706 		 */
8707 		if (r1 & 0xc0000)
8708 			r1 |= 0x8000;
8709 
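		/*
		 * Reconstruct the CSR_INT bit layout from the condensed
		 * ICT format.
		 */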
8710 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
8711 	} else {
8712 		r1 = IWM_READ(sc, IWM_CSR_INT);
8713 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
8714 	}
8715 	if (r1 == 0 && r2 == 0) {
8716 		goto out_ena;
8717 	}
8718 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
8719 		goto out;
8720 
8721 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
8722 
8723 	/* ignored */
8724 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
8725 
8726 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
8727 		handled |= IWM_CSR_INT_BIT_RF_KILL;
8728 		iwm_check_rfkill(sc);
8729 		task_add(systq, &sc->init_task);
8730 		rv = 1;
8731 		goto out_ena;
8732 	}
8733 
8734 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
8735 #ifdef IWM_DEBUG
8736 		int i;
8737 
8738 		iwm_nic_error(sc);
8739 
8740 		/* Dump driver status (TX and RX rings) while we're here. */
8741 		DPRINTF(("driver status:\n"));
8742 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
8743 			struct iwm_tx_ring *ring = &sc->txq[i];
8744 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
8745 			    "queued=%-3d\n",
8746 			    i, ring->qid, ring->cur, ring->queued));
8747 		}
8748 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
8749 		DPRINTF(("  802.11 state %s\n",
8750 		    ieee80211_state_name[sc->sc_ic.ic_state]));
8751 #endif
8752 
8753 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8754 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8755 			task_add(systq, &sc->init_task);
8756 		rv = 1;
8757 		goto out;
8758 
8759 	}
8760 
8761 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
8762 		handled |= IWM_CSR_INT_BIT_HW_ERR;
8763 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
8764 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
8765 			sc->sc_flags |= IWM_FLAG_HW_ERR;
8766 			task_add(systq, &sc->init_task);
8767 		}
8768 		rv = 1;
8769 		goto out;
8770 	}
8771 
8772 	/* firmware chunk loaded */
8773 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
8774 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
8775 		handled |= IWM_CSR_INT_BIT_FH_TX;
8776 
8777 		sc->sc_fw_chunk_done = 1;
8778 		wakeup(&sc->sc_fw);
8779 	}
8780 
8781 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
8782 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
8783 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
8784 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
8785 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
8786 		}
8787 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
8788 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
8789 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
8790 		}
8791 
8792 		/* Disable periodic interrupt; we use it as just a one-shot. */
8793 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
8794 
8795 		/*
8796 		 * Enable the periodic interrupt in 8 msec only if we received
8797 		 * a real RX interrupt (instead of just the periodic one), to
8798 		 * catch any dangling RX interrupt.  If it was just the periodic
8799 		 * interrupt, there was no dangling RX activity, and no need
8800 		 * to extend the periodic interrupt; one-shot is enough.
8801 		 */
8802 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
8803 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
8804 			    IWM_CSR_INT_PERIODIC_ENA);
8805 
8806 		iwm_notif_intr(sc);
8807 	}
8808 
8809 	rv = 1;
8810 
8811  out_ena:
8812 	iwm_restore_interrupts(sc);
8813  out:
8814 	return rv;
8815 }
8816 
8817 int
8818 iwm_intr_msix(void *arg)
8819 {
8820 	struct iwm_softc *sc = arg;
8821 	uint32_t inta_fh, inta_hw;
8822 	int vector = 0;
8823 
8824 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
8825 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
8826 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
8827 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
8828 	inta_fh &= sc->sc_fh_mask;
8829 	inta_hw &= sc->sc_hw_mask;
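	/*
	 * Writing the just-read values back presumably acknowledges the
	 * pending causes (the registers behave as write-1-clear), and
	 * masking with the configured cause masks discards any bits this
	 * driver has not enabled.
	 */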
8830 
8831 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
8832 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
8833 		iwm_notif_intr(sc);
8834 	}
8835 
8836 	/* firmware chunk loaded */
8837 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
8838 		sc->sc_fw_chunk_done = 1;
8839 		wakeup(&sc->sc_fw);
8840 	}
8841 
8842 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
8843 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
8844 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
8845 #ifdef IWM_DEBUG
8846 		int i;
8847 
8848 		iwm_nic_error(sc);
8849 
8850 		/* Dump driver status (TX and RX rings) while we're here. */
8851 		DPRINTF(("driver status:\n"));
8852 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
8853 			struct iwm_tx_ring *ring = &sc->txq[i];
8854 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
8855 			    "queued=%-3d\n",
8856 			    i, ring->qid, ring->cur, ring->queued));
8857 		}
8858 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
8859 		DPRINTF(("  802.11 state %s\n",
8860 		    ieee80211_state_name[sc->sc_ic.ic_state]));
8861 #endif
8862 
8863 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8864 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8865 			task_add(systq, &sc->init_task);
8866 		return 1;
8867 	}
8868 
8869 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
8870 		iwm_check_rfkill(sc);
8871 		task_add(systq, &sc->init_task);
8872 	}
8873 
8874 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
8875 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
8876 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
8877 			sc->sc_flags |= IWM_FLAG_HW_ERR;
8878 			task_add(systq, &sc->init_task);
8879 		}
8880 		return 1;
8881 	}
8882 
8883 	/*
8884 	 * Before delivering an interrupt the HW masks it to prevent a
8885 	 * nested interrupt, by setting the corresponding bit in the mask
8886 	 * register. After the interrupt has been handled, that bit must
8887 	 * be cleared to re-enable delivery. The register is defined as
8888 	 * write-1-clear (W1C), meaning a bit is cleared by writing 1 to
8889 	 * it. Only vector 0 is used here, hence "1 << vector" below.
8890 	 */
8891 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
8892 	return 1;
8893 }
8894 
8895 typedef void *iwm_match_t;
8896 
8897 static const struct pci_matchid iwm_devices[] = {
8898 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
8899 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
8900 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
8901 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
8902 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
8903 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
8904 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
8905 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
8906 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
8907 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
8908 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
8909 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
8910 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
8911 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
8912 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
8913 };
8914 
8915 int
8916 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
8917 {
8918 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
8919 	    nitems(iwm_devices));
8920 }
8921 
8922 int
8923 iwm_preinit(struct iwm_softc *sc)
8924 {
8925 	struct ieee80211com *ic = &sc->sc_ic;
8926 	struct ifnet *ifp = IC2IFP(ic);
8927 	int err;
8928 	static int attached;
8929 
8930 	err = iwm_prepare_card_hw(sc);
8931 	if (err) {
8932 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8933 		return err;
8934 	}
8935 
8936 	if (attached) {
8937 		/* Update MAC in case the upper layers changed it. */
8938 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
8939 		    ((struct arpcom *)ifp)->ac_enaddr);
8940 		return 0;
8941 	}
8942 
8943 	err = iwm_start_hw(sc);
8944 	if (err) {
8945 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8946 		return err;
8947 	}
8948 
8949 	err = iwm_run_init_mvm_ucode(sc, 1);
8950 	iwm_stop_device(sc);
8951 	if (err)
8952 		return err;
8953 
8954 	/* Print version info and MAC address on first successful fw load. */
8955 	attached = 1;
8956 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
8957 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
8958 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
8959 
8960 	if (sc->sc_nvm.sku_cap_11n_enable)
8961 		iwm_setup_ht_rates(sc);
8962 
8963 	/* not all hardware can do 5GHz band */
8964 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
8965 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
8966 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
8967 
8968 	/* Configure channel information obtained from firmware. */
8969 	ieee80211_channel_init(ifp);
8970 
8971 	/* Configure MAC address. */
8972 	err = if_setlladdr(ifp, ic->ic_myaddr);
8973 	if (err)
8974 		printf("%s: could not set MAC address (error %d)\n",
8975 		    DEVNAME(sc), err);
8976 
8977 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
8978 
8979 	return 0;
8980 }
8981 
8982 void
8983 iwm_attach_hook(struct device *self)
8984 {
8985 	struct iwm_softc *sc = (void *)self;
8986 
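	/*
	 * This hook runs via config_mountroot(), i.e. after the root
	 * file system is mounted, so the firmware file can be loaded
	 * from disk (see the comment at the end of iwm_attach()).
	 */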
8987 	KASSERT(!cold);
8988 
8989 	iwm_preinit(sc);
8990 }
8991 
8992 void
8993 iwm_attach(struct device *parent, struct device *self, void *aux)
8994 {
8995 	struct iwm_softc *sc = (void *)self;
8996 	struct pci_attach_args *pa = aux;
8997 	pci_intr_handle_t ih;
8998 	pcireg_t reg, memtype;
8999 	struct ieee80211com *ic = &sc->sc_ic;
9000 	struct ifnet *ifp = &ic->ic_if;
9001 	const char *intrstr;
9002 	int err;
9003 	int txq_i, i;
9004 
9005 	sc->sc_pct = pa->pa_pc;
9006 	sc->sc_pcitag = pa->pa_tag;
9007 	sc->sc_dmat = pa->pa_dmat;
9008 
9009 	rw_init(&sc->ioctl_rwl, "iwmioctl");
9010 
9011 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
9012 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
9013 	if (err == 0) {
9014 		printf("%s: PCIe capability structure not found!\n",
9015 		    DEVNAME(sc));
9016 		return;
9017 	}
9018 
9019 	/* Clear device-specific "PCI retry timeout" register (41h). */
9020 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9021 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9022 
9023 	/* Enable bus-mastering and hardware bug workaround. */
9024 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
9025 	reg |= PCI_COMMAND_MASTER_ENABLE;
9026 	/* Ensure legacy INTx is not disabled; only relevant without MSI. */
9027 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
9028 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9029 	}
9030 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
9031 
9032 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
9033 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
9034 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
9035 	if (err) {
9036 		printf("%s: can't map mem space\n", DEVNAME(sc));
9037 		return;
9038 	}
9039 
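	/* Prefer MSI-X, then MSI; fall back to legacy INTx if neither maps. */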
9040 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
9041 		sc->sc_msix = 1;
9042 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
9043 		printf("%s: can't map interrupt\n", DEVNAME(sc));
9044 		return;
9045 	}
9046 
9047 	intrstr = pci_intr_string(sc->sc_pct, ih);
9048 	if (sc->sc_msix)
9049 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9050 		    iwm_intr_msix, sc, DEVNAME(sc));
9051 	else
9052 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9053 		    iwm_intr, sc, DEVNAME(sc));
9054 
9055 	if (sc->sc_ih == NULL) {
9056 		printf("\n");
9057 		printf("%s: can't establish interrupt", DEVNAME(sc));
9058 		if (intrstr != NULL)
9059 			printf(" at %s", intrstr);
9060 		printf("\n");
9061 		return;
9062 	}
9063 	printf(", %s\n", intrstr);
9064 
9065 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
9066 	switch (PCI_PRODUCT(pa->pa_id)) {
9067 	case PCI_PRODUCT_INTEL_WL_3160_1:
9068 	case PCI_PRODUCT_INTEL_WL_3160_2:
9069 		sc->sc_fwname = "iwm-3160-17";
9070 		sc->host_interrupt_operation_mode = 1;
9071 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9072 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9073 		sc->sc_nvm_max_section_size = 16384;
9074 		sc->nvm_type = IWM_NVM;
9075 		break;
9076 	case PCI_PRODUCT_INTEL_WL_3165_1:
9077 	case PCI_PRODUCT_INTEL_WL_3165_2:
9078 		sc->sc_fwname = "iwm-7265-17";
9079 		sc->host_interrupt_operation_mode = 0;
9080 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9081 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9082 		sc->sc_nvm_max_section_size = 16384;
9083 		sc->nvm_type = IWM_NVM;
9084 		break;
9085 	case PCI_PRODUCT_INTEL_WL_3168_1:
9086 		sc->sc_fwname = "iwm-3168-29";
9087 		sc->host_interrupt_operation_mode = 0;
9088 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9089 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9090 		sc->sc_nvm_max_section_size = 16384;
9091 		sc->nvm_type = IWM_NVM_SDP;
9092 		break;
9093 	case PCI_PRODUCT_INTEL_WL_7260_1:
9094 	case PCI_PRODUCT_INTEL_WL_7260_2:
9095 		sc->sc_fwname = "iwm-7260-17";
9096 		sc->host_interrupt_operation_mode = 1;
9097 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9098 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9099 		sc->sc_nvm_max_section_size = 16384;
9100 		sc->nvm_type = IWM_NVM;
9101 		break;
9102 	case PCI_PRODUCT_INTEL_WL_7265_1:
9103 	case PCI_PRODUCT_INTEL_WL_7265_2:
9104 		sc->sc_fwname = "iwm-7265-17";
9105 		sc->host_interrupt_operation_mode = 0;
9106 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9107 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9108 		sc->sc_nvm_max_section_size = 16384;
9109 		sc->nvm_type = IWM_NVM;
9110 		break;
9111 	case PCI_PRODUCT_INTEL_WL_8260_1:
9112 	case PCI_PRODUCT_INTEL_WL_8260_2:
9113 		sc->sc_fwname = "iwm-8000C-34";
9114 		sc->host_interrupt_operation_mode = 0;
9115 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
9116 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9117 		sc->sc_nvm_max_section_size = 32768;
9118 		sc->nvm_type = IWM_NVM_EXT;
9119 		break;
9120 	case PCI_PRODUCT_INTEL_WL_8265_1:
9121 		sc->sc_fwname = "iwm-8265-34";
9122 		sc->host_interrupt_operation_mode = 0;
9123 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
9124 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9125 		sc->sc_nvm_max_section_size = 32768;
9126 		sc->nvm_type = IWM_NVM_EXT;
9127 		break;
9128 	case PCI_PRODUCT_INTEL_WL_9260_1:
9129 		sc->sc_fwname = "iwm-9260-34";
9130 		sc->host_interrupt_operation_mode = 0;
9131 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
9132 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9133 		sc->sc_nvm_max_section_size = 32768;
9134 		sc->sc_mqrx_supported = 1;
9135 		break;
9136 	case PCI_PRODUCT_INTEL_WL_9560_1:
9137 	case PCI_PRODUCT_INTEL_WL_9560_2:
9138 		sc->sc_fwname = "iwm-9000-34";
9139 		sc->host_interrupt_operation_mode = 0;
9140 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
9141 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9142 		sc->sc_nvm_max_section_size = 32768;
9143 		sc->sc_mqrx_supported = 1;
9144 		sc->sc_integrated = 1;
9145 		break;
9146 	default:
9147 		printf("%s: unknown adapter type\n", DEVNAME(sc));
9148 		return;
9149 	}
9150 
9151 	/*
9152 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
9153 	 * changed: the revision step now also includes bits 0-1 (there is no
9154 	 * more "dash" value). To keep hw_rev backwards compatible, store it
9155 	 * in the old format.
9156 	 */
9157 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
9158 		uint32_t hw_step;
9159 
9160 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
9161 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
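		/*
		 * Assuming IWM_CSR_HW_REV_STEP extracts the step from bits
		 * 2-3, the shift-left by two above moves the new step bits
		 * 0-1 into that position, and the result is merged back
		 * into the old "step in bits 2-3" layout.
		 */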
9162 
9163 		if (iwm_prepare_card_hw(sc) != 0) {
9164 			printf("%s: could not initialize hardware\n",
9165 			    DEVNAME(sc));
9166 			return;
9167 		}
9168 
9169 		/*
9170 		 * In order to recognize C step the driver should read the
9171 		 * chip version id located at the AUX bus MISC address.
9172 		 */
9173 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
9174 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
9175 		DELAY(2);
9176 
9177 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
9178 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9179 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9180 				   25000);
9181 		if (!err) {
9182 			printf("%s: failed to wake up the NIC\n", DEVNAME(sc));
9183 			return;
9184 		}
9185 
9186 		if (iwm_nic_lock(sc)) {
9187 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
9188 			hw_step |= IWM_ENABLE_WFPM;
9189 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
9190 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
9191 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
9192 			if (hw_step == 0x3)
9193 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
9194 						(IWM_SILICON_C_STEP << 2);
9195 			iwm_nic_unlock(sc);
9196 		} else {
9197 			printf("%s: failed to lock the NIC\n", DEVNAME(sc));
9198 			return;
9199 		}
9200 	}
9201 
9202 	/*
9203 	 * Allocate DMA memory for firmware transfers.
9204 	 * Must be aligned on a 16-byte boundary.
9205 	 */
9206 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
9207 	    sc->sc_fwdmasegsz, 16);
9208 	if (err) {
9209 		printf("%s: could not allocate memory for firmware\n",
9210 		    DEVNAME(sc));
9211 		return;
9212 	}
9213 
9214 	/* Allocate "Keep Warm" page, used internally by the card. */
9215 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
9216 	if (err) {
9217 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
9218 		goto fail1;
9219 	}
9220 
9221 	/* Allocate interrupt cause table (ICT). */
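	/*
	 * As in the Linux iwlwifi ICT scheme, the device DMAs interrupt
	 * cause words into this table so they can be read without MMIO
	 * register accesses.  The ICT base register presumably takes the
	 * physical address shifted right by IWM_ICT_PADDR_SHIFT, which
	 * is why the table must be aligned to 1 << IWM_ICT_PADDR_SHIFT.
	 */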
9222 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
9223 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
9224 	if (err) {
9225 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
9226 		goto fail2;
9227 	}
9228 
9229 	/* TX scheduler rings must be aligned on a 1KB boundary. */
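	/*
	 * One byte-count table is allocated per TX queue; the scheduler
	 * presumably reads these tables via DMA to learn the length of
	 * each queued frame.
	 */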
9230 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
9231 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
9232 	if (err) {
9233 		printf("%s: could not allocate TX scheduler rings\n",
9234 		    DEVNAME(sc));
9235 		goto fail3;
9236 	}
9237 
9238 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
9239 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
9240 		if (err) {
9241 			printf("%s: could not allocate TX ring %d\n",
9242 			    DEVNAME(sc), txq_i);
9243 			goto fail4;
9244 		}
9245 	}
9246 
9247 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
9248 	if (err) {
9249 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
9250 		goto fail4;
9251 	}
9252 
9253 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
9254 	if (sc->sc_nswq == NULL)
9255 		goto fail4;
9256 
9257 	/* Clear pending interrupts. */
9258 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
9259 
9260 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only OFDM, but the field is unused */
9261 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
9262 	ic->ic_state = IEEE80211_S_INIT;
9263 
9264 	/* Set device capabilities. */
9265 	ic->ic_caps =
9266 	    IEEE80211_C_WEP |		/* WEP */
9267 	    IEEE80211_C_RSN |		/* WPA/RSN */
9268 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
9269 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
9270 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
9271 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
9272 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
9273 
9274 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
9275 	ic->ic_htcaps |=
9276 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
9277 	ic->ic_htxcaps = 0;
9278 	ic->ic_txbfcaps = 0;
9279 	ic->ic_aselcaps = 0;
9280 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
9281 
9282 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
9283 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
9284 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
9285 
9286 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
9287 		sc->sc_phyctxt[i].id = i;
9288 	}
9289 
9290 	sc->sc_amrr.amrr_min_success_threshold =  1;
9291 	sc->sc_amrr.amrr_max_success_threshold = 15;
9292 
9293 	/* IBSS channel undefined for now. */
9294 	ic->ic_ibss_chan = &ic->ic_channels[1];
9295 
9296 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
9297 
9298 	ifp->if_softc = sc;
9299 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9300 	ifp->if_ioctl = iwm_ioctl;
9301 	ifp->if_start = iwm_start;
9302 	ifp->if_watchdog = iwm_watchdog;
9303 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
9304 
9305 	if_attach(ifp);
9306 	ieee80211_ifattach(ifp);
9307 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
9308 
9309 #if NBPFILTER > 0
9310 	iwm_radiotap_attach(sc);
9311 #endif
9312 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
9313 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
9314 	task_set(&sc->init_task, iwm_init_task, sc);
9315 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
9316 	task_set(&sc->setrates_task, iwm_setrates_task, sc);
9317 	task_set(&sc->ba_task, iwm_ba_task, sc);
9318 	task_set(&sc->htprot_task, iwm_htprot_task, sc);
9319 
9320 	ic->ic_node_alloc = iwm_node_alloc;
9321 	ic->ic_bgscan_start = iwm_bgscan;
9322 
9323 	/* Override 802.11 state transition machine. */
9324 	sc->sc_newstate = ic->ic_newstate;
9325 	ic->ic_newstate = iwm_newstate;
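	/*
	 * The saved sc_newstate pointer allows iwm_newstate() to chain
	 * to net80211's original handler once driver work is done.
	 */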
9326 	ic->ic_update_htprot = iwm_update_htprot;
9327 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
9328 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
9329 #ifdef notyet
9330 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
9331 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
9332 #endif
9333 	/*
9334 	 * We cannot read the MAC address without loading the
9335 	 * firmware from disk. Postpone until mountroot is done.
9336 	 */
9337 	config_mountroot(self, iwm_attach_hook);
9338 
9339 	return;
9340 
9341 fail4:	while (--txq_i >= 0)
9342 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
9343 	iwm_free_rx_ring(sc, &sc->rxq);
9344 	iwm_dma_contig_free(&sc->sched_dma);
9345 fail3:	if (sc->ict_dma.vaddr != NULL)
9346 		iwm_dma_contig_free(&sc->ict_dma);
9347 
9348 fail2:	iwm_dma_contig_free(&sc->kw_dma);
9349 fail1:	iwm_dma_contig_free(&sc->fw_dma);
9350 	return;
9351 }
9352 
9353 #if NBPFILTER > 0
9354 void
9355 iwm_radiotap_attach(struct iwm_softc *sc)
9356 {
9357 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
9358 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
9359 
9360 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
9361 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
9362 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
9363 
9364 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
9365 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
9366 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
9367 }
9368 #endif
9369 
9370 void
9371 iwm_init_task(void *arg1)
9372 {
9373 	struct iwm_softc *sc = arg1;
9374 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9375 	int s = splnet();
9376 	int generation = sc->sc_generation;
9377 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
9378 
9379 	rw_enter_write(&sc->ioctl_rwl);
9380 	if (generation != sc->sc_generation) {
9381 		rw_exit(&sc->ioctl_rwl);
9382 		splx(s);
9383 		return;
9384 	}
9385 
9386 	if (ifp->if_flags & IFF_RUNNING)
9387 		iwm_stop(ifp);
9388 	else
9389 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
9390 
9391 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
9392 		iwm_init(ifp);
9393 
9394 	rw_exit(&sc->ioctl_rwl);
9395 	splx(s);
9396 }
9397 
9398 int
9399 iwm_resume(struct iwm_softc *sc)
9400 {
9401 	pcireg_t reg;
9402 
9403 	/* Clear device-specific "PCI retry timeout" register (41h). */
9404 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9405 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9406 
9407 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
9408 	iwm_conf_msix_hw(sc, 0);
9409 
9410 	iwm_enable_rfkill_int(sc);
9411 	iwm_check_rfkill(sc);
9412 
9413 	return iwm_prepare_card_hw(sc);
9414 }
9415 
9416 int
9417 iwm_activate(struct device *self, int act)
9418 {
9419 	struct iwm_softc *sc = (struct iwm_softc *)self;
9420 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9421 	int err = 0;
9422 
9423 	switch (act) {
9424 	case DVACT_QUIESCE:
9425 		if (ifp->if_flags & IFF_RUNNING) {
9426 			rw_enter_write(&sc->ioctl_rwl);
9427 			iwm_stop(ifp);
9428 			rw_exit(&sc->ioctl_rwl);
9429 		}
9430 		break;
9431 	case DVACT_RESUME:
9432 		err = iwm_resume(sc);
9433 		if (err)
9434 			printf("%s: could not initialize hardware\n",
9435 			    DEVNAME(sc));
9436 		break;
9437 	case DVACT_WAKEUP:
9438 		/* Hardware should be up at this point. */
9439 		if (iwm_set_hw_ready(sc))
9440 			task_add(systq, &sc->init_task);
9441 		break;
9442 	}
9443 
9444 	return 0;
9445 }
9446 
9447 struct cfdriver iwm_cd = {
9448 	NULL, "iwm", DV_IFNET
9449 };
9450 
9451 struct cfattach iwm_ca = {
9452 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
9453 	NULL, iwm_activate
9454 };
9455