xref: /openbsd-src/sys/dev/pci/if_iwx.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*	$OpenBSD: if_iwx.c,v 1.45 2020/10/11 07:05:28 mpi Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ******************************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 - 2019 Intel Corporation
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * BSD LICENSE
46  *
47  * Copyright(c) 2017 Intel Deutschland GmbH
48  * Copyright(c) 2018 - 2019 Intel Corporation
49  * All rights reserved.
50  *
51  * Redistribution and use in source and binary forms, with or without
52  * modification, are permitted provided that the following conditions
53  * are met:
54  *
55  *  * Redistributions of source code must retain the above copyright
56  *    notice, this list of conditions and the following disclaimer.
57  *  * Redistributions in binary form must reproduce the above copyright
58  *    notice, this list of conditions and the following disclaimer in
59  *    the documentation and/or other materials provided with the
60  *    distribution.
61  *  * Neither the name Intel Corporation nor the names of its
62  *    contributors may be used to endorse or promote products derived
63  *    from this software without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76  *
77  *****************************************************************************
78  */
79 
80 /*-
81  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82  *
83  * Permission to use, copy, modify, and distribute this software for any
84  * purpose with or without fee is hereby granted, provided that the above
85  * copyright notice and this permission notice appear in all copies.
86  *
87  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94  */
95 
96 #include "bpfilter.h"
97 
98 #include <sys/param.h>
99 #include <sys/conf.h>
100 #include <sys/kernel.h>
101 #include <sys/malloc.h>
102 #include <sys/mbuf.h>
103 #include <sys/mutex.h>
104 #include <sys/proc.h>
105 #include <sys/rwlock.h>
106 #include <sys/socket.h>
107 #include <sys/sockio.h>
108 #include <sys/systm.h>
109 #include <sys/endian.h>
110 
111 #include <sys/refcnt.h>
112 #include <sys/task.h>
113 #include <machine/bus.h>
114 #include <machine/intr.h>
115 
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
118 #include <dev/pci/pcidevs.h>
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 #include <net/if.h>
124 #include <net/if_dl.h>
125 #include <net/if_media.h>
126 
127 #include <netinet/in.h>
128 #include <netinet/if_ether.h>
129 
130 #include <net80211/ieee80211_var.h>
131 #include <net80211/ieee80211_radiotap.h>
132 
133 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
134 
135 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
136 
137 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
138 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
139 
140 #ifdef IWX_DEBUG
141 #define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
142 #define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
143 int iwx_debug = 1;
144 #else
145 #define DPRINTF(x)	do { ; } while (0)
146 #define DPRINTFN(n, x)	do { ; } while (0)
147 #endif
148 
149 #include <dev/pci/if_iwxreg.h>
150 #include <dev/pci/if_iwxvar.h>
151 
/*
 * Channel numbers advertised by the NVM of 8000-series and newer
 * devices: 14 channels in the 2.4 GHz band followed by the 5 GHz
 * channels. Indexed together with flags read from the NVM when the
 * channel map is built (see iwx_init_channel_map).
 */
const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
160 
/*
 * Channel list for ultra-high-band (UHB) capable devices: the same
 * 2.4/5 GHz channels as above, extended with 6-7 GHz channel numbers.
 * Note that 6-7 GHz channel numbering restarts at 1 and overlaps the
 * 2.4 GHz numbers; the band is distinguished by array position.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
174 
175 #define IWX_NUM_2GHZ_CHANNELS	14
176 
/*
 * Rate table mapping 802.11 rates to the PLCP values the firmware
 * expects. 'rate' is the bitrate in units of 500 kbit/s (so 2 == 1
 * Mbit/s). Entries with a legacy PLCP of IWX_RATE_INVM_PLCP exist
 * only as HT (MCS) rates; entries whose HT PLCP is
 * IWX_RATE_HT_SISO_MCS_INV_PLCP exist only as legacy rates.
 */
const struct iwx_rate {
	uint16_t rate;		/* bitrate, in 500 kbit/s units */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP value */
	uint8_t ht_plcp;	/* HT (MCS) PLCP value */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0	/* first CCK entry in iwx_rates[] */
#define IWX_RIDX_OFDM	4	/* first OFDM entry in iwx_rates[] */
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
211 
/*
 * Convert an MCS index into an iwx_rates[] index.
 * Covers HT MCS 0-15 (single- and dual-stream rates); the index into
 * this array must be validated against that range by the caller.
 */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
231 
232 uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
233 int	iwx_is_mimo_ht_plcp(uint8_t);
234 int	iwx_is_mimo_mcs(int);
235 int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
236 int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
237 int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
238 int	iwx_apply_debug_destination(struct iwx_softc *);
239 int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
240 void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
241 void	iwx_ctxt_info_free_paging(struct iwx_softc *);
242 int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
243 	    struct iwx_context_info_dram *);
244 int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
245 	    uint8_t *, size_t);
246 int	iwx_set_default_calib(struct iwx_softc *, const void *);
247 void	iwx_fw_info_free(struct iwx_fw_info *);
248 int	iwx_read_firmware(struct iwx_softc *);
249 uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
250 void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
251 int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
252 int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
253 int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
254 int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
255 int	iwx_nic_lock(struct iwx_softc *);
256 void	iwx_nic_assert_locked(struct iwx_softc *);
257 void	iwx_nic_unlock(struct iwx_softc *);
258 void	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
259 	    uint32_t);
260 void	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
261 void	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
262 int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
263 	    bus_size_t);
264 void	iwx_dma_contig_free(struct iwx_dma_info *);
265 int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
266 void	iwx_disable_rx_dma(struct iwx_softc *);
267 void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
268 void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
269 int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
270 void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
271 void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
272 void	iwx_enable_rfkill_int(struct iwx_softc *);
273 int	iwx_check_rfkill(struct iwx_softc *);
274 void	iwx_enable_interrupts(struct iwx_softc *);
275 void	iwx_enable_fwload_interrupt(struct iwx_softc *);
276 void	iwx_restore_interrupts(struct iwx_softc *);
277 void	iwx_disable_interrupts(struct iwx_softc *);
278 void	iwx_ict_reset(struct iwx_softc *);
279 int	iwx_set_hw_ready(struct iwx_softc *);
280 int	iwx_prepare_card_hw(struct iwx_softc *);
281 void	iwx_force_power_gating(struct iwx_softc *);
282 void	iwx_apm_config(struct iwx_softc *);
283 int	iwx_apm_init(struct iwx_softc *);
284 void	iwx_apm_stop(struct iwx_softc *);
285 int	iwx_allow_mcast(struct iwx_softc *);
286 void	iwx_init_msix_hw(struct iwx_softc *);
287 void	iwx_conf_msix_hw(struct iwx_softc *, int);
288 int	iwx_start_hw(struct iwx_softc *);
289 void	iwx_stop_device(struct iwx_softc *);
290 void	iwx_nic_config(struct iwx_softc *);
291 int	iwx_nic_rx_init(struct iwx_softc *);
292 int	iwx_nic_init(struct iwx_softc *);
293 int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
294 void	iwx_post_alive(struct iwx_softc *);
295 void	iwx_protect_session(struct iwx_softc *, struct iwx_node *, uint32_t,
296 	    uint32_t);
297 void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
298 void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
299 void	iwx_setup_ht_rates(struct iwx_softc *);
300 int	iwx_mimo_enabled(struct iwx_softc *);
301 void	iwx_htprot_task(void *);
302 void	iwx_update_htprot(struct ieee80211com *, struct ieee80211_node *);
303 int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
304 	    uint8_t);
305 void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
306 	    uint8_t);
307 void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
308 	    uint16_t, uint16_t, int);
309 #ifdef notyet
310 int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
311 	    uint8_t);
312 void	iwx_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
313 	    uint8_t);
314 #endif
315 void	iwx_ba_task(void *);
316 
317 int	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
318 int	iwx_is_valid_mac_addr(const uint8_t *);
319 int	iwx_nvm_get(struct iwx_softc *);
320 int	iwx_load_firmware(struct iwx_softc *);
321 int	iwx_start_fw(struct iwx_softc *);
322 int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
323 int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
324 int	iwx_load_ucode_wait_alive(struct iwx_softc *);
325 int	iwx_send_dqa_cmd(struct iwx_softc *);
326 int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
327 int	iwx_config_ltr(struct iwx_softc *);
328 void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
329 int	iwx_rx_addbuf(struct iwx_softc *, int, int);
330 int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
331 void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
332 	    struct iwx_rx_data *);
333 int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
334 int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
335 	    struct ieee80211_node *);
336 void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
337 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
338 void	iwx_rx_tx_cmd_single(struct iwx_softc *, struct iwx_rx_packet *,
339 	    struct iwx_node *);
340 void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
341 	    struct iwx_rx_data *);
342 void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
343 	    struct iwx_rx_data *);
344 int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
345 int	iwx_phy_ctxt_cmd_uhb(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
346 	    uint8_t, uint32_t, uint32_t);
347 int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
348 	    uint8_t, uint32_t, uint32_t);
349 int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
350 int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
351 	    const void *);
352 int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
353 	    uint32_t *);
354 int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
355 	    const void *, uint32_t *);
356 void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
357 void	iwx_cmd_done(struct iwx_softc *, int, int, int);
358 const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
359 	    struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
360 void	iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
361 int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *, int);
362 int	iwx_flush_tx_path(struct iwx_softc *);
363 int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
364 	    struct iwx_beacon_filter_cmd *);
365 int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
366 void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
367 	    struct iwx_mac_power_cmd *);
368 int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
369 int	iwx_power_update_device(struct iwx_softc *);
370 int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
371 int	iwx_disable_beacon_filter(struct iwx_softc *);
372 int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
373 int	iwx_add_aux_sta(struct iwx_softc *);
374 int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
375 int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
376 int	iwx_config_umac_scan(struct iwx_softc *);
377 int	iwx_umac_scan(struct iwx_softc *, int);
378 void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
379 uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
380 int	iwx_rval2ridx(int);
381 void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
382 void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
383 	    struct iwx_mac_ctx_cmd *, uint32_t);
384 void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
385 	    struct iwx_mac_data_sta *, int);
386 int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
387 int	iwx_clear_statistics(struct iwx_softc *);
388 int	iwx_update_quotas(struct iwx_softc *, struct iwx_node *, int);
389 void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
390 void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
391 int	iwx_scan(struct iwx_softc *);
392 int	iwx_bgscan(struct ieee80211com *);
393 int	iwx_umac_scan_abort(struct iwx_softc *);
394 int	iwx_scan_abort(struct iwx_softc *);
395 int	iwx_rs_rval2idx(uint8_t);
396 uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
397 int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
398 int	iwx_enable_data_tx_queues(struct iwx_softc *);
399 int	iwx_auth(struct iwx_softc *);
400 int	iwx_deauth(struct iwx_softc *);
401 int	iwx_assoc(struct iwx_softc *);
402 int	iwx_disassoc(struct iwx_softc *);
403 int	iwx_run(struct iwx_softc *);
404 int	iwx_run_stop(struct iwx_softc *);
405 struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
406 int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
407 	    struct ieee80211_key *);
408 void	iwx_delete_key(struct ieee80211com *,
409 	    struct ieee80211_node *, struct ieee80211_key *);
410 int	iwx_media_change(struct ifnet *);
411 void	iwx_newstate_task(void *);
412 int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
413 void	iwx_endscan(struct iwx_softc *);
414 void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
415 	    struct ieee80211_node *);
416 int	iwx_sf_config(struct iwx_softc *, int);
417 int	iwx_send_bt_init_conf(struct iwx_softc *);
418 int	iwx_send_soc_conf(struct iwx_softc *);
419 int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
420 int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
421 int	iwx_init_hw(struct iwx_softc *);
422 int	iwx_init(struct ifnet *);
423 void	iwx_start(struct ifnet *);
424 void	iwx_stop(struct ifnet *);
425 void	iwx_watchdog(struct ifnet *);
426 int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
427 const char *iwx_desc_lookup(uint32_t);
428 void	iwx_nic_error(struct iwx_softc *);
429 void	iwx_nic_umac_error(struct iwx_softc *);
430 int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
431 void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
432 	    struct mbuf_list *);
433 void	iwx_notif_intr(struct iwx_softc *);
434 int	iwx_intr(void *);
435 int	iwx_intr_msix(void *);
436 int	iwx_match(struct device *, void *, void *);
437 int	iwx_preinit(struct iwx_softc *);
438 void	iwx_attach_hook(struct device *);
439 void	iwx_attach(struct device *, struct device *, void *);
440 void	iwx_init_task(void *);
441 int	iwx_activate(struct device *, int);
442 int	iwx_resume(struct iwx_softc *);
443 
444 #if NBPFILTER > 0
445 void	iwx_radiotap_attach(struct iwx_softc *);
446 #endif
447 
448 uint8_t
449 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
450 {
451 	const struct iwx_fw_cmd_version *entry;
452 	int i;
453 
454 	for (i = 0; i < sc->n_cmd_versions; i++) {
455 		entry = &sc->cmd_versions[i];
456 		if (entry->group == grp && entry->cmd == cmd)
457 			return entry->cmd_ver;
458 	}
459 
460 	return IWX_FW_CMD_VER_UNKNOWN;
461 }
462 
463 int
464 iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
465 {
466 	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
467 	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
468 }
469 
470 int
471 iwx_is_mimo_mcs(int mcs)
472 {
473 	int ridx = iwx_mcs2ridx[mcs];
474 	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
475 
476 }
477 
478 int
479 iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
480 {
481 	struct iwx_fw_cscheme_list *l = (void *)data;
482 
483 	if (dlen < sizeof(*l) ||
484 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
485 		return EINVAL;
486 
487 	/* we don't actually store anything for now, always use s/w crypto */
488 
489 	return 0;
490 }
491 
492 int
493 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
494     const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
495 {
496 	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
497 	if (err) {
498 		printf("%s: could not allocate context info DMA memory\n",
499 		    DEVNAME(sc));
500 		return err;
501 	}
502 
503 	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
504 
505 	return 0;
506 }
507 
508 void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
509 {
510 	struct iwx_self_init_dram *dram = &sc->init_dram;
511 	int i;
512 
513 	if (!dram->paging)
514 		return;
515 
516 	/* free paging*/
517 	for (i = 0; i < dram->paging_cnt; i++)
518 		iwx_dma_contig_free(dram->paging);
519 
520 	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
521 	dram->paging_cnt = 0;
522 	dram->paging = NULL;
523 }
524 
525 int
526 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
527 {
528 	int i = 0;
529 
530 	while (start < fws->fw_count &&
531 	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
532 	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
533 		start++;
534 		i++;
535 	}
536 
537 	return i;
538 }
539 
/*
 * Allocate DMA memory for all LMAC, UMAC and paging sections of the
 * firmware image and record their physical addresses in the context
 * info structure which will be handed to the device.
 *
 * The firmware image stores LMAC, UMAC and paging sections back to
 * back, separated by special separator sections; the +1/+2 offsets
 * below skip over those separators when indexing fws->fw_sect[].
 *
 * On error, partially allocated section memory is NOT freed here;
 * the caller (iwx_ctxt_info_init) calls iwx_ctxt_info_free_fw_img()
 * on failure. NOTE(review): dram->paging entries allocated before a
 * later failure appear to rely on iwx_ctxt_info_free_paging() being
 * called when the device goes down — confirm against callers.
 */
int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL);

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* one array holds both lmac and umac section descriptors */
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
						   &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
			(unsigned long long)dram->fw[fw_cnt].paddr,
			(unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
	}

	return 0;
}
628 
629 int
630 iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
631     uint8_t min_power)
632 {
633 	struct iwx_dma_info *fw_mon = &sc->fw_mon;
634 	uint32_t size = 0;
635 	uint8_t power;
636 	int err;
637 
638 	if (fw_mon->size)
639 		return 0;
640 
641 	for (power = max_power; power >= min_power; power--) {
642 		size = (1 << power);
643 
644 		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
645 		if (err)
646 			continue;
647 
648 		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
649 			 DEVNAME(sc), size));
650 		break;
651 	}
652 
653 	if (err) {
654 		fw_mon->size = 0;
655 		return err;
656 	}
657 
658 	if (power != max_power)
659 		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
660 			DEVNAME(sc), (unsigned long)(1 << (power - 10)),
661 			(unsigned long)(1 << (max_power - 10))));
662 
663 	return 0;
664 }
665 
666 int
667 iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
668 {
669 	if (!max_power) {
670 		/* default max_power is maximum */
671 		max_power = 26;
672 	} else {
673 		max_power += 11;
674 	}
675 
676 	if (max_power > 26) {
677 		 DPRINTF(("%s: External buffer size for monitor is too big %d, "
678 		     "check the FW TLV\n", DEVNAME(sc), max_power));
679 		return 0;
680 	}
681 
682 	if (sc->fw_mon.size)
683 		return 0;
684 
685 	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
686 }
687 
688 int
689 iwx_apply_debug_destination(struct iwx_softc *sc)
690 {
691 	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
692 	int i, err;
693 	uint8_t mon_mode, size_power, base_shift, end_shift;
694 	uint32_t base_reg, end_reg;
695 
696 	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
697 	mon_mode = dest_v1->monitor_mode;
698 	size_power = dest_v1->size_power;
699 	base_reg = le32toh(dest_v1->base_reg);
700 	end_reg = le32toh(dest_v1->end_reg);
701 	base_shift = dest_v1->base_shift;
702 	end_shift = dest_v1->end_shift;
703 
704 	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
705 
706 	if (mon_mode == EXTERNAL_MODE) {
707 		err = iwx_alloc_fw_monitor(sc, size_power);
708 		if (err)
709 			return err;
710 	}
711 
712 	if (!iwx_nic_lock(sc))
713 		return EBUSY;
714 
715 	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
716 		uint32_t addr, val;
717 		uint8_t op;
718 
719 		addr = le32toh(dest_v1->reg_ops[i].addr);
720 		val = le32toh(dest_v1->reg_ops[i].val);
721 		op = dest_v1->reg_ops[i].op;
722 
723 		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
724 		switch (op) {
725 		case CSR_ASSIGN:
726 			IWX_WRITE(sc, addr, val);
727 			break;
728 		case CSR_SETBIT:
729 			IWX_SETBITS(sc, addr, (1 << val));
730 			break;
731 		case CSR_CLEARBIT:
732 			IWX_CLRBITS(sc, addr, (1 << val));
733 			break;
734 		case PRPH_ASSIGN:
735 			iwx_write_prph(sc, addr, val);
736 			break;
737 		case PRPH_SETBIT:
738 			iwx_set_bits_prph(sc, addr, (1 << val));
739 			break;
740 		case PRPH_CLEARBIT:
741 			iwx_clear_bits_prph(sc, addr, (1 << val));
742 			break;
743 		case PRPH_BLOCKBIT:
744 			if (iwx_read_prph(sc, addr) & (1 << val))
745 				goto monitor;
746 			break;
747 		default:
748 			DPRINTF(("%s: FW debug - unknown OP %d\n",
749 			    DEVNAME(sc), op));
750 			break;
751 		}
752 	}
753 
754 monitor:
755 	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
756 		iwx_write_prph(sc, le32toh(base_reg),
757 		    sc->fw_mon.paddr >> base_shift);
758 		iwx_write_prph(sc, end_reg,
759 		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
760 		    >> end_shift);
761 	}
762 
763 	iwx_nic_unlock(sc);
764 	return 0;
765 }
766 
/*
 * Build the context info structure in DMA memory, load all firmware
 * sections into DMA memory, hand the context info's physical address
 * to the device and kick off the firmware self-load.
 *
 * On failure, firmware section memory allocated by iwx_init_fw_sec()
 * is released via iwx_ctxt_info_free_fw_img(); on success the context
 * info is released later, upon "alive" notification or load failure.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0, rb_size;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/* Newer device families use 2K receive buffers, older ones 4K. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
	else
		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;

	/* the RB circular buffer size field is only 4 bits wide */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc))
		return EBUSY;
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
843 
844 void
845 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
846 {
847 	struct iwx_self_init_dram *dram = &sc->init_dram;
848 	int i;
849 
850 	if (!dram->fw)
851 		return;
852 
853 	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
854 		iwx_dma_contig_free(&dram->fw[i]);
855 
856 	free(dram->fw, M_DEVBUF,
857 	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
858 	dram->lmac_cnt = 0;
859 	dram->umac_cnt = 0;
860 	dram->fw = NULL;
861 }
862 
863 int
864 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
865     uint8_t *data, size_t dlen)
866 {
867 	struct iwx_fw_sects *fws;
868 	struct iwx_fw_onesect *fwone;
869 
870 	if (type >= IWX_UCODE_TYPE_MAX)
871 		return EINVAL;
872 	if (dlen < sizeof(uint32_t))
873 		return EINVAL;
874 
875 	fws = &sc->sc_fw.fw_sects[type];
876 	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
877 	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
878 		return EINVAL;
879 
880 	fwone = &fws->fw_sect[fws->fw_count];
881 
882 	/* first 32bit are device load offset */
883 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
884 
885 	/* rest is data */
886 	fwone->fws_data = data + sizeof(uint32_t);
887 	fwone->fws_len = dlen - sizeof(uint32_t);
888 
889 	fws->fw_count++;
890 	fws->fw_totlen += fwone->fws_len;
891 
892 	return 0;
893 }
894 
#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */

/* On-disk layout of an IWX_UCODE_TLV_DEF_CALIB record (little-endian). */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;	/* enum iwx_ucode_type the defaults apply to */
	struct iwx_tlv_calib_ctrl calib;
} __packed;
903 
904 int
905 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
906 {
907 	const struct iwx_tlv_calib_data *def_calib = data;
908 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
909 
910 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
911 		return EINVAL;
912 
913 	sc->sc_default_calib[ucode_type].flow_trigger =
914 	    def_calib->calib.flow_trigger;
915 	sc->sc_default_calib[ucode_type].event_trigger =
916 	    def_calib->calib.event_trigger;
917 
918 	return 0;
919 }
920 
921 void
922 iwx_fw_info_free(struct iwx_fw_info *fw)
923 {
924 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
925 	fw->fw_rawdata = NULL;
926 	fw->fw_rawsize = 0;
927 	/* don't touch fw->fw_status */
928 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
929 }
930 
/*
 * Top bits of addresses read from debug TLVs are masked off before use;
 * presumably cache-control bits, per the macro name -- see the
 * UMAC/LMAC debug-address TLV handling below.
 */
#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
932 
/*
 * Load the firmware image from disk via loadfirmware(9) and parse its
 * TLV records into sc->sc_fw.  Concurrent callers are serialized
 * through fw->fw_status: a caller sleeps while another thread is
 * already loading, and a finished image is reused.
 * Returns 0 on success or an errno value.
 */
int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	/* Wait for any concurrent firmware load to finish. */
	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
	fw->fw_status = IWX_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwx_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	/* Reset capabilities; the TLVs below fill them in. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));

	/* A valid image starts with a zero word followed by the magic. */
	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	/* Walk the TLV records following the header. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Only 1 or 2 CPUs are valid; value is not stored. */
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Each record covers 32 API flag bits. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			/*
			 * NOTE(review): unlike the API_CHANGES_SET case
			 * above, err is NOT set before this goto, so an
			 * out-of-range capability index silently stops
			 * parsing with success status -- confirm intended.
			 */
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			/* Paging is unsupported; only validate the length. */
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			/* Overrides the version derived from uhdr->ver. */
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			/* Only version 0 of this record is supported. */
			fw->dbg_dest_ver = (uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;

			/* Needs a debug destination; first record wins. */
			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			DPRINTF(("Found debug configuration: %d\n", conf->id));
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			struct iwx_umac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			/* Mask off the cache-control bits from the address. */
			sc->sc_uc.uc_umac_error_event_table =
				le32toh(dbg_ptrs->error_info_addr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			struct iwx_lmac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			/* Mask off the cache-control bits from the address. */
			sc->sc_uc.uc_lmac_error_event_table[0] =
				le32toh(dbg_ptrs->error_event_table_ptr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			/* Truncate to a whole number of version entries. */
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/* TLV records are padded to 4-byte alignment. */
		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	/* Wake up any thread sleeping on fw_status above. */
	wakeup(&sc->sc_fw);

	return err;
}
1316 
/*
 * Read a device periphery register through the indirect HBUS window.
 * Caller must hold the NIC lock (see iwx_nic_lock()).
 */
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	/* Low 20 bits carry the PRPH address; 3 << 24 flags the access. */
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}
1326 
/*
 * Write a device periphery register through the indirect HBUS window.
 * Caller must hold the NIC lock (see iwx_nic_lock()).
 */
void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	/* Low 20 bits carry the PRPH address; 3 << 24 flags the access. */
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}
1336 
/*
 * Write a 64-bit value as two consecutive 32-bit periphery registers,
 * low word first.  Caller must hold the NIC lock.
 */
void
iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
{
	uint32_t lo = val & 0xffffffff;
	uint32_t hi = val >> 32;

	iwx_write_prph(sc, (uint32_t)addr, lo);
	iwx_write_prph(sc, (uint32_t)addr + 4, hi);
}
1343 
1344 int
1345 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1346 {
1347 	int offs, err = 0;
1348 	uint32_t *vals = buf;
1349 
1350 	if (iwx_nic_lock(sc)) {
1351 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1352 		for (offs = 0; offs < dwords; offs++)
1353 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1354 		iwx_nic_unlock(sc);
1355 	} else {
1356 		err = EBUSY;
1357 	}
1358 	return err;
1359 }
1360 
1361 int
1362 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1363 {
1364 	int offs;
1365 	const uint32_t *vals = buf;
1366 
1367 	if (iwx_nic_lock(sc)) {
1368 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1369 		/* WADDR auto-increments */
1370 		for (offs = 0; offs < dwords; offs++) {
1371 			uint32_t val = vals ? vals[offs] : 0;
1372 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1373 		}
1374 		iwx_nic_unlock(sc);
1375 	} else {
1376 		return EBUSY;
1377 	}
1378 	return 0;
1379 }
1380 
/* Convenience wrapper: write a single 32-bit word of device memory. */
int
iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	uint32_t word = val;

	return iwx_write_mem(sc, addr, &word, 1);
}
1386 
/*
 * Poll register 'reg' until (value & mask) == (bits & mask), checking
 * every 10 microseconds for at most 'timo' microseconds.
 * Returns 1 when the condition was met, 0 on timeout.
 */
int
iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	int remain = timo;

	while (1) {
		uint32_t val = IWX_READ(sc, reg);

		if ((val & mask) == (bits & mask))
			return 1;
		if (remain < 10)
			return 0;
		remain -= 10;
		DELAY(10);
	}
}
1402 
/*
 * Request MAC access so device registers can be used reliably.
 * The lock nests; sc_nic_locks counts the current holders.
 * Returns 1 on success, 0 if the device did not wake up in time.
 */
int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	DELAY(2);

	/* Wait up to 150ms for the MAC clock to become ready. */
	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1428 
1429 void
1430 iwx_nic_assert_locked(struct iwx_softc *sc)
1431 {
1432 	uint32_t reg = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1433 	if ((reg & IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
1434 		panic("%s: mac clock not ready", DEVNAME(sc));
1435 	if (reg & IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
1436 		panic("%s: mac gone to sleep", DEVNAME(sc));
1437 	if (sc->sc_nic_locks <= 0)
1438 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1439 }
1440 
1441 void
1442 iwx_nic_unlock(struct iwx_softc *sc)
1443 {
1444 	if (sc->sc_nic_locks > 0) {
1445 		if (--sc->sc_nic_locks == 0)
1446 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1447 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1448 	} else
1449 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1450 }
1451 
/*
 * Read-modify-write a periphery register: keep only the bits in
 * 'mask', then OR in 'bits'.
 */
void
iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: failure to take the NIC lock is silently ignored */
	if (!iwx_nic_lock(sc))
		return;

	val = (iwx_read_prph(sc, reg) & mask) | bits;
	iwx_write_prph(sc, reg, val);
	iwx_nic_unlock(sc);
}
1466 
/* Set bits in a periphery register, preserving all existing bits. */
void
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	iwx_set_bits_mask_prph(sc, reg, bits, 0xffffffff);
}
1472 
/* Clear bits in a periphery register by masking them out of the keep set. */
void
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	uint32_t keep = ~bits;

	iwx_set_bits_mask_prph(sc, reg, 0, keep);
}
1478 
/*
 * Allocate a physically contiguous, 'alignment'-aligned DMA buffer of
 * 'size' bytes, map it into kernel virtual memory, zero it, and record
 * its bus address in dma->paddr.  Any partially acquired resources are
 * released via iwx_dma_contig_free() on failure.
 * Returns 0 on success or a bus_dma error.
 */
int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	/* Create the map, allocate one segment, map it, then load it. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Hand the buffer to the device zeroed. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwx_dma_contig_free(dma);
	return err;
}
1519 
1520 void
1521 iwx_dma_contig_free(struct iwx_dma_info *dma)
1522 {
1523 	if (dma->map != NULL) {
1524 		if (dma->vaddr != NULL) {
1525 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1526 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1527 			bus_dmamap_unload(dma->tag, dma->map);
1528 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1529 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1530 			dma->vaddr = NULL;
1531 		}
1532 		bus_dmamap_destroy(dma->tag, dma->map);
1533 		dma->map = NULL;
1534 	}
1535 }
1536 
/*
 * Allocate all DMA resources for the RX ring: the free/used descriptor
 * tables, the status area, and one DMA map plus mbuf per ring slot.
 * On failure everything is released via iwx_free_rx_ring().
 * Returns 0 on success or an errno value.
 */
int
iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Used-descriptor table: one 32-bit entry per slot (256-aligned). */
	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
	    size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}

	/* Create a DMA map and attach an initial buffer for each slot. */
	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
		struct iwx_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwx_free_rx_ring(sc, ring);
	return err;
}
1596 
1597 void
1598 iwx_disable_rx_dma(struct iwx_softc *sc)
1599 {
1600 	int ntries;
1601 
1602 	if (iwx_nic_lock(sc)) {
1603 		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1604 		for (ntries = 0; ntries < 1000; ntries++) {
1605 			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1606 			    IWX_RXF_DMA_IDLE)
1607 				break;
1608 			DELAY(10);
1609 		}
1610 		iwx_nic_unlock(sc);
1611 	}
1612 }
1613 
/*
 * Reset the RX ring to its initial state: rewind the ring pointer and
 * clear the status area shared with the device.
 */
void
iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	ring->cur = 0;
	/*
	 * NOTE(review): syncing PREWRITE before the CPU memset and
	 * POSTWRITE after looks inverted relative to typical
	 * bus_dmamap_sync(9) usage -- confirm this ordering is intended.
	 */
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);

}
1625 
1626 void
1627 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1628 {
1629 	int i;
1630 
1631 	iwx_dma_contig_free(&ring->free_desc_dma);
1632 	iwx_dma_contig_free(&ring->stat_dma);
1633 	iwx_dma_contig_free(&ring->used_desc_dma);
1634 
1635 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1636 		struct iwx_rx_data *data = &ring->data[i];
1637 
1638 		if (data->m != NULL) {
1639 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1640 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1641 			bus_dmamap_unload(sc->sc_dmat, data->map);
1642 			m_freem(data->m);
1643 			data->m = NULL;
1644 		}
1645 		if (data->map != NULL)
1646 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1647 	}
1648 }
1649 
/*
 * Allocate DMA resources for TX ring 'qid': the TFD descriptor array
 * always, and additionally the byte count table, command buffer and
 * per-slot DMA maps for the rings the driver actually uses.
 * On failure everything is released via iwx_free_tx_ring().
 * Returns 0 on success or an errno value.
 */
int
iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * There is no need to allocate DMA buffers for unused rings.
	 * The hardware supports up to 31 Tx rings which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
	 * are sc->tqx[ac + IWX_DQA_AUX_QUEUE + 1], i.e. sc->txq[2:5],
	 * in order to provide one queue per EDCA category.
	 *
	 * Tx aggregation will require additional queues (one queue per TID
	 * for which aggregation is enabled) but we do not implement this yet.
	 */
	if (qid > IWX_DQA_MIN_MGMT_QUEUE)
		return 0;

	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
	if (err) {
		printf("%s: could not allocate byte count table DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}

	/* One device command buffer per ring slot. */
	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
	    IWX_FIRST_TB_SIZE_ALIGN);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Assign each slot its command-buffer address and a DMA map. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
		struct iwx_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		paddr += sizeof(struct iwx_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWX_DQA_CMD_QUEUE)
			mapsize = (sizeof(struct iwx_cmd_header) +
			    IWX_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwx_free_tx_ring(sc, ring);
	return err;
}
1734 
1735 void
1736 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1737 {
1738 	int i;
1739 
1740 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1741 		struct iwx_tx_data *data = &ring->data[i];
1742 
1743 		if (data->m != NULL) {
1744 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1745 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1746 			bus_dmamap_unload(sc->sc_dmat, data->map);
1747 			m_freem(data->m);
1748 			data->m = NULL;
1749 		}
1750 	}
1751 
1752 	/* Clear byte count table. */
1753 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1754 
1755 	/* Clear TX descriptors. */
1756 	memset(ring->desc, 0, ring->desc_dma.size);
1757 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1758 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1759 	sc->qfullmsk &= ~(1 << ring->qid);
1760 	ring->queued = 0;
1761 	ring->cur = 0;
1762 	ring->tail = 0;
1763 }
1764 
1765 void
1766 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1767 {
1768 	int i;
1769 
1770 	iwx_dma_contig_free(&ring->desc_dma);
1771 	iwx_dma_contig_free(&ring->cmd_dma);
1772 	iwx_dma_contig_free(&ring->bc_tbl);
1773 
1774 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1775 		struct iwx_tx_data *data = &ring->data[i];
1776 
1777 		if (data->m != NULL) {
1778 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1779 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1780 			bus_dmamap_unload(sc->sc_dmat, data->map);
1781 			m_freem(data->m);
1782 			data->m = NULL;
1783 		}
1784 		if (data->map != NULL)
1785 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1786 	}
1787 }
1788 
/*
 * Unmask only the RF-kill interrupt cause, using the legacy INT mask
 * register in MSI mode or the HW causes mask in MSI-X mode (where a
 * cause is enabled by clearing its mask bit), and enable wakeup on
 * rfkill toggle.
 */
void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
1806 
1807 int
1808 iwx_check_rfkill(struct iwx_softc *sc)
1809 {
1810 	uint32_t v;
1811 	int s;
1812 	int rv;
1813 
1814 	s = splnet();
1815 
1816 	/*
1817 	 * "documentation" is not really helpful here:
1818 	 *  27:	HW_RF_KILL_SW
1819 	 *	Indicates state of (platform's) hardware RF-Kill switch
1820 	 *
1821 	 * But apparently when it's off, it's on ...
1822 	 */
1823 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1824 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1825 	if (rv) {
1826 		sc->sc_flags |= IWX_FLAG_RFKILL;
1827 	} else {
1828 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1829 	}
1830 
1831 	splx(s);
1832 	return rv;
1833 }
1834 
/*
 * Unmask the full set of interrupt causes, for legacy/MSI or MSI-X
 * operation as appropriate.
 */
void
iwx_enable_interrupts(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}
1854 
/*
 * During firmware load, enable only the interrupts needed to receive
 * the ALIVE notification: ALIVE plus FH RX in MSI mode, or the ALIVE
 * HW cause plus all FH causes in MSI-X mode.
 */
void
iwx_enable_fwload_interrupt(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_init_mask);
		sc->sc_fh_mask = sc->sc_fh_init_mask;
	}
}
1874 
/*
 * Re-enable the interrupt causes last saved in sc->sc_intmask.
 * NOTE(review): only touches the legacy/MSI mask register; presumably
 * callers use this in !msix mode only -- confirm.
 */
void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
1880 
/*
 * Mask all interrupt causes and, in MSI mode, acknowledge anything
 * pending.  Runs at splnet to avoid racing the interrupt handler.
 */
void
iwx_disable_interrupts(struct iwx_softc *sc)
{
	int s = splnet();

	if (!sc->sc_msix) {
		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWX_WRITE(sc, IWX_CSR_INT, ~0);
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
	} else {
		/* In MSI-X, writing the init masks disables all causes. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}

	splx(s);
}
1901 
/*
 * Clear and re-arm the interrupt cause table (ICT), a DMA area the
 * device writes interrupt causes into, then switch the driver to ICT
 * mode and re-enable interrupts.
 */
void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Acknowledge everything pending before re-enabling. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
1923 
#define IWX_HW_READY_TIMEOUT 50
/*
 * Tell the device the driver is ready and poll (for up to
 * IWX_HW_READY_TIMEOUT microseconds) until it acknowledges.
 * Returns nonzero once the device reports NIC-ready.
 */
int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		/* Let the device know the OS is alive. */
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWX_HW_READY_TIMEOUT
1944 
1945 int
1946 iwx_prepare_card_hw(struct iwx_softc *sc)
1947 {
1948 	int t = 0;
1949 
1950 	if (iwx_set_hw_ready(sc))
1951 		return 0;
1952 
1953 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
1954 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1955 	DELAY(1000);
1956 
1957 
1958 	/* If HW is not ready, prepare the conditions to check again */
1959 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
1960 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
1961 
1962 	do {
1963 		if (iwx_set_hw_ready(sc))
1964 			return 0;
1965 		DELAY(200);
1966 		t += 200;
1967 	} while (t < 150000);
1968 
1969 	return ETIMEDOUT;
1970 }
1971 
/*
 * Force the power-gating state machine through an active -> gated ->
 * active cycle via the HPM configuration register.  The 20us delays
 * between steps are part of the sequence; do not remove them.
 * NOTE(review): exact HPM bit semantics are not documented here;
 * sequence mirrors the reference (iwlwifi) implementation.
 */
void
iwx_force_power_gating(struct iwx_softc *sc)
{
	iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	DELAY(20);
	iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	DELAY(20);
	iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
}
1985 
/*
 * Configure PCIe link power management: always disable L0s, and
 * record whether ASPM L0s and LTR are enabled in PCI config space
 * for later reference (sc_pm_support, sc_ltr_enabled).
 */
void
iwx_apm_config(struct iwx_softc *sc)
{
	pcireg_t lctl, cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);

	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	/* Power management is only usable when ASPM L0s is off. */
	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_DCSR2);
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
}
2009 
2010 /*
2011  * Start up NIC's basic functionality after it has been reset
2012  * e.g. after platform boot or shutdown.
2013  * NOTE:  This does not load uCode nor start the embedded processor
2014  */
2015 int
2016 iwx_apm_init(struct iwx_softc *sc)
2017 {
2018 	int err = 0;
2019 
2020 	/*
2021 	 * Disable L0s without affecting L1;
2022 	 *  don't wait for ICH L0s (ICH bug W/A)
2023 	 */
2024 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2025 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2026 
2027 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2028 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2029 
2030 	/*
2031 	 * Enable HAP INTA (interrupt from management bus) to
2032 	 * wake device's PCI Express link L1a -> L0s
2033 	 */
2034 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2035 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2036 
2037 	iwx_apm_config(sc);
2038 
2039 	/*
2040 	 * Set "initialization complete" bit to move adapter from
2041 	 * D0U* --> D0A* (powered-up active) state.
2042 	 */
2043 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2044 
2045 	/*
2046 	 * Wait for clock stabilization; once stabilized, access to
2047 	 * device-internal resources is supported, e.g. iwx_write_prph()
2048 	 * and accesses to uCode SRAM.
2049 	 */
2050 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2051 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2052 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2053 		printf("%s: timeout waiting for clock stabilization\n",
2054 		    DEVNAME(sc));
2055 		err = ETIMEDOUT;
2056 		goto out;
2057 	}
2058  out:
2059 	if (err)
2060 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2061 	return err;
2062 }
2063 
/*
 * Counterpart to iwx_apm_init(): stop busmaster DMA and move the
 * adapter back into the uninitialized (D0U*) power state.
 */
void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2092 
/*
 * Program the MSI-X hardware configuration and save the resulting
 * initial interrupt masks for later restore/disable operations.
 */
void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/*
	 * The mask registers store disabled causes; invert them so the
	 * sc_*_mask fields hold the set of enabled causes.
	 */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2106 
/*
 * Configure interrupt cause routing in hardware.  Without MSI-X,
 * force the device into MSI mode; with MSI-X, map all RX queues and
 * all non-RX causes onto a single vector (vector 0) and unmask them.
 * 'stopped' is non-zero when the device is stopped, in which case
 * the PRPH register selecting MSI/MSI-X mode is not touched.
 */
void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2193 
2194 int
2195 iwx_start_hw(struct iwx_softc *sc)
2196 {
2197 	int err;
2198 	int t = 0;
2199 
2200 	err = iwx_prepare_card_hw(sc);
2201 	if (err)
2202 		return err;
2203 
2204 	/* Reset the entire device */
2205 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2206 	DELAY(5000);
2207 
2208 	if (sc->sc_integrated) {
2209 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2210 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2211 		DELAY(20);
2212 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2213 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2214 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2215 			printf("%s: timeout waiting for clock stabilization\n",
2216 			    DEVNAME(sc));
2217 			return ETIMEDOUT;
2218 		}
2219 
2220 		iwx_force_power_gating(sc);
2221 
2222 		/* Reset the entire device */
2223 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2224 		DELAY(5000);
2225 	}
2226 
2227 	err = iwx_apm_init(sc);
2228 	if (err)
2229 		return err;
2230 
2231 	iwx_init_msix_hw(sc);
2232 
2233 	while (t < 150000 && !iwx_set_hw_ready(sc)) {
2234 		DELAY(200);
2235 		t += 200;
2236 		if (iwx_set_hw_ready(sc)) {
2237 			break;
2238 		}
2239 	}
2240 	if (t >= 150000)
2241 		return ETIMEDOUT;
2242 
2243 	iwx_enable_rfkill_int(sc);
2244 	iwx_check_rfkill(sc);
2245 
2246 	return 0;
2247 }
2248 
/*
 * Stop the device: tear down rings, stop DMA, reset the on-board
 * processor, and leave the hardware in a low-power state while still
 * able to deliver RF-kill interrupts.
 */
void
iwx_stop_device(struct iwx_softc *sc)
{
	int qid;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwx_reset_tx_ring(sc, &sc->txq[qid]);

	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	iwx_ctxt_info_free_paging(sc);
}
2300 
/*
 * Program the HW_IF_CONFIG register with the MAC step/dash derived
 * from the hardware revision and the radio type/step/dash extracted
 * from the firmware's PHY configuration.
 */
void
iwx_nic_config(struct iwx_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
	    IWX_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	/* Read-modify-write: only replace the fields covered by 'mask'. */
	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
}
2337 
/*
 * Minimal RX-side hardware init; always succeeds (returns 0).
 */
int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2349 
2350 int
2351 iwx_nic_init(struct iwx_softc *sc)
2352 {
2353 	int err;
2354 
2355 	iwx_apm_init(sc);
2356 	iwx_nic_config(sc);
2357 
2358 	err = iwx_nic_rx_init(sc);
2359 	if (err)
2360 		return err;
2361 
2362 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2363 
2364 	return 0;
2365 }
2366 
/*
 * Map ieee80211_edca_ac categories to firmware Tx FIFO.
 * Indexed by the EDCA access category number.
 */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,
	IWX_GEN2_EDCA_TX_FIFO_BK,
	IWX_GEN2_EDCA_TX_FIFO_VI,
	IWX_GEN2_EDCA_TX_FIFO_VO,
};
2374 
/*
 * Ask the firmware to enable a Tx queue via SCD_QUEUE_CFG and verify
 * that the firmware assigned the queue ID and write pointer we expect.
 * Returns 0 on success or EIO/errno on failure.
 */
int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_tx_queue_cfg_cmd cmd;
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_host_cmd hcmd = {
		.id = IWX_SCD_QUEUE_CFG,
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid;
	uint32_t wr_idx;
	size_t resp_len;

	/* Start from a clean ring before handing it to firmware. */
	iwx_reset_tx_ring(sc, ring);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta_id;
	cmd.tid = tid;
	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
	/* DMA addresses of the byte-count table and TFD ring. */
	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);

	hcmd.data[0] = &cmd;
	hcmd.len[0] = sizeof(cmd);

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must match our ring state. */
	if (wr_idx != ring->cur) {
		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
		err = EIO;
		goto out;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
2443 
/*
 * Called after the firmware has sent its ALIVE notification;
 * re-initializes the ICT interrupt table.
 */
void
iwx_post_alive(struct iwx_softc *sc)
{
	iwx_ict_reset(sc);
}
2449 
2450 /*
2451  * For the high priority TE use a time event type that has similar priority to
2452  * the FW's action scan priority.
2453  */
2454 #define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2455 #define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2456 
2457 int
2458 iwx_send_time_event_cmd(struct iwx_softc *sc,
2459     const struct iwx_time_event_cmd *cmd)
2460 {
2461 	struct iwx_rx_packet *pkt;
2462 	struct iwx_time_event_resp *resp;
2463 	struct iwx_host_cmd hcmd = {
2464 		.id = IWX_TIME_EVENT_CMD,
2465 		.flags = IWX_CMD_WANT_RESP,
2466 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2467 	};
2468 	uint32_t resp_len;
2469 	int err;
2470 
2471 	hcmd.data[0] = cmd;
2472 	hcmd.len[0] = sizeof(*cmd);
2473 	err = iwx_send_cmd(sc, &hcmd);
2474 	if (err)
2475 		return err;
2476 
2477 	pkt = hcmd.resp_pkt;
2478 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2479 		err = EIO;
2480 		goto out;
2481 	}
2482 
2483 	resp_len = iwx_rx_packet_payload_len(pkt);
2484 	if (resp_len != sizeof(*resp)) {
2485 		err = EIO;
2486 		goto out;
2487 	}
2488 
2489 	resp = (void *)pkt->data;
2490 	if (le32toh(resp->status) == 0)
2491 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2492 	else
2493 		err = EIO;
2494 out:
2495 	iwx_free_resp(sc, &hcmd);
2496 	return err;
2497 }
2498 
/*
 * Schedule a session-protection time event so the firmware keeps the
 * channel available during association.  No-op if one is already
 * active.  'duration' and 'max_delay' are in TU, presumably -- TODO
 * confirm against firmware interface documentation.
 */
void
iwx_protect_session(struct iwx_softc *sc, struct iwx_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwx_time_event_cmd time_cmd;

	/* Do nothing if a time event is already scheduled. */
	if (sc->sc_flags & IWX_FLAG_TE_ACTIVE)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWX_TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWX_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWX_TE_V2_NOTIF_HOST_EVENT_START |
	        IWX_TE_V2_NOTIF_HOST_EVENT_END |
		IWX_T2_V2_START_IMMEDIATELY);

	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;

	DELAY(100);
}
2534 
/*
 * Remove the session-protection time event previously added by
 * iwx_protect_session(), identified by the saved unique ID.
 */
void
iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_time_event_cmd time_cmd;

	/* Do nothing if the time event has already ended. */
	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* Identify the event by the firmware-assigned unique ID. */
	time_cmd.id = htole32(sc->sc_time_event_uid);

	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;

	DELAY(100);
}
2556 
2557 /*
2558  * NVM read access and content parsing.  We do not support
2559  * external NVM or writing NVM.
2560  */
2561 
2562 uint8_t
2563 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2564 {
2565 	uint8_t tx_ant;
2566 
2567 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2568 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2569 
2570 	if (sc->sc_nvm.valid_tx_ant)
2571 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2572 
2573 	return tx_ant;
2574 }
2575 
2576 uint8_t
2577 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2578 {
2579 	uint8_t rx_ant;
2580 
2581 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2582 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2583 
2584 	if (sc->sc_nvm.valid_rx_ant)
2585 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2586 
2587 	return rx_ant;
2588 }
2589 
/*
 * Populate ic_channels[] from the NVM channel profile.  Exactly one
 * of channel_profile_v3 (16-bit flags) or channel_profile_v4 (32-bit
 * flags) is non-NULL, matching the NVM response version in use.
 */
void
iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
    uint32_t *channel_profile_v4, int nchan_profile)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint32_t ch_flags;
	int is_5ghz;
	int flags, hw_value;
	int nchan;
	const uint8_t *nvm_channels;

	/* Ultra-high-band devices use an extended channel list. */
	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
		if (channel_profile_v4)
			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
		else
			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);

		/* 5 GHz channels are invalid if the SKU disables that band. */
		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWX_NVM_CHANNEL_VALID;

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		/* Invalid channels are cleared out entirely. */
		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
			channel->ic_freq = 0;
			channel->ic_flags = 0;
			continue;
		}

		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Passive scan only where active probing is not allowed. */
		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
	}
}
2652 
2653 int
2654 iwx_mimo_enabled(struct iwx_softc *sc)
2655 {
2656 	struct ieee80211com *ic = &sc->sc_ic;
2657 
2658 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2659 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2660 }
2661 
2662 void
2663 iwx_setup_ht_rates(struct iwx_softc *sc)
2664 {
2665 	struct ieee80211com *ic = &sc->sc_ic;
2666 	uint8_t rx_ant;
2667 
2668 	/* TX is supported with the same MCS as RX. */
2669 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2670 
2671 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2672 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2673 
2674 	if (!iwx_mimo_enabled(sc))
2675 		return;
2676 
2677 	rx_ant = iwx_fw_valid_rx_ant(sc);
2678 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2679 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2680 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2681 }
2682 
#define IWX_MAX_RX_BA_SESSIONS 16

/*
 * Tell the firmware to start (start != 0) or stop a receive Block Ack
 * session for the given TID, then accept or refuse the ADDBA request
 * towards net80211 based on the firmware's answer.
 */
void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_add_sta_cmd cmd;
	struct iwx_node *in = (void *)ni;
	int err, s;
	uint32_t status;

	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWX_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWX_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = htole16(ssn);
		cmd.rx_ba_window = htole16(winsize);
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
	    IWX_STA_MODIFY_REMOVE_BA_TID;

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update driver state and notify net80211 at splnet. */
	s = splnet();
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) == IWX_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
2733 
/*
 * Task context handler: push updated HT protection settings to the
 * firmware by re-issuing the MAC context command.
 */
void
iwx_htprot_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err, s = splnet();

	/* Bail out if the driver is being shut down. */
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		printf("%s: could not change HT protection: error %d\n",
		    DEVNAME(sc), err);

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
2757 
2758 /*
2759  * This function is called by upper layer when HT protection settings in
2760  * beacons have changed.
2761  */
2762 void
2763 iwx_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2764 {
2765 	struct iwx_softc *sc = ic->ic_softc;
2766 
2767 	/* assumes that ni == ic->ic_bss */
2768 	iwx_add_task(sc, systq, &sc->htprot_task);
2769 }
2770 
/*
 * Task context handler: start or stop the RX Block Ack session whose
 * parameters were stashed in sc->ba_* by the ampdu_rx_start/stop hooks.
 */
void
iwx_ba_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int s = splnet();

	/* Bail out if the driver is being shut down. */
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (sc->ba_start)
		iwx_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
		    sc->ba_winsize, 1);
	else
		iwx_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
2794 
2795 /*
2796  * This function is called by upper layer when an ADDBA request is received
2797  * from another STA and before the ADDBA response is sent.
2798  */
2799 int
2800 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2801     uint8_t tid)
2802 {
2803 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2804 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
2805 
2806 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS)
2807 		return ENOSPC;
2808 
2809 	sc->ba_start = 1;
2810 	sc->ba_tid = tid;
2811 	sc->ba_ssn = htole16(ba->ba_winstart);
2812 	sc->ba_winsize = htole16(ba->ba_winsize);
2813 	iwx_add_task(sc, systq, &sc->ba_task);
2814 
2815 	return EBUSY;
2816 }
2817 
2818 /*
2819  * This function is called by upper layer on teardown of an HT-immediate
2820  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2821  */
2822 void
2823 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2824     uint8_t tid)
2825 {
2826 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
2827 
2828 	sc->ba_start = 0;
2829 	sc->ba_tid = tid;
2830 	iwx_add_task(sc, systq, &sc->ba_task);
2831 }
2832 
/*
 * Read the mac address from WFMP registers.
 * The registers hold the address bytes in reversed order, hence the
 * byte shuffling below.  Returns EBUSY if the NIC cannot be locked.
 */
int
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	const uint8_t *hw_addr;
	uint32_t mac_addr0, mac_addr1;

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Fix byte order so the per-byte reversal below is portable. */
	mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
	mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));

	hw_addr = (const uint8_t *)&mac_addr0;
	data->hw_addr[0] = hw_addr[3];
	data->hw_addr[1] = hw_addr[2];
	data->hw_addr[2] = hw_addr[1];
	data->hw_addr[3] = hw_addr[0];

	hw_addr = (const uint8_t *)&mac_addr1;
	data->hw_addr[4] = hw_addr[1];
	data->hw_addr[5] = hw_addr[0];

	iwx_nic_unlock(sc);
	return 0;
}
2859 
2860 int
2861 iwx_is_valid_mac_addr(const uint8_t *addr)
2862 {
2863 	static const uint8_t reserved_mac[] = {
2864 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2865 	};
2866 
2867 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
2868 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
2869 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
2870 	    !ETHER_IS_MULTICAST(addr));
2871 }
2872 
/*
 * Fetch NVM contents from the firmware via NVM_GET_INFO and parse
 * them into sc->sc_nvm: MAC address, SKU capabilities, antenna masks,
 * and the channel map.  Returns 0 on success or an errno.
 */
int
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory.  So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* The MAC address comes from CSR registers, not from the NVM. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* The channel profile layout differs between v3 and v4. */
	if (v4) {
		iwx_init_channel_map(sc, NULL,
		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
	} else {
		rsp_v3 = (void *)rsp;
		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
		    NULL, IWX_NUM_CHANNELS_V1);
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
2959 
2960 int
2961 iwx_load_firmware(struct iwx_softc *sc)
2962 {
2963 	struct iwx_fw_sects *fws;
2964 	int err, w;
2965 
2966 	sc->sc_uc.uc_intr = 0;
2967 
2968 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
2969 	err = iwx_ctxt_info_init(sc, fws);
2970 	if (err) {
2971 		printf("%s: could not init context info\n", DEVNAME(sc));
2972 		return err;
2973 	}
2974 
2975 	/* wait for the firmware to load */
2976 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2977 		err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", MSEC_TO_NSEC(100));
2978 	}
2979 	if (err || !sc->sc_uc.uc_ok)
2980 		printf("%s: could not load firmware\n", DEVNAME(sc));
2981 
2982 	iwx_ctxt_info_free_fw_img(sc);
2983 
2984 	if (!sc->sc_uc.uc_ok)
2985 		return EINVAL;
2986 
2987 	return err;
2988 }
2989 
/*
 * Bring the NIC out of reset and start loading firmware.
 * Returns 0 on success, or an errno from NIC init / firmware load.
 */
int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Ack any pending interrupts before we mask them. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	/* Enabled only after NIC init so we do not race the handler. */
	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3017 
3018 int
3019 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3020 {
3021 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3022 		.valid = htole32(valid_tx_ant),
3023 	};
3024 
3025 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3026 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3027 }
3028 
3029 int
3030 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3031 {
3032 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3033 
3034 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3035 	phy_cfg_cmd.calib_control.event_trigger =
3036 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3037 	phy_cfg_cmd.calib_control.flow_trigger =
3038 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3039 
3040 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3041 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3042 }
3043 
3044 int
3045 iwx_send_dqa_cmd(struct iwx_softc *sc)
3046 {
3047 	struct iwx_dqa_enable_cmd dqa_cmd = {
3048 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3049 	};
3050 	uint32_t cmd_id;
3051 
3052 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3053 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3054 }
3055 
/*
 * Read the firmware image from disk (if not cached), start it on the
 * device, and perform post-alive setup.  Returns 0 or an errno.
 */
int
iwx_load_ucode_wait_alive(struct iwx_softc *sc)
{
	int err;

	err = iwx_read_firmware(sc);
	if (err == 0)
		err = iwx_start_fw(sc);
	if (err == 0)
		iwx_post_alive(sc);

	return err;
}
3073 
/*
 * Load the firmware and run its "init" phase: send the extended config
 * and NVM-access-complete commands, then wait for the init-complete
 * notification.  If 'readnvm' is set, also read the NVM and adopt its
 * MAC address unless one was already configured.
 */
int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};
	int err;

	/* With the radio switched off we can only proceed to read NVM. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	/* Cleared before load; set by the notification interrupt path. */
	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		printf("%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err)
		return err;

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err)
		return err;

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
		    SEC_TO_NSEC(2));
		if (err)
			return err;
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		if (err) {
			printf("%s: failed to read nvm\n", DEVNAME(sc));
			return err;
		}
		/* Only adopt the NVM MAC if none was set yet. */
		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
			    sc->sc_nvm.hw_addr);

	}
	return 0;
}
3132 
3133 int
3134 iwx_config_ltr(struct iwx_softc *sc)
3135 {
3136 	struct iwx_ltr_config_cmd cmd = {
3137 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3138 	};
3139 
3140 	if (!sc->sc_ltr_enabled)
3141 		return 0;
3142 
3143 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3144 }
3145 
/*
 * Publish the DMA address of the RX buffer at 'idx' in the "free"
 * descriptor ring and flush the update to memory for the device.
 */
void
iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
{
	struct iwx_rx_data *data = &ring->data[idx];

	/*
	 * Each free descriptor is one little-endian 64-bit word.
	 * NOTE(review): the low 12 bits are OR'ed with the ring index,
	 * presumably the RB ID the device echoes back on completion --
	 * confirm against the firmware interface definition.
	 */
	((uint64_t *)ring->desc)[idx] =
	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
	    idx * sizeof(uint64_t), sizeof(uint64_t),
	    BUS_DMASYNC_PREWRITE);
}
3157 
/*
 * Allocate an mbuf cluster of at least 'size' bytes, map it for DMA,
 * and install it in RX ring slot 'idx'.  Returns 0 or an errno.
 */
int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MCLGETI(m, M_DONTWAIT, NULL, IWX_RBUF_SIZE);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot already had a buffer we are replacing it; a load
	 * failure below would leave the ring slot empty, which the
	 * hardware cannot tolerate -- hence 'fatal'.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("%s: could not load RX mbuf", DEVNAME(sc));
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx);

	return 0;
}
3204 
3205 int
3206 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3207     struct iwx_rx_mpdu_desc *desc)
3208 {
3209 	int energy_a, energy_b;
3210 
3211 	energy_a = desc->v1.energy_a;
3212 	energy_b = desc->v1.energy_b;
3213 	energy_a = energy_a ? -energy_a : -256;
3214 	energy_b = energy_b ? -energy_b : -256;
3215 	return MAX(energy_a, energy_b);
3216 }
3217 
3218 void
3219 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3220     struct iwx_rx_data *data)
3221 {
3222 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3223 
3224 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3225 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3226 
3227 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3228 }
3229 
3230 /*
3231  * Retrieve the average noise (in dBm) among receivers.
3232  */
3233 int
3234 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3235 {
3236 	int i, total, nbant, noise;
3237 
3238 	total = nbant = noise = 0;
3239 	for (i = 0; i < 3; i++) {
3240 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3241 		if (noise) {
3242 			total += noise;
3243 			nbant++;
3244 		}
3245 	}
3246 
3247 	/* There should be at least one antenna but check anyway. */
3248 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3249 }
3250 
/*
 * Post-process a hardware-decrypted CCMP frame: verify the ExtIV bit
 * and perform replay detection against the per-TID receive sequence
 * counter.  Returns 0 if the frame is acceptable, 1 to drop it.
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k = &ni->ni_pairwise_key;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	/* The CCMP header starts right after the 802.11 header. */
	ivp = (uint8_t *)wh + hdrlen;

	/* Check that ExtIV bit is set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	/* Replay state is kept per TID; non-QoS frames use TID 0. */
	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0]       |
	     (uint64_t)ivp[1] <<  8 |
	     (uint64_t)ivp[4] << 16 |
	     (uint64_t)ivp[5] << 24 |
	     (uint64_t)ivp[6] << 32 |
	     (uint64_t)ivp[7] << 40;
	if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
3297 
3298 void
3299 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3300     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3301     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3302     struct mbuf_list *ml)
3303 {
3304 	struct ieee80211com *ic = &sc->sc_ic;
3305 	struct ieee80211_frame *wh;
3306 	struct ieee80211_node *ni;
3307 	struct ieee80211_channel *bss_chan;
3308 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3309 	struct ifnet *ifp = IC2IFP(ic);
3310 
3311 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3312 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3313 
3314 	wh = mtod(m, struct ieee80211_frame *);
3315 	ni = ieee80211_find_rxnode(ic, wh);
3316 	if (ni == ic->ic_bss) {
3317 		/*
3318 		 * We may switch ic_bss's channel during scans.
3319 		 * Record the current channel so we can restore it later.
3320 		 */
3321 		bss_chan = ni->ni_chan;
3322 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3323 	}
3324 	ni->ni_chan = &ic->ic_channels[chanidx];
3325 
3326 	/* Handle hardware decryption. */
3327 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3328 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3329 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3330 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
3331 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3332 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3333 			ic->ic_stats.is_ccmp_dec_errs++;
3334 			ifp->if_ierrors++;
3335 			m_freem(m);
3336 			ieee80211_release_node(ic, ni);
3337 			return;
3338 		}
3339 		/* Check whether decryption was successful or not. */
3340 		if ((rx_pkt_status &
3341 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3342 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3343 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3344 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3345 			ic->ic_stats.is_ccmp_dec_errs++;
3346 			ifp->if_ierrors++;
3347 			m_freem(m);
3348 			ieee80211_release_node(ic, ni);
3349 			return;
3350 		}
3351 		if (iwx_ccmp_decap(sc, m, ni) != 0) {
3352 			ifp->if_ierrors++;
3353 			m_freem(m);
3354 			ieee80211_release_node(ic, ni);
3355 			return;
3356 		}
3357 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3358 	}
3359 
3360 #if NBPFILTER > 0
3361 	if (sc->sc_drvbpf != NULL) {
3362 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3363 		uint16_t chan_flags;
3364 
3365 		tap->wr_flags = 0;
3366 		if (is_shortpre)
3367 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3368 		tap->wr_chan_freq =
3369 		    htole16(ic->ic_channels[chanidx].ic_freq);
3370 		chan_flags = ic->ic_channels[chanidx].ic_flags;
3371 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3372 			chan_flags &= ~IEEE80211_CHAN_HT;
3373 		tap->wr_chan_flags = htole16(chan_flags);
3374 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3375 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3376 		tap->wr_tsft = device_timestamp;
3377 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3378 			uint8_t mcs = (rate_n_flags &
3379 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3380 			    IWX_RATE_HT_MCS_NSS_MSK));
3381 			tap->wr_rate = (0x80 | mcs);
3382 		} else {
3383 			uint8_t rate = (rate_n_flags &
3384 			    IWX_RATE_LEGACY_RATE_MSK);
3385 			switch (rate) {
3386 			/* CCK rates. */
3387 			case  10: tap->wr_rate =   2; break;
3388 			case  20: tap->wr_rate =   4; break;
3389 			case  55: tap->wr_rate =  11; break;
3390 			case 110: tap->wr_rate =  22; break;
3391 			/* OFDM rates. */
3392 			case 0xd: tap->wr_rate =  12; break;
3393 			case 0xf: tap->wr_rate =  18; break;
3394 			case 0x5: tap->wr_rate =  24; break;
3395 			case 0x7: tap->wr_rate =  36; break;
3396 			case 0x9: tap->wr_rate =  48; break;
3397 			case 0xb: tap->wr_rate =  72; break;
3398 			case 0x1: tap->wr_rate =  96; break;
3399 			case 0x3: tap->wr_rate = 108; break;
3400 			/* Unknown rate: should not happen. */
3401 			default:  tap->wr_rate =   0;
3402 			}
3403 		}
3404 
3405 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
3406 		    m, BPF_DIRECTION_IN);
3407 	}
3408 #endif
3409 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
3410 	/*
3411 	 * ieee80211_inputm() might have changed our BSS.
3412 	 * Restore ic_bss's channel if we are still in the same BSS.
3413 	 */
3414 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
3415 		ni->ni_chan = bss_chan;
3416 	ieee80211_release_node(ic, ni);
3417 }
3418 
/*
 * Process one received MPDU from the multi-queue RX path: validate
 * status and length, strip the hardware's 2-byte header padding if
 * present, extract PHY metadata, and hand the frame to iwx_rx_frame().
 */
void
iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
    size_t maxlen, struct mbuf_list *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_rxinfo rxi;
	struct iwx_rx_mpdu_desc *desc;
	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
	int rssi;
	uint8_t chanidx;
	uint16_t phy_info;

	desc = (struct iwx_rx_mpdu_desc *)pktdata;

	/* Drop frames which failed CRC or overran the FIFO. */
	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
		m_freem(m);
		return; /* drop */
	}

	len = le16toh(desc->mpdu_len);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Allow control frames in monitor mode. */
		if (len < sizeof(struct ieee80211_frame_cts)) {
			ic->ic_stats.is_rx_tooshort++;
			IC2IFP(ic)->if_ierrors++;
			m_freem(m);
			return;
		}
	} else if (len < sizeof(struct ieee80211_frame)) {
		ic->ic_stats.is_rx_tooshort++;
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}
	/*
	 * Frame must fit within the RX buffer after the descriptor.
	 * NOTE(review): assumes maxlen >= sizeof(*desc); the subtraction
	 * is unsigned -- confirm the caller guarantees this.
	 */
	if (len > maxlen - sizeof(*desc)) {
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}

	/* The 802.11 frame follows the descriptor in the RX buffer. */
	m->m_data = pktdata + sizeof(*desc);
	m->m_pkthdr.len = m->m_len = len;

	/* Account for padding following the frame header. */
	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
		if (type == IEEE80211_FC0_TYPE_CTL) {
			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
			case IEEE80211_FC0_SUBTYPE_CTS:
				hdrlen = sizeof(struct ieee80211_frame_cts);
				break;
			case IEEE80211_FC0_SUBTYPE_ACK:
				hdrlen = sizeof(struct ieee80211_frame_ack);
				break;
			default:
				hdrlen = sizeof(struct ieee80211_frame_min);
				break;
			}
		} else
			hdrlen = ieee80211_get_hdrlen(wh);

		if ((le16toh(desc->status) &
		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
			/* Padding is inserted after the IV. */
			hdrlen += IEEE80211_CCMP_HDRLEN;
		}

		/* Shift the header forward over the 2 pad bytes. */
		memmove(m->m_data + 2, m->m_data, hdrlen);
		m_adj(m, 2);
	}

	phy_info = le16toh(desc->phy_info);
	rate_n_flags = le32toh(desc->v1.rate_n_flags);
	chanidx = desc->v1.channel;
	device_timestamp = desc->v1.gp2_on_air_rise;

	rssi = iwx_rxmq_get_signal_strength(sc, desc);
	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */

	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);

	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
	    rate_n_flags, device_timestamp, &rxi, ml);
}
3510 
3511 void
3512 iwx_rx_tx_cmd_single(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3513     struct iwx_node *in)
3514 {
3515 	struct ieee80211com *ic = &sc->sc_ic;
3516 	struct ifnet *ifp = IC2IFP(ic);
3517 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
3518 	int status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
3519 	int txfail;
3520 
3521 	KASSERT(tx_resp->frame_count == 1);
3522 
3523 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
3524 	    status != IWX_TX_STATUS_DIRECT_DONE);
3525 
3526 	if (txfail)
3527 		ifp->if_oerrors++;
3528 }
3529 
3530 void
3531 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
3532 {
3533 	struct ieee80211com *ic = &sc->sc_ic;
3534 
3535 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
3536 	    BUS_DMASYNC_POSTWRITE);
3537 	bus_dmamap_unload(sc->sc_dmat, txd->map);
3538 	m_freem(txd->m);
3539 	txd->m = NULL;
3540 
3541 	KASSERT(txd->in);
3542 	ieee80211_release_node(ic, &txd->in->in_ni);
3543 	txd->in = NULL;
3544 }
3545 
3546 void
3547 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3548     struct iwx_rx_data *data)
3549 {
3550 	struct ieee80211com *ic = &sc->sc_ic;
3551 	struct ifnet *ifp = IC2IFP(ic);
3552 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
3553 	int idx = cmd_hdr->idx;
3554 	int qid = cmd_hdr->qid;
3555 	struct iwx_tx_ring *ring = &sc->txq[qid];
3556 	struct iwx_tx_data *txd;
3557 
3558 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
3559 	    BUS_DMASYNC_POSTREAD);
3560 
3561 	sc->sc_tx_timer = 0;
3562 
3563 	txd = &ring->data[idx];
3564 	if (txd->m == NULL)
3565 		return;
3566 
3567 	iwx_rx_tx_cmd_single(sc, pkt, txd->in);
3568 	iwx_txd_done(sc, txd);
3569 	iwx_tx_update_byte_tbl(ring, idx, 0, 0);
3570 
3571 	/*
3572 	 * XXX Sometimes we miss Tx completion interrupts.
3573 	 * We cannot check Tx success/failure for affected frames; just free
3574 	 * the associated mbuf and release the associated node reference.
3575 	 */
3576 	while (ring->tail != idx) {
3577 		txd = &ring->data[ring->tail];
3578 		if (txd->m != NULL) {
3579 			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
3580 			    __func__, ring->tail, idx));
3581 			iwx_txd_done(sc, txd);
3582 			iwx_tx_update_byte_tbl(ring, idx, 0, 0);
3583 			ring->queued--;
3584 		}
3585 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
3586 	}
3587 
3588 	if (--ring->queued < IWX_TX_RING_LOMARK) {
3589 		sc->qfullmsk &= ~(1 << ring->qid);
3590 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
3591 			ifq_clr_oactive(&ifp->if_snd);
3592 			/*
3593 			 * Well, we're in interrupt context, but then again
3594 			 * I guess net80211 does all sorts of stunts in
3595 			 * interrupt context, so maybe this is no biggie.
3596 			 */
3597 			(*ifp->if_start)(ifp);
3598 		}
3599 	}
3600 }
3601 
3602 void
3603 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3604     struct iwx_rx_data *data)
3605 {
3606 	struct ieee80211com *ic = &sc->sc_ic;
3607 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
3608 	uint32_t missed;
3609 
3610 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
3611 	    (ic->ic_state != IEEE80211_S_RUN))
3612 		return;
3613 
3614 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3615 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
3616 
3617 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
3618 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
3619 		if (ic->ic_if.if_flags & IFF_DEBUG)
3620 			printf("%s: receiving no beacons from %s; checking if "
3621 			    "this AP is still responding to probe requests\n",
3622 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
3623 		/*
3624 		 * Rather than go directly to scan state, try to send a
3625 		 * directed probe request first. If that fails then the
3626 		 * state machine will drop us into scanning after timing
3627 		 * out waiting for a probe response.
3628 		 */
3629 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
3630 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
3631 	}
3632 
3633 }
3634 
/*
 * Add or remove the binding between a MAC context and a PHY context
 * in the firmware.  'action' is IWX_FW_CTXT_ACTION_{ADD,REMOVE}.
 */
int
iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
{
	struct iwx_binding_cmd cmd;
	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
	uint32_t status;

	/* Catch driver state machine bugs early. */
	if (action == IWX_FW_CTXT_ACTION_ADD && active)
		panic("binding already added");
	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
		panic("binding already removed");

	if (phyctxt == NULL) /* XXX race with iwx_stop() */
		return EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Only one MAC per binding; mark the remaining slots invalid. */
	cmd.macs[0] = htole32(mac_id);
	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);

	/* Without CDB support, everything runs on the 2.4GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	status = 0;
	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
	    &cmd, &status);
	/* Firmware signals failure via a non-zero status word. */
	if (err == 0 && status != 0)
		err = EIO;

	return err;
}
3677 
/*
 * Send a PHY context command using the larger "ultra high band" layout
 * of the channel info struct.  Called via iwx_phy_ctxt_cmd() when the
 * firmware advertises IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS.
 */
int
iwx_phy_ctxt_cmd_uhb(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
    uint32_t apply_time)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);
	cmd.apply_time = htole32(apply_time);

	/* 20MHz-wide channel, control channel at the primary position. */
	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
	cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
	cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;

	/* Static chains stay on while idle; dynamic chains for MIMO. */
	idle_cnt = chains_static;
	active_cnt = chains_dynamic;
	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
					IWX_PHY_RX_CHAIN_VALID_POS);
	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
	cmd.rxchain_info |= htole32(active_cnt <<
	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	cmd.txchain_info = htole32(iwx_fw_valid_tx_ant(sc));

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
3711 
/*
 * Add, modify or remove a PHY (channel) context in the firmware.
 * Dispatches to the UHB variant when the firmware uses the larger
 * channel info layout.
 */
int
iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
    uint32_t apply_time)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	/*
	 * Intel increased the size of the fw_channel_info struct and neglected
	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
	 * member in the middle.
	 * To keep things simple we use a separate function to handle the larger
	 * variant of the phy context command.
	 */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
		return iwx_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
		    chains_dynamic, action, apply_time);

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);
	cmd.apply_time = htole32(apply_time);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	/*
	 * NOTE(review): no htole32() here unlike the UHB variant --
	 * presumably the non-UHB channel field is a single byte; confirm
	 * against the struct definition.
	 */
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
	cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;

	/* Static chains stay on while idle; dynamic chains for MIMO. */
	idle_cnt = chains_static;
	active_cnt = chains_dynamic;
	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
					IWX_PHY_RX_CHAIN_VALID_POS);
	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
	cmd.rxchain_info |= htole32(active_cnt <<
	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	cmd.txchain_info = htole32(iwx_fw_valid_tx_ant(sc));

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
3756 
/*
 * Queue a host command on the firmware command queue and, unless
 * IWX_CMD_ASYNC is set, sleep until the firmware acknowledges it.
 * For IWX_CMD_WANT_RESP commands the response packet is returned in
 * hcmd->resp_pkt and must be released with iwx_free_resp().
 * Returns 0 or an errno.
 */
int
iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tfh_tfd *desc;
	struct iwx_tx_data *txdata;
	struct iwx_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint64_t addr;
	int err = 0, i, paylen, off, s;
	int idx, code, async, group_id;
	size_t hdrlen, datasz;
	uint8_t *data;
	/* Snapshot so we can detect a device reset while sleeping. */
	int generation = sc->sc_generation;

	code = hcmd->id;
	async = hcmd->flags & IWX_CMD_ASYNC;
	idx = ring->cur;

	/* Total payload size across all data segments. */
	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* If this command waits for a response, allocate response buffer. */
	hcmd->resp_pkt = NULL;
	if (hcmd->flags & IWX_CMD_WANT_RESP) {
		uint8_t *resp_buf;
		KASSERT(!async);
		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
		if (sc->sc_cmd_resp_pkt[idx] != NULL)
			return ENOSPC;
		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (resp_buf == NULL)
			return ENOMEM;
		sc->sc_cmd_resp_pkt[idx] = resp_buf;
		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
	} else {
		sc->sc_cmd_resp_pkt[idx] = NULL;
	}

	s = splnet();

	desc = &ring->desc[idx];
	txdata = &ring->data[idx];

	/* Commands in a non-zero group use the wide header format. */
	group_id = iwx_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
			printf("%s: firmware command too long (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = EINVAL;
			goto out;
		}
		/* Borrow an mbuf cluster as a temporary DMA buffer. */
		m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
		if (m == NULL) {
			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwx_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Command fits in the ring's pre-allocated slot. */
		cmd = &ring->cmd[idx];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = idx;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwx_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = idx;
		data = cmd->data;
	}

	/* Gather all payload segments into the command buffer. */
	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* A single TFD transfer buffer covers header plus payload. */
	desc->tbs[0].tb_len = htole16(hdrlen + paylen);
	addr = htole64((uint64_t)paddr);
	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
	desc->num_tbs = 1;

	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
	/* Kick command ring. */
	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
	ring->queued++;
	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);

	if (!async) {
		/* iwx_cmd_done() wakes us up on this descriptor. */
		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
				goto out;
			}

			/* Response buffer will be freed in iwx_free_resp(). */
			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
			sc->sc_cmd_resp_pkt[idx] = NULL;
		} else if (generation == sc->sc_generation) {
			/* Timeout or signal: discard the response buffer. */
			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
			    sc->sc_cmd_resp_len[idx]);
			sc->sc_cmd_resp_pkt[idx] = NULL;
		}
	}
 out:
	splx(s);

	return err;
}
3915 
3916 int
3917 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
3918     uint16_t len, const void *data)
3919 {
3920 	struct iwx_host_cmd cmd = {
3921 		.id = id,
3922 		.len = { len, },
3923 		.data = { data, },
3924 		.flags = flags,
3925 	};
3926 
3927 	return iwx_send_cmd(sc, &cmd);
3928 }
3929 
/*
 * Send a command synchronously and extract the 32-bit status word from
 * the firmware's generic response.  Returns 0 with *status filled in,
 * or an errno (EIO on a malformed or failed response).
 */
int
iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
    uint32_t *status)
{
	struct iwx_rx_packet *pkt;
	struct iwx_cmd_response *resp;
	int err, resp_len;

	/* The caller must not have requested a response itself. */
	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
	cmd->flags |= IWX_CMD_WANT_RESP;
	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);

	err = iwx_send_cmd(sc, cmd);
	if (err)
		return err;

	pkt = cmd->resp_pkt;
	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
		return EIO;

	/* The response must be exactly one iwx_cmd_response. */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		iwx_free_resp(sc, cmd);
		return EIO;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
	iwx_free_resp(sc, cmd);
	return err;
}
3961 
3962 int
3963 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
3964     const void *data, uint32_t *status)
3965 {
3966 	struct iwx_host_cmd cmd = {
3967 		.id = id,
3968 		.len = { len, },
3969 		.data = { data, },
3970 	};
3971 
3972 	return iwx_send_cmd_status(sc, &cmd, status);
3973 }
3974 
3975 void
3976 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
3977 {
3978 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
3979 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
3980 	hcmd->resp_pkt = NULL;
3981 }
3982 
3983 void
3984 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
3985 {
3986 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
3987 	struct iwx_tx_data *data;
3988 
3989 	if (qid != IWX_DQA_CMD_QUEUE) {
3990 		return;	/* Not a command ack. */
3991 	}
3992 
3993 	data = &ring->data[idx];
3994 
3995 	if (data->m != NULL) {
3996 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
3997 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3998 		bus_dmamap_unload(sc->sc_dmat, data->map);
3999 		m_freem(data->m);
4000 		data->m = NULL;
4001 	}
4002 	wakeup(&ring->desc[idx]);
4003 
4004 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
4005 	if (ring->queued == 0) {
4006 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
4007 			DEVNAME(sc), code));
4008 	} else if (ring->queued > 0)
4009 		ring->queued--;
4010 }
4011 
4012 /*
4013  * Fill in various bit for management frames, and leave them
4014  * unfilled for data frames (firmware takes care of that).
4015  * Return the selected TX rate.
4016  */
4017 const struct iwx_rate *
4018 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
4019     struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
4020 {
4021 	struct ieee80211com *ic = &sc->sc_ic;
4022 	struct ieee80211_node *ni = &in->in_ni;
4023 	struct ieee80211_rateset *rs = &ni->ni_rates;
4024 	const struct iwx_rate *rinfo;
4025 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4026 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
4027 	int ridx, rate_flags;
4028 	uint32_t flags = 0;
4029 
4030 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4031 	    type != IEEE80211_FC0_TYPE_DATA) {
4032 		/* for non-data, use the lowest supported rate */
4033 		ridx = min_ridx;
4034 		flags |= IWX_TX_FLAGS_CMD_RATE;
4035 	} else if (ic->ic_fixed_mcs != -1) {
4036 		ridx = sc->sc_fixed_ridx;
4037 		flags |= IWX_TX_FLAGS_CMD_RATE;
4038 	} else if (ic->ic_fixed_rate != -1) {
4039 		ridx = sc->sc_fixed_ridx;
4040 		flags |= IWX_TX_FLAGS_CMD_RATE;
4041 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
4042 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
4043 	} else {
4044 		uint8_t rval;
4045 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
4046 		ridx = iwx_rval2ridx(rval);
4047 		if (ridx < min_ridx)
4048 			ridx = min_ridx;
4049 	}
4050 
4051 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
4052 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
4053 		flags |= IWX_TX_FLAGS_HIGH_PRI;
4054 	tx->flags = htole32(flags);
4055 
4056 	rinfo = &iwx_rates[ridx];
4057 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
4058 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
4059 	else
4060 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
4061 	if (IWX_RIDX_IS_CCK(ridx))
4062 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
4063 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4064 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
4065 		rate_flags |= IWX_RATE_MCS_HT_MSK;
4066 		if (ieee80211_node_supports_ht_sgi20(ni))
4067 			rate_flags |= IWX_RATE_MCS_SGI_MSK;
4068 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4069 	} else
4070 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4071 
4072 	return rinfo;
4073 }
4074 
/*
 * Update the byte count table entry for a Tx queue slot. The firmware
 * reads this table to learn the frame's length and how many 64-byte
 * chunks of the TFD it must fetch into SRAM.
 */
void
iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
    uint16_t num_tbs)
{
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;
	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;

	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	/* Before AX210, the HW expects DW */
	len = howmany(len, 4);
	/* Entry layout: length in dwords, fetch chunk count in bits 12-15. */
	bc_ent = htole16(len | (num_fetch_chunks << 12));
	scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
4101 
/*
 * Queue a frame for transmission on the Tx ring corresponding to EDCA
 * category 'ac'. Builds the Tx command and TFD, maps the mbuf for DMA,
 * and kicks the hardware write pointer. The mbuf is consumed on success
 * and freed on mapping failure. Returns 0 or an errno value.
 */
int
iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_tx_ring *ring;
	struct iwx_tx_data *data;
	struct iwx_tfh_tfd *desc;
	struct iwx_device_cmd *cmd;
	struct iwx_tx_cmd_gen2 *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	const struct iwx_rate *rinfo;
	uint64_t paddr;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint16_t num_tbs;
	uint8_t type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/*
	 * Map EDCA categories to Tx data queues.
	 *
	 * We use static data queue assignments even in DQA mode. We do not
	 * need to share Tx queues between stations because we only implement
	 * client mode; the firmware's station table contains only one entry
	 * which represents our access point.
	 *
	 * Tx aggregation will require additional queues (one queue per TID
	 * for which aggregation is enabled) but we do not implement this yet.
	 */
	ring = &sc->txq[ac + IWX_DQA_AUX_QUEUE + 1];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Prepare the Tx command header for this ring slot. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWX_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select the Tx rate and fill rate-related command fields. */
	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);

#if NBPFILTER > 0
	/* Feed a radiotap copy of the frame to any attached bpf listener. */
	if (sc->sc_drvbpf != NULL) {
		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
		uint16_t chan_flags;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		chan_flags = ni->ni_chan->ic_flags;
		if (ic->ic_curmode != IEEE80211_MODE_11N)
			chan_flags &= ~IEEE80211_CHAN_HT;
		tap->wt_chan_flags = htole16(chan_flags);
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
			tap->wt_rate = rinfo->rate;
		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
		    m, BPF_DIRECTION_OUT);
	}
#endif

	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                k = ieee80211_get_txkey(ic, wh, ni);
		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
			/* Non-CCMP ciphers are encrypted in software. */
			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
				return ENOBUFS;
			/* 802.11 header may have moved. */
			wh = mtod(m, struct ieee80211_frame *);
			tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
		} else {
			k->k_tsc++;
			/* Hardware increments PN internally and adds IV. */
		}
	} else
		tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);

	totlen = m->m_pkthdr.len;

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		pad = 4 - (hdrlen & 3);
		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
	} else
		pad = 0;

	tx->len = htole16(totlen);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err && err != EFBIG) {
		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
		m_freem(m);
		return err;
	}
	if (err) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
			    err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;

	/* Fill TX descriptor. */
	/* TB0 and TB1 describe the command; payload follows in TB2+. */
	num_tbs = 2 + data->map->dm_nsegs;
	desc->num_tbs = htole16(num_tbs);

	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
	paddr = htole64(data->cmd_paddr);
	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));

	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
		paddr = htole64(seg->ds_addr);
		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
	}

	/* Flush mbuf, command, and descriptor writes before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWX_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4286 
4287 int
4288 iwx_flush_tx_path(struct iwx_softc *sc)
4289 {
4290 	struct iwx_tx_path_flush_cmd flush_cmd = {
4291 		.sta_id = htole32(IWX_STATION_ID),
4292 		.tid_mask = htole16(0xffff),
4293 	};
4294 	int err;
4295 
4296 	err = iwx_send_cmd_pdu(sc, IWX_TXPATH_FLUSH, 0,
4297 	    sizeof(flush_cmd), &flush_cmd);
4298 	if (err)
4299                 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
4300 	return err;
4301 }
4302 
4303 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
4304 
4305 int
4306 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
4307     struct iwx_beacon_filter_cmd *cmd)
4308 {
4309 	size_t len;
4310 
4311 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_BEACON_FILTER_V4))
4312 		len = sizeof(struct iwx_beacon_filter_cmd);
4313 	else
4314 		len = offsetof(struct iwx_beacon_filter_cmd,
4315 		    bf_threshold_absolute_low);
4316 
4317 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
4318 	    0, len, cmd);
4319 }
4320 
4321 int
4322 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
4323 {
4324 	struct iwx_beacon_filter_cmd cmd = {
4325 		IWX_BF_CMD_CONFIG_DEFAULTS,
4326 		.bf_enable_beacon_filter = htole32(1),
4327 		.ba_enable_beacon_abort = htole32(enable),
4328 	};
4329 
4330 	if (!sc->sc_bf.bf_enabled)
4331 		return 0;
4332 
4333 	sc->sc_bf.ba_enabled = enable;
4334 	return iwx_beacon_filter_send_cmd(sc, &cmd);
4335 }
4336 
4337 void
4338 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
4339     struct iwx_mac_power_cmd *cmd)
4340 {
4341 	struct ieee80211com *ic = &sc->sc_ic;
4342 	struct ieee80211_node *ni = &in->in_ni;
4343 	int dtim_period, dtim_msec, keep_alive;
4344 
4345 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
4346 	    in->in_color));
4347 	if (ni->ni_dtimperiod)
4348 		dtim_period = ni->ni_dtimperiod;
4349 	else
4350 		dtim_period = 1;
4351 
4352 	/*
4353 	 * Regardless of power management state the driver must set
4354 	 * keep alive period. FW will use it for sending keep alive NDPs
4355 	 * immediately after association. Check that keep alive period
4356 	 * is at least 3 * DTIM.
4357 	 */
4358 	dtim_msec = dtim_period * ni->ni_intval;
4359 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
4360 	keep_alive = roundup(keep_alive, 1000) / 1000;
4361 	cmd->keep_alive_seconds = htole16(keep_alive);
4362 
4363 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
4364 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4365 }
4366 
4367 int
4368 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
4369 {
4370 	int err;
4371 	int ba_enable;
4372 	struct iwx_mac_power_cmd cmd;
4373 
4374 	memset(&cmd, 0, sizeof(cmd));
4375 
4376 	iwx_power_build_cmd(sc, in, &cmd);
4377 
4378 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
4379 	    sizeof(cmd), &cmd);
4380 	if (err != 0)
4381 		return err;
4382 
4383 	ba_enable = !!(cmd.flags &
4384 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4385 	return iwx_update_beacon_abort(sc, in, ba_enable);
4386 }
4387 
4388 int
4389 iwx_power_update_device(struct iwx_softc *sc)
4390 {
4391 	struct iwx_device_power_cmd cmd = { };
4392 	struct ieee80211com *ic = &sc->sc_ic;
4393 
4394 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
4395 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4396 
4397 	return iwx_send_cmd_pdu(sc,
4398 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4399 }
4400 
4401 int
4402 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
4403 {
4404 	struct iwx_beacon_filter_cmd cmd = {
4405 		IWX_BF_CMD_CONFIG_DEFAULTS,
4406 		.bf_enable_beacon_filter = htole32(1),
4407 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
4408 	};
4409 	int err;
4410 
4411 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
4412 	if (err == 0)
4413 		sc->sc_bf.bf_enabled = 1;
4414 
4415 	return err;
4416 }
4417 
4418 int
4419 iwx_disable_beacon_filter(struct iwx_softc *sc)
4420 {
4421 	struct iwx_beacon_filter_cmd cmd;
4422 	int err;
4423 
4424 	memset(&cmd, 0, sizeof(cmd));
4425 
4426 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
4427 	if (err == 0)
4428 		sc->sc_bf.bf_enabled = 0;
4429 
4430 	return err;
4431 }
4432 
/*
 * Add or update the firmware's station table entry for our AP (or the
 * monitor-mode pseudo-station). With 'update' set, modifies the
 * existing entry instead of creating a new one.
 * Returns 0 on success, EIO if the firmware rejected the command.
 */
int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err;
	uint32_t status;
	struct ieee80211com *ic = &sc->sc_ic;

	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The station address may only be set when first adding the entry. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
	/* Tx aggregation is not implemented; keep all TIDs disabled. */
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWX_STA_MODIFY_TID_DISABLE_TX);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		add_sta_cmd.station_flags
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
		/* Translate the peer's minimum A-MPDU start spacing. */
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
4507 
4508 int
4509 iwx_add_aux_sta(struct iwx_softc *sc)
4510 {
4511 	struct iwx_add_sta_cmd cmd;
4512 	int err, qid = IWX_DQA_AUX_QUEUE;
4513 	uint32_t status;
4514 
4515 	memset(&cmd, 0, sizeof(cmd));
4516 	cmd.sta_id = IWX_AUX_STA_ID;
4517 	cmd.station_type = IWX_STA_AUX_ACTIVITY;
4518 	cmd.mac_id_n_color =
4519 	    htole32(IWX_FW_CMD_ID_AND_COLOR(IWX_MAC_INDEX_AUX, 0));
4520 	cmd.tid_disable_tx = htole16(0xffff);
4521 
4522 	status = IWX_ADD_STA_SUCCESS;
4523 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
4524 	    &status);
4525 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
4526 		return EIO;
4527 
4528 	return iwx_enable_txq(sc, IWX_AUX_STA_ID, qid, IWX_MGMT_TID,
4529 	    IWX_TX_RING_COUNT);
4530 }
4531 
4532 int
4533 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
4534 {
4535 	struct ieee80211com *ic = &sc->sc_ic;
4536 	struct iwx_rm_sta_cmd rm_sta_cmd;
4537 	int err;
4538 
4539 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
4540 		panic("sta already removed");
4541 
4542 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
4543 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
4544 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
4545 	else
4546 		rm_sta_cmd.sta_id = IWX_STATION_ID;
4547 
4548 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
4549 	    &rm_sta_cmd);
4550 
4551 	return err;
4552 }
4553 
/*
 * Fill the UMAC scan channel configuration array with all configured
 * channels, using the v1 or v2 layout depending on firmware support.
 * Returns the number of channel entries written.
 */
uint8_t
iwx_umac_scan_fill_channels(struct iwx_softc *sc,
    struct iwx_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Channel 0 is unused; firmware caps the number of entries. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		uint8_t channel_num;

		/* Skip channels which are not configured for this device. */
		if (c->ic_flags == 0)
			continue;

		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		if (isset(sc->sc_ucode_api,
		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
			/* v2 layout carries an explicit band field. */
			chan->v2.channel_num = channel_num;
			if (IEEE80211_IS_CHAN_2GHZ(c))
				chan->v2.band = IWX_PHY_BAND_24;
			else
				chan->v2.band = IWX_PHY_BAND_5;
			chan->v2.iter_count = 1;
			chan->v2.iter_interval = 0;
		} else {
			chan->v1.channel_num = channel_num;
			chan->v1.iter_count = 1;
			chan->v1.iter_interval = htole16(0);
		}
		/* Directed foreground scans probe for SSID 0. */
		if (n_ssids != 0 && !bgscan)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}
4594 
4595 int
4596 iwx_fill_probe_req_v1(struct iwx_softc *sc, struct iwx_scan_probe_req_v1 *preq1)
4597 {
4598 	struct iwx_scan_probe_req preq2;
4599 	int err, i;
4600 
4601 	err = iwx_fill_probe_req(sc, &preq2);
4602 	if (err)
4603 		return err;
4604 
4605 	preq1->mac_header = preq2.mac_header;
4606 	for (i = 0; i < nitems(preq1->band_data); i++)
4607 		preq1->band_data[i] = preq2.band_data[i];
4608 	preq1->common_data = preq2.common_data;
4609 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
4610 	return 0;
4611 }
4612 
/*
 * Build a probe request template for firmware-driven scans.
 * The frame is assembled in preq->buf and the offset/length fields
 * tell the firmware where the MAC header, per-band IEs, and common
 * IEs are located. Returns 0 or ENOBUFS if the buffer is too small.
 */
int
iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	/* Empty SSID element; the firmware fills in the SSID. */
	frm = (uint8_t *)(wh + 1);
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = 0;
	/* hardware inserts SSID */

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* 2 extra bytes for a second (extended rates) element. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		/* DS parameter set element; channel is filled by firmware. */
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}
	preq->band_data[0].len = htole16(frm - pos);

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
4705 
/*
 * Send the one-time UMAC scan configuration to the firmware:
 * antenna masks, legacy rates, dwell times, our MAC address, and the
 * list of channel numbers usable for scanning.
 * Returns 0 or an errno value.
 */
int
iwx_config_umac_scan(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWX_SCAN_CONFIG_RATE_1M |
	    IWX_SCAN_CONFIG_RATE_2M | IWX_SCAN_CONFIG_RATE_5M |
	    IWX_SCAN_CONFIG_RATE_11M | IWX_SCAN_CONFIG_RATE_6M |
	    IWX_SCAN_CONFIG_RATE_9M | IWX_SCAN_CONFIG_RATE_12M |
	    IWX_SCAN_CONFIG_RATE_18M | IWX_SCAN_CONFIG_RATE_24M |
	    IWX_SCAN_CONFIG_RATE_36M | IWX_SCAN_CONFIG_RATE_48M |
	    IWX_SCAN_CONFIG_RATE_54M);

	/* One extra byte per scan channel follows the fixed structure. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWX_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell.active = 10;
	scan_config->dwell.passive = 110;
	scan_config->dwell.fragmented = 44;
	scan_config->dwell.extended = 90;
	scan_config->out_of_channel_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
	scan_config->out_of_channel_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
	scan_config->suspend_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
	scan_config->suspend_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWX_AUX_STA_ID;
	scan_config->channel_flags = 0;

	/* List all configured channels (channel 0 is unused). */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWX_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWX_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwx_send_cmd(sc, &hcmd);
	free(scan_config, M_DEVBUF, cmd_size);
	return err;
}
4780 
/*
 * Compute the size of a UMAC scan request for the running firmware:
 * a version-dependent fixed part, the channel configuration array,
 * and a version-dependent tail.
 */
int
iwx_umac_scan_size(struct iwx_softc *sc)
{
	int base_size = IWX_SCAN_REQ_UMAC_SIZE_V1;
	int tail_size;

	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		base_size = IWX_SCAN_REQ_UMAC_SIZE_V8;
	else if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
		base_size = IWX_SCAN_REQ_UMAC_SIZE_V7;
#ifdef notyet
	else if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
		base_size = IWX_SCAN_REQ_UMAC_SIZE_V6;
#endif
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
		tail_size = sizeof(struct iwx_scan_req_umac_tail_v2);
	else
		tail_size = sizeof(struct iwx_scan_req_umac_tail_v1);

	return base_size + sizeof(struct iwx_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels + tail_size;
}
4803 
/*
 * Return a pointer to the channel parameter block within the scan
 * request, whose location depends on the request layout version
 * supported by the running firmware.
 */
struct iwx_scan_umac_chan_param *
iwx_get_scan_req_umac_chan_param(struct iwx_softc *sc,
    struct iwx_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return &req->v8.channel;

	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
		return &req->v7.channel;
#ifdef notyet
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
		return &req->v6.channel;
#endif
	return &req->v1.channel;
}
4819 
/*
 * Return a pointer to the variable-length data area within the scan
 * request, whose location depends on the request layout version
 * supported by the running firmware.
 */
void *
iwx_get_scan_req_umac_data(struct iwx_softc *sc, struct iwx_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return (void *)&req->v8.data;

	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
		return (void *)&req->v7.data;
#ifdef notyet
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
		return (void *)&req->v6.data;
#endif
	return (void *)&req->v1.data;

}
4835 
4836 /* adaptive dwell max budget time [TU] for full scan */
4837 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
4838 /* adaptive dwell max budget time [TU] for directed scan */
4839 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
4840 /* adaptive dwell default high band APs number */
4841 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
4842 /* adaptive dwell default low band APs number */
4843 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
4844 /* adaptive dwell default APs number in social channels (1, 6, 11) */
4845 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
4846 
4847 int
4848 iwx_umac_scan(struct iwx_softc *sc, int bgscan)
4849 {
4850 	struct ieee80211com *ic = &sc->sc_ic;
4851 	struct iwx_host_cmd hcmd = {
4852 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
4853 		.len = { 0, },
4854 		.data = { NULL, },
4855 		.flags = 0,
4856 	};
4857 	struct iwx_scan_req_umac *req;
4858 	void *cmd_data, *tail_data;
4859 	struct iwx_scan_req_umac_tail_v2 *tail;
4860 	struct iwx_scan_req_umac_tail_v1 *tailv1;
4861 	struct iwx_scan_umac_chan_param *chanparam;
4862 	size_t req_len;
4863 	int err, async = bgscan;
4864 
4865 	req_len = iwx_umac_scan_size(sc);
4866 	if ((req_len < IWX_SCAN_REQ_UMAC_SIZE_V1 +
4867 	    sizeof(struct iwx_scan_req_umac_tail_v1)) ||
4868 	    req_len > IWX_MAX_CMD_PAYLOAD_SIZE)
4869 		return ERANGE;
4870 	req = malloc(req_len, M_DEVBUF,
4871 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
4872 	if (req == NULL)
4873 		return ENOMEM;
4874 
4875 	hcmd.len[0] = (uint16_t)req_len;
4876 	hcmd.data[0] = (void *)req;
4877 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
4878 
4879 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4880 		req->v7.adwell_default_n_aps_social =
4881 			IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
4882 		req->v7.adwell_default_n_aps =
4883 			IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
4884 
4885 		if (ic->ic_des_esslen != 0)
4886 			req->v7.adwell_max_budget =
4887 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
4888 		else
4889 			req->v7.adwell_max_budget =
4890 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
4891 
4892 		req->v7.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4893 		req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = 0;
4894 		req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = 0;
4895 
4896 		if (isset(sc->sc_ucode_api,
4897 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4898 			req->v8.active_dwell[IWX_SCAN_LB_LMAC_IDX] = 10;
4899 			req->v8.passive_dwell[IWX_SCAN_LB_LMAC_IDX] = 110;
4900 		} else {
4901 			req->v7.active_dwell = 10;
4902 			req->v7.passive_dwell = 110;
4903 			req->v7.fragmented_dwell = 44;
4904 		}
4905 	} else {
4906 		/* These timings correspond to iwlwifi's UNASSOC scan. */
4907 		req->v1.active_dwell = 10;
4908 		req->v1.passive_dwell = 110;
4909 		req->v1.fragmented_dwell = 44;
4910 		req->v1.extended_dwell = 90;
4911 
4912 		req->v1.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4913 	}
4914 
4915 	if (bgscan) {
4916 		const uint32_t timeout = htole32(120);
4917 		if (isset(sc->sc_ucode_api,
4918 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4919 			req->v8.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4920 			req->v8.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4921 		} else if (isset(sc->sc_ucode_api,
4922 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4923 			req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4924 			req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4925 		} else {
4926 			req->v1.max_out_time = timeout;
4927 			req->v1.suspend_time = timeout;
4928 		}
4929 	}
4930 
4931 	req->ooc_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4932 
4933 	cmd_data = iwx_get_scan_req_umac_data(sc, req);
4934 	chanparam = iwx_get_scan_req_umac_chan_param(sc, req);
4935 	chanparam->count = iwx_umac_scan_fill_channels(sc,
4936 	    (struct iwx_scan_channel_cfg_umac *)cmd_data,
4937 	    ic->ic_des_esslen != 0, bgscan);
4938 	chanparam->flags = 0;
4939 
4940 	tail_data = cmd_data + sizeof(struct iwx_scan_channel_cfg_umac) *
4941 	    sc->sc_capa_n_scan_channels;
4942 	tail = tail_data;
4943 	/* tail v1 layout differs in preq and direct_scan member fields. */
4944 	tailv1 = tail_data;
4945 
4946 	req->general_flags = htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
4947 	    IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
4948 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4949 		req->v8.general_flags2 =
4950 			IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
4951 	}
4952 
4953 #if 0 /* XXX Active scan causes firmware errors after association. */
4954 	/* Check if we're doing an active directed scan. */
4955 	if (ic->ic_des_esslen != 0) {
4956 		if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
4957 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4958 			tail->direct_scan[0].len = ic->ic_des_esslen;
4959 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
4960 			    ic->ic_des_esslen);
4961 		} else {
4962 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4963 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
4964 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
4965 			    ic->ic_des_esslen);
4966 		}
4967 		req->general_flags |=
4968 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
4969 	} else
4970 #endif
4971 		req->general_flags |= htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE);
4972 
4973 	if (isset(sc->sc_enabled_capa,
4974 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
4975 		req->general_flags |=
4976 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
4977 
4978 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4979 		req->general_flags |=
4980 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
4981 	} else {
4982 		req->general_flags |=
4983 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
4984 	}
4985 
4986 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
4987 		err = iwx_fill_probe_req(sc, &tail->preq);
4988 	else
4989 		err = iwx_fill_probe_req_v1(sc, &tailv1->preq);
4990 	if (err) {
4991 		free(req, M_DEVBUF, req_len);
4992 		return err;
4993 	}
4994 
4995 	/* Specify the scan plan: We'll do one iteration. */
4996 	tail->schedule[0].interval = 0;
4997 	tail->schedule[0].iter_count = 1;
4998 
4999 	err = iwx_send_cmd(sc, &hcmd);
5000 	free(req, M_DEVBUF, req_len);
5001 	return err;
5002 }
5003 
5004 void
5005 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
5006 {
5007 	struct ieee80211com *ic = &sc->sc_ic;
5008 	struct ifnet *ifp = IC2IFP(ic);
5009 	char alpha2[3];
5010 
5011 	snprintf(alpha2, sizeof(alpha2), "%c%c",
5012 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
5013 
5014 	if (ifp->if_flags & IFF_DEBUG) {
5015 		printf("%s: firmware has detected regulatory domain '%s' "
5016 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
5017 	}
5018 
5019 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
5020 }
5021 
5022 uint8_t
5023 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5024 {
5025 	int i;
5026 	uint8_t rval;
5027 
5028 	for (i = 0; i < rs->rs_nrates; i++) {
5029 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5030 		if (rval == iwx_rates[ridx].rate)
5031 			return rs->rs_rates[i];
5032 	}
5033 
5034 	return 0;
5035 }
5036 
5037 int
5038 iwx_rval2ridx(int rval)
5039 {
5040 	int ridx;
5041 
5042 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
5043 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
5044 			continue;
5045 		if (rval == iwx_rates[ridx].rate)
5046 			break;
5047 	}
5048 
5049        return ridx;
5050 }
5051 
/*
 * Compute the CCK and OFDM basic-rate bitmaps advertised to the firmware
 * in a MAC context command.  Besides the BSS basic rates, all mandatory
 * rates below the lowest basic rate are included so the firmware can
 * always select a valid control-response rate (see 802.11-2007 9.6,
 * quoted below).
 */
void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates are only relevant on 2GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	/* Collect basic OFDM rates from the node's rate set. */
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	/* Add the mandatory OFDM rates below the lowest basic OFDM rate. */
	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	/* Return host-order bitmaps; the caller byte-swaps for firmware. */
	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5137 
/*
 * Fill in the fields of a MAC context command which are common to all
 * context actions (add/modify/remove): MAC type, addresses, ACK rates,
 * preamble/slot flags, per-AC EDCA parameters, and HT protection flags.
 * In monitor mode only the addresses are set and the rest is skipped.
 */
void
iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
    struct iwx_mac_ctx_cmd *cmd, uint32_t action)
{
#define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* Only monitor and STA modes are supported by this driver. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
	else if (ic->ic_opmode == IEEE80211_M_STA)
		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
	else
		panic("unsupported operating mode %d\n", ic->ic_opmode);
	cmd->tsf_id = htole32(IWX_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* No BSS in monitor mode; remaining fields do not apply. */
		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
		return;
	}

	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWX_MAC_FLG_SHORT_SLOT : 0);

	/* Program per-AC EDCA parameters into their firmware Tx FIFOs. */
	for (i = 0; i < EDCA_NUM_AC; i++) {
		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
		int txf = iwx_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
		cmd->ac[txf].aifsn = ac->ac_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		/* TXOP limit is in units of 32 microseconds. */
		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Derive HT protection flags from the AP's HT operation IE. */
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWX_MAC_PROT_FLG_HT_PROT);
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				cmd->protection_flags |=
				    htole32(IWX_MAC_PROT_FLG_SELF_CTS_EN);
			break;
		case IEEE80211_HTPROT_20MHZ:
			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
				/* XXX ... and if our channel is 40 MHz ... */
				cmd->protection_flags |=
				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
				    IWX_MAC_PROT_FLG_FAT_PROT);
				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
					cmd->protection_flags |= htole32(
					    IWX_MAC_PROT_FLG_SELF_CTS_EN);
			}
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
	}
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
#undef IWX_EXP2
}
5228 
5229 void
5230 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
5231     struct iwx_mac_data_sta *sta, int assoc)
5232 {
5233 	struct ieee80211_node *ni = &in->in_ni;
5234 	uint32_t dtim_off;
5235 	uint64_t tsf;
5236 
5237 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
5238 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
5239 	tsf = letoh64(tsf);
5240 
5241 	sta->is_assoc = htole32(assoc);
5242 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5243 	sta->dtim_tsf = htole64(tsf + dtim_off);
5244 	sta->bi = htole32(ni->ni_intval);
5245 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
5246 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
5247 	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
5248 	sta->listen_interval = htole32(10);
5249 	sta->assoc_id = htole32(ni->ni_associd);
5250 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5251 }
5252 
5253 int
5254 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
5255     int assoc)
5256 {
5257 	struct ieee80211com *ic = &sc->sc_ic;
5258 	struct ieee80211_node *ni = &in->in_ni;
5259 	struct iwx_mac_ctx_cmd cmd;
5260 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
5261 
5262 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5263 		panic("MAC already added");
5264 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5265 		panic("MAC already removed");
5266 
5267 	memset(&cmd, 0, sizeof(cmd));
5268 
5269 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
5270 
5271 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5272 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
5273 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
5274 		    IWX_MAC_FILTER_ACCEPT_GRP |
5275 		    IWX_MAC_FILTER_IN_BEACON |
5276 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
5277 		    IWX_MAC_FILTER_IN_CRC32);
5278 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
5279 		/*
5280 		 * Allow beacons to pass through as long as we are not
5281 		 * associated or we do not have dtim period information.
5282 		 */
5283 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
5284 	else
5285 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5286 
5287 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5288 }
5289 
5290 int
5291 iwx_clear_statistics(struct iwx_softc *sc)
5292 {
5293 	struct iwx_statistics_cmd scmd = {
5294 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
5295 	};
5296 	struct iwx_host_cmd cmd = {
5297 		.id = IWX_STATISTICS_CMD,
5298 		.len[0] = sizeof(scmd),
5299 		.data[0] = &scmd,
5300 		.flags = IWX_CMD_WANT_RESP,
5301 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
5302 	};
5303 	int err;
5304 
5305 	err = iwx_send_cmd(sc, &cmd);
5306 	if (err)
5307 		return err;
5308 
5309 	iwx_free_resp(sc, &cmd);
5310 	return 0;
5311 }
5312 
5313 int
5314 iwx_update_quotas(struct iwx_softc *sc, struct iwx_node *in, int running)
5315 {
5316 	struct iwx_time_quota_cmd cmd;
5317 	int i, idx, num_active_macs, quota, quota_rem;
5318 	int colors[IWX_MAX_BINDINGS] = { -1, -1, -1, -1, };
5319 	int n_ifs[IWX_MAX_BINDINGS] = {0, };
5320 	uint16_t id;
5321 
5322 	memset(&cmd, 0, sizeof(cmd));
5323 
5324 	/* currently, PHY ID == binding ID */
5325 	if (in && in->in_phyctxt) {
5326 		id = in->in_phyctxt->id;
5327 		KASSERT(id < IWX_MAX_BINDINGS);
5328 		colors[id] = in->in_phyctxt->color;
5329 		if (running)
5330 			n_ifs[id] = 1;
5331 	}
5332 
5333 	/*
5334 	 * The FW's scheduling session consists of
5335 	 * IWX_MAX_QUOTA fragments. Divide these fragments
5336 	 * equally between all the bindings that require quota
5337 	 */
5338 	num_active_macs = 0;
5339 	for (i = 0; i < IWX_MAX_BINDINGS; i++) {
5340 		cmd.quotas[i].id_and_color = htole32(IWX_FW_CTXT_INVALID);
5341 		num_active_macs += n_ifs[i];
5342 	}
5343 
5344 	quota = 0;
5345 	quota_rem = 0;
5346 	if (num_active_macs) {
5347 		quota = IWX_MAX_QUOTA / num_active_macs;
5348 		quota_rem = IWX_MAX_QUOTA % num_active_macs;
5349 	}
5350 
5351 	for (idx = 0, i = 0; i < IWX_MAX_BINDINGS; i++) {
5352 		if (colors[i] < 0)
5353 			continue;
5354 
5355 		cmd.quotas[idx].id_and_color =
5356 			htole32(IWX_FW_CMD_ID_AND_COLOR(i, colors[i]));
5357 
5358 		if (n_ifs[i] <= 0) {
5359 			cmd.quotas[idx].quota = htole32(0);
5360 			cmd.quotas[idx].max_duration = htole32(0);
5361 		} else {
5362 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5363 			cmd.quotas[idx].max_duration = htole32(0);
5364 		}
5365 		idx++;
5366 	}
5367 
5368 	/* Give the remainder of the session to the first binding */
5369 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5370 
5371 	return iwx_send_cmd_pdu(sc, IWX_TIME_QUOTA_CMD, 0,
5372 	    sizeof(cmd), &cmd);
5373 }
5374 
5375 void
5376 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
5377 {
5378 	int s = splnet();
5379 
5380 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
5381 		splx(s);
5382 		return;
5383 	}
5384 
5385 	refcnt_take(&sc->task_refs);
5386 	if (!task_add(taskq, task))
5387 		refcnt_rele_wake(&sc->task_refs);
5388 	splx(s);
5389 }
5390 
5391 void
5392 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
5393 {
5394 	if (task_del(taskq, task))
5395 		refcnt_rele(&sc->task_refs);
5396 }
5397 
/*
 * Start a foreground scan, cancelling any background scan in progress,
 * and move the interface into SCAN state.
 */
int
iwx_scan(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;

	/* A pending background scan must be aborted first. */
	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
		err = iwx_scan_abort(sc);
		if (err) {
			printf("%s: could not abort background scan\n",
			    DEVNAME(sc));
			return err;
		}
	}

	err = iwx_umac_scan(sc, 0);
	if (err) {
		printf("%s: could not initiate scan\n", DEVNAME(sc));
		return err;
	}

	/*
	 * The current mode might have been fixed during association.
	 * Ensure all channels get scanned.
	 */
	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);

	sc->sc_flags |= IWX_FLAG_SCANNING;
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: %s -> %s\n", ifp->if_xname,
		    ieee80211_state_name[ic->ic_state],
		    ieee80211_state_name[IEEE80211_S_SCAN]);
	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
		/* Foreground scan: drop the link and forget the current BSS. */
		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
		ieee80211_node_cleanup(ic, ic->ic_bss);
	}
	ic->ic_state = IEEE80211_S_SCAN;
	wakeup(&ic->ic_state); /* wake iwx_init() */

	return 0;
}
5441 
5442 int
5443 iwx_bgscan(struct ieee80211com *ic)
5444 {
5445 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
5446 	int err;
5447 
5448 	if (sc->sc_flags & IWX_FLAG_SCANNING)
5449 		return 0;
5450 
5451 	err = iwx_umac_scan(sc, 1);
5452 	if (err) {
5453 		printf("%s: could not initiate scan\n", DEVNAME(sc));
5454 		return err;
5455 	}
5456 
5457 	sc->sc_flags |= IWX_FLAG_BGSCAN;
5458 	return 0;
5459 }
5460 
5461 int
5462 iwx_umac_scan_abort(struct iwx_softc *sc)
5463 {
5464 	struct iwx_umac_scan_abort cmd = { 0 };
5465 
5466 	return iwx_send_cmd_pdu(sc,
5467 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
5468 	    0, sizeof(cmd), &cmd);
5469 }
5470 
5471 int
5472 iwx_scan_abort(struct iwx_softc *sc)
5473 {
5474 	int err;
5475 
5476 	err = iwx_umac_scan_abort(sc);
5477 	if (err == 0)
5478 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
5479 	return err;
5480 }
5481 
5482 int
5483 iwx_enable_data_tx_queues(struct iwx_softc *sc)
5484 {
5485 	int err, ac;
5486 
5487 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
5488 		int qid = ac + IWX_DQA_AUX_QUEUE + 1;
5489 		/*
5490 		 * Regular data frames use the "MGMT" TID and queue.
5491 		 * Other TIDs and queues are reserved for frame aggregation.
5492 		 */
5493 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, IWX_TID_NON_QOS,
5494 		    IWX_TX_RING_COUNT);
5495 		if (err) {
5496 			printf("%s: could not enable Tx queue %d (error %d)\n",
5497 			    DEVNAME(sc), ac, err);
5498 			return err;
5499 		}
5500 	}
5501 
5502 	return 0;
5503 }
5504 
5505 int
5506 iwx_rs_rval2idx(uint8_t rval)
5507 {
5508 	/* Firmware expects indices which match our 11g rate set. */
5509 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
5510 	int i;
5511 
5512 	for (i = 0; i < rs->rs_nrates; i++) {
5513 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5514 			return i;
5515 	}
5516 
5517 	return -1;
5518 }
5519 
5520 uint16_t
5521 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
5522 {
5523 	struct ieee80211com *ic = &sc->sc_ic;
5524 	const struct ieee80211_ht_rateset *rs;
5525 	uint16_t htrates = 0;
5526 	int mcs;
5527 
5528 	rs = &ieee80211_std_ratesets_11n[rsidx];
5529 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
5530 		if (!isset(ni->ni_rxmcs, mcs) ||
5531 		    !isset(ic->ic_sup_mcs, mcs))
5532 			continue;
5533 		htrates |= (1 << (mcs - rs->min_mcs));
5534 	}
5535 
5536 	return htrates;
5537 }
5538 
5539 int
5540 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
5541 {
5542 	struct ieee80211_node *ni = &in->in_ni;
5543 	struct ieee80211_rateset *rs = &ni->ni_rates;
5544 	struct iwx_tlc_config_cmd cfg_cmd;
5545 	uint32_t cmd_id;
5546 	int i;
5547 
5548 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
5549 
5550 	for (i = 0; i < rs->rs_nrates; i++) {
5551 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
5552 		int idx = iwx_rs_rval2idx(rval);
5553 		if (idx == -1)
5554 			return EINVAL;
5555 		cfg_cmd.non_ht_rates |= (1 << idx);
5556 	}
5557 
5558 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5559 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
5560 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
5561 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_SISO);
5562 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
5563 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_MIMO2);
5564 	} else
5565 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
5566 
5567 	cfg_cmd.sta_id = IWX_STATION_ID;
5568 	cfg_cmd.max_ch_width = IWX_RATE_MCS_CHAN_WIDTH_20;
5569 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
5570 	cfg_cmd.max_mpdu_len = IEEE80211_MAX_LEN;
5571 	if (ieee80211_node_supports_ht_sgi20(ni))
5572 		cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_20MHZ);
5573 
5574 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
5575 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, sizeof(cfg_cmd),
5576 	    &cfg_cmd);
5577 }
5578 
5579 void
5580 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
5581 {
5582 	struct ieee80211com *ic = &sc->sc_ic;
5583 	struct ieee80211_node *ni = ic->ic_bss;
5584 	struct ieee80211_rateset *rs = &ni->ni_rates;
5585 	uint32_t rate_n_flags;
5586 	int i;
5587 
5588 	if (notif->sta_id != IWX_STATION_ID ||
5589 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
5590 		return;
5591 
5592 	rate_n_flags = le32toh(notif->rate);
5593 	if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
5594 		ni->ni_txmcs = (rate_n_flags &
5595 		    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
5596 		    IWX_RATE_HT_MCS_NSS_MSK));
5597 	} else {
5598 		uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
5599 		uint8_t rval = 0;
5600 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
5601 			if (iwx_rates[i].plcp == plcp) {
5602 				rval = iwx_rates[i].rate;
5603 				break;
5604 			}
5605 		}
5606 		if (rval) {
5607 			uint8_t rv;
5608 			for (i = 0; i < rs->rs_nrates; i++) {
5609 				rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
5610 				if (rv == rval) {
5611 					ni->ni_txrate = i;
5612 					break;
5613 				}
5614 			}
5615 		}
5616 	}
5617 }
5618 
/*
 * Prepare firmware state for authentication: update the PHY context to
 * the target channel, then add a MAC context, a binding, and a station,
 * enable Tx queues, and finally protect the session with a time event.
 * On failure the steps completed so far are rolled back, unless the
 * device was reset in the meantime (sc_generation changed).
 */
int
iwx_auth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	splassert(IPL_NET);

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
	else
		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: could not update PHY context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode only needs the injection queue; we are done. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_data_tx_queues(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_sta;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwx_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);

	return 0;

	/*
	 * Error unwind.  Each step is skipped if the device was reset
	 * (sc_generation changed), since the firmware state is gone then.
	 */
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
5712 
/*
 * Tear down the firmware state established by iwx_auth(), in reverse
 * order: flush the Tx path, then remove the station, the binding, and
 * finally the MAC context.  Each step is guarded by its state flag so
 * partial setups are unwound correctly.
 */
int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		/* Frames still queued for the station must be flushed first. */
		err = iwx_flush_tx_path(sc);
		if (err) {
			printf("%s: could not flush Tx path (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		err = iwx_rm_sta_cmd(sc, in);
		if (err) {
			printf("%s: could not remove STA (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
		sc->sc_rx_ba_sessions = 0;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	return 0;
}
5763 
5764 int
5765 iwx_assoc(struct iwx_softc *sc)
5766 {
5767 	struct ieee80211com *ic = &sc->sc_ic;
5768 	struct iwx_node *in = (void *)ic->ic_bss;
5769 	int update_sta = (sc->sc_flags & IWX_FLAG_STA_ACTIVE);
5770 	int err;
5771 
5772 	splassert(IPL_NET);
5773 
5774 	err = iwx_add_sta_cmd(sc, in, update_sta);
5775 	if (err) {
5776 		printf("%s: could not %s STA (error %d)\n",
5777 		    DEVNAME(sc), update_sta ? "update" : "add", err);
5778 		return err;
5779 	}
5780 
5781 	if (!update_sta)
5782 		err = iwx_enable_data_tx_queues(sc);
5783 
5784 	return err;
5785 }
5786 
5787 int
5788 iwx_disassoc(struct iwx_softc *sc)
5789 {
5790 	struct ieee80211com *ic = &sc->sc_ic;
5791 	struct iwx_node *in = (void *)ic->ic_bss;
5792 	int err;
5793 
5794 	splassert(IPL_NET);
5795 
5796 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
5797 		err = iwx_rm_sta_cmd(sc, in);
5798 		if (err) {
5799 			printf("%s: could not remove STA (error %d)\n",
5800 			    DEVNAME(sc), err);
5801 			return err;
5802 		}
5803 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
5804 		sc->sc_rx_ba_sessions = 0;
5805 	}
5806 
5807 	return 0;
5808 }
5809 
/*
 * Transition to RUN state: enable MIMO where supported, update the MAC
 * context with the new association state, and configure smart-fifo,
 * multicast, power saving, quotas, and firmware-based rate scaling.
 */
int
iwx_run(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Add a MAC context and a sniffing STA. */
		err = iwx_auth(sc);
		if (err)
			return err;
	}

	/* Configure Rx chains for MIMO. */
	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
	    iwx_mimo_enabled(sc)) {
		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
		    2, 2, IWX_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n",
			    DEVNAME(sc));
			return err;
		}
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Explicit quotas are only needed without dynamic-quota firmware. */
	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
		err = iwx_update_quotas(sc, in, 1);
		if (err) {
			printf("%s: could not update quotas (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Monitor mode does not transmit; no rate scaling needed. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	/* Start at lowest available bit-rate. Firmware will raise. */
	in->in_ni.ni_txrate = 0;
	in->in_ni.ni_txmcs = 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
5911 
/*
 * Undo the RUN-state configuration done by iwx_run(): restore smart-fifo
 * and beacon-filter settings, reset quotas, clear the association from
 * the MAC context, and fall back to a single chain if MIMO was enabled.
 */
int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Explicit quotas are only needed without dynamic-quota firmware. */
	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
		err = iwx_update_quotas(sc, in, 0);
		if (err) {
			printf("%s: could not update quotas (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Mark us as no longer associated in the MAC context. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* Reset Tx chains in case MIMO was enabled. */
	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
	    iwx_mimo_enabled(sc)) {
		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
		    IWX_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	return 0;
}
5960 
/*
 * net80211 node allocation hook.  We allocate our own extended node
 * structure; elsewhere in this driver ieee80211_node pointers are cast
 * to struct iwx_node, so the net80211 node must be its first member.
 */
struct ieee80211_node *
iwx_node_alloc(struct ieee80211com *ic)
{
	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
}
5966 
/*
 * net80211 set-key hook: install a CCMP key in firmware via an
 * asynchronous ADD_STA_KEY command.  All other ciphers are handled
 * in software by net80211.
 */
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		return (ieee80211_set_key(ic, ni, k));
	}

	memset(&cmd, 0, sizeof(cmd));

	/* CCM cipher, with the key ID encoded into the flags field. */
	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys use key offset 1, pairwise keys use offset 0. */
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = IWX_STATION_ID;

	/* Hand the current transmit sequence counter to the firmware. */
	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	return iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC,
	    sizeof(cmd), &cmd);
}
5999 
6000 void
6001 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6002     struct ieee80211_key *k)
6003 {
6004 	struct iwx_softc *sc = ic->ic_softc;
6005 	struct iwx_add_sta_key_cmd cmd;
6006 
6007 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6008 		/* Fallback to software crypto for other ciphers. */
6009                 ieee80211_delete_key(ic, ni, k);
6010 		return;
6011 	}
6012 
6013 	memset(&cmd, 0, sizeof(cmd));
6014 
6015 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
6016 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
6017 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
6018 	    IWX_STA_KEY_FLG_KEYID_MSK));
6019 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6020 	if (k->k_flags & IEEE80211_KEY_GROUP)
6021 		cmd.common.key_offset = 1;
6022 	else
6023 		cmd.common.key_offset = 0;
6024 	cmd.common.sta_id = IWX_STATION_ID;
6025 
6026 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
6027 }
6028 
/*
 * ifmedia change callback: record any user-selected fixed MCS or
 * legacy rate in the softc and restart the interface so the new
 * setting takes effect.
 */
int
iwx_media_change(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
	else if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
			if (iwx_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Only restart if the interface is actually up and running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwx_stop(ifp);
		err = iwx_init(ifp);
	}
	return err;
}
6060 
/*
 * Task which performs deferred net80211 state transitions on behalf
 * of iwx_newstate().  When moving to a lower state, intermediate
 * states are stepped through in order (RUN -> ASSOC -> AUTH ->
 * SCAN/INIT) so firmware state is torn down in reverse setup order.
 */
void
iwx_newstate_task(void *psc)
{
	struct iwx_softc *sc = (struct iwx_softc *)psc;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int arg = sc->ns_arg;
	int err = 0, s = splnet();

	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
		/* iwx_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			/* SCAN -> SCAN while firmware still scans: no-op. */
			if (sc->sc_flags & IWX_FLAG_SCANNING) {
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		}
	}

	/* Step down through intermediate states. */
	if (nstate <= ostate) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
			if (nstate <= IEEE80211_S_ASSOC) {
				err = iwx_disassoc(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if iwx_stop() was called while we were sleeping. */
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = iwx_scan(sc);
		if (err)
			break;
		/* Scan completion will trigger the next transition. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = iwx_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		err = iwx_assoc(sc);
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(sc);
		break;
	}

out:
	/* On error, schedule a full re-init; otherwise finish the switch. */
	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
6160 
/*
 * net80211 state-change hook.  The actual transition work may sleep,
 * so it is deferred to iwx_newstate_task(); the requested state and
 * argument are stashed in the softc for the task to pick up.
 */
int
iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_softc *sc = ifp->if_softc;

	/* Leaving RUN state: cancel tasks which only matter while in RUN. */
	if (ic->ic_state == IEEE80211_S_RUN) {
		iwx_del_task(sc, systq, &sc->ba_task);
		iwx_del_task(sc, systq, &sc->htprot_task);
	}

	sc->ns_nstate = nstate;
	sc->ns_arg = arg;

	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);

	return 0;
}
6179 
6180 void
6181 iwx_endscan(struct iwx_softc *sc)
6182 {
6183 	struct ieee80211com *ic = &sc->sc_ic;
6184 
6185 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
6186 		return;
6187 
6188 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6189 	ieee80211_end_scan(&ic->ic_if);
6190 }
6191 
6192 /*
6193  * Aging and idle timeouts for the different possible scenarios
6194  * in default configuration
6195  */
6196 static const uint32_t
6197 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
6198 	{
6199 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6200 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6201 	},
6202 	{
6203 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
6204 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6205 	},
6206 	{
6207 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
6208 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
6209 	},
6210 	{
6211 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
6212 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
6213 	},
6214 	{
6215 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
6216 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
6217 	},
6218 };
6219 
6220 /*
6221  * Aging and idle timeouts for the different possible scenarios
6222  * in single BSS MAC configuration.
6223  */
6224 static const uint32_t
6225 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
6226 	{
6227 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
6228 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
6229 	},
6230 	{
6231 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
6232 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
6233 	},
6234 	{
6235 		htole32(IWX_SF_MCAST_AGING_TIMER),
6236 		htole32(IWX_SF_MCAST_IDLE_TIMER)
6237 	},
6238 	{
6239 		htole32(IWX_SF_BA_AGING_TIMER),
6240 		htole32(IWX_SF_BA_IDLE_TIMER)
6241 	},
6242 	{
6243 		htole32(IWX_SF_TX_RE_AGING_TIMER),
6244 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
6245 	},
6246 };
6247 
6248 void
6249 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
6250     struct ieee80211_node *ni)
6251 {
6252 	int i, j, watermark;
6253 
6254 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
6255 
6256 	/*
6257 	 * If we are in association flow - check antenna configuration
6258 	 * capabilities of the AP station, and choose the watermark accordingly.
6259 	 */
6260 	if (ni) {
6261 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6262 			if (ni->ni_rxmcs[1] != 0)
6263 				watermark = IWX_SF_W_MARK_MIMO2;
6264 			else
6265 				watermark = IWX_SF_W_MARK_SISO;
6266 		} else {
6267 			watermark = IWX_SF_W_MARK_LEGACY;
6268 		}
6269 	/* default watermark value for unassociated mode. */
6270 	} else {
6271 		watermark = IWX_SF_W_MARK_MIMO2;
6272 	}
6273 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
6274 
6275 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
6276 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
6277 			sf_cmd->long_delay_timeouts[i][j] =
6278 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
6279 		}
6280 	}
6281 
6282 	if (ni) {
6283 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
6284 		       sizeof(iwx_sf_full_timeout));
6285 	} else {
6286 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
6287 		       sizeof(iwx_sf_full_timeout_def));
6288 	}
6289 
6290 }
6291 
6292 int
6293 iwx_sf_config(struct iwx_softc *sc, int new_state)
6294 {
6295 	struct ieee80211com *ic = &sc->sc_ic;
6296 	struct iwx_sf_cfg_cmd sf_cmd = {
6297 		.state = htole32(new_state),
6298 	};
6299 	int err = 0;
6300 
6301 	switch (new_state) {
6302 	case IWX_SF_UNINIT:
6303 	case IWX_SF_INIT_OFF:
6304 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
6305 		break;
6306 	case IWX_SF_FULL_ON:
6307 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6308 		break;
6309 	default:
6310 		return EINVAL;
6311 	}
6312 
6313 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
6314 				   sizeof(sf_cmd), &sf_cmd);
6315 	return err;
6316 }
6317 
6318 int
6319 iwx_send_bt_init_conf(struct iwx_softc *sc)
6320 {
6321 	struct iwx_bt_coex_cmd bt_cmd;
6322 
6323 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
6324 	bt_cmd.enabled_modules = 0;
6325 
6326 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
6327 	    &bt_cmd);
6328 }
6329 
/*
 * Send the SoC configuration command, which informs the firmware of
 * the platform's crystal latency and LTR delay settings.
 */
int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/*
		 * Gate the low-latency flag on the UMAC scan command
		 * version; presumably only such firmware accepts it.
		 */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
6367 
6368 int
6369 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
6370 {
6371 	struct iwx_mcc_update_cmd mcc_cmd;
6372 	struct iwx_host_cmd hcmd = {
6373 		.id = IWX_MCC_UPDATE_CMD,
6374 		.flags = IWX_CMD_WANT_RESP,
6375 		.data = { &mcc_cmd },
6376 	};
6377 	struct iwx_rx_packet *pkt;
6378 	struct iwx_mcc_update_resp *resp;
6379 	size_t resp_len;
6380 	int err;
6381 
6382 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6383 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6384 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6385 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6386 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
6387 	else
6388 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
6389 
6390 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
6391 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
6392 
6393 	err = iwx_send_cmd(sc, &hcmd);
6394 	if (err)
6395 		return err;
6396 
6397 	pkt = hcmd.resp_pkt;
6398 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
6399 		err = EIO;
6400 		goto out;
6401 	}
6402 
6403 	resp_len = iwx_rx_packet_payload_len(pkt);
6404 	if (resp_len < sizeof(*resp)) {
6405 		err = EIO;
6406 		goto out;
6407 	}
6408 
6409 	resp = (void *)pkt->data;
6410 	if (resp_len != sizeof(*resp) +
6411 	    resp->n_channels * sizeof(resp->channels[0])) {
6412 		err = EIO;
6413 		goto out;
6414 	}
6415 
6416 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
6417 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
6418 
6419 	/* Update channel map for net80211 and our scan configuration. */
6420 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
6421 
6422 out:
6423 	iwx_free_resp(sc, &hcmd);
6424 
6425 	return err;
6426 }
6427 
6428 int
6429 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
6430 {
6431 	struct iwx_temp_report_ths_cmd cmd;
6432 	int err;
6433 
6434 	/*
6435 	 * In order to give responsibility for critical-temperature-kill
6436 	 * and TX backoff to FW we need to send an empty temperature
6437 	 * reporting command at init time.
6438 	 */
6439 	memset(&cmd, 0, sizeof(cmd));
6440 
6441 	err = iwx_send_cmd_pdu(sc,
6442 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
6443 	    0, sizeof(cmd), &cmd);
6444 	if (err)
6445 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
6446 		    DEVNAME(sc), err);
6447 
6448 	return err;
6449 }
6450 
6451 int
6452 iwx_init_hw(struct iwx_softc *sc)
6453 {
6454 	struct ieee80211com *ic = &sc->sc_ic;
6455 	int err, i;
6456 
6457 	err = iwx_preinit(sc);
6458 	if (err)
6459 		return err;
6460 
6461 	err = iwx_start_hw(sc);
6462 	if (err) {
6463 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
6464 		return err;
6465 	}
6466 
6467 	err = iwx_run_init_mvm_ucode(sc, 0);
6468 	if (err)
6469 		return err;
6470 
6471 	if (!iwx_nic_lock(sc))
6472 		return EBUSY;
6473 
6474 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
6475 	if (err) {
6476 		printf("%s: could not init tx ant config (error %d)\n",
6477 		    DEVNAME(sc), err);
6478 		goto err;
6479 	}
6480 
6481 	if (sc->sc_tx_with_siso_diversity) {
6482 		err = iwx_send_phy_cfg_cmd(sc);
6483 		if (err) {
6484 			printf("%s: could not send phy config (error %d)\n",
6485 			    DEVNAME(sc), err);
6486 			goto err;
6487 		}
6488 	}
6489 
6490 	err = iwx_send_bt_init_conf(sc);
6491 	if (err) {
6492 		printf("%s: could not init bt coex (error %d)\n",
6493 		    DEVNAME(sc), err);
6494 		return err;
6495 	}
6496 
6497 	err = iwx_send_soc_conf(sc);
6498 	if (err)
6499 		return err;
6500 
6501 	err = iwx_send_dqa_cmd(sc);
6502 	if (err)
6503 		return err;
6504 
6505 	/* Add auxiliary station for scanning */
6506 	err = iwx_add_aux_sta(sc);
6507 	if (err) {
6508 		printf("%s: could not add aux station (error %d)\n",
6509 		    DEVNAME(sc), err);
6510 		goto err;
6511 	}
6512 
6513 	for (i = 0; i < 1; i++) {
6514 		/*
6515 		 * The channel used here isn't relevant as it's
6516 		 * going to be overwritten in the other flows.
6517 		 * For now use the first channel we have.
6518 		 */
6519 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6520 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6521 		    IWX_FW_CTXT_ACTION_ADD, 0);
6522 		if (err) {
6523 			printf("%s: could not add phy context %d (error %d)\n",
6524 			    DEVNAME(sc), i, err);
6525 			goto err;
6526 		}
6527 	}
6528 
6529 	err = iwx_config_ltr(sc);
6530 	if (err) {
6531 		printf("%s: PCIe LTR configuration failed (error %d)\n",
6532 		    DEVNAME(sc), err);
6533 	}
6534 
6535 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
6536 		err = iwx_send_temp_report_ths_cmd(sc);
6537 		if (err)
6538 			goto err;
6539 	}
6540 
6541 	err = iwx_power_update_device(sc);
6542 	if (err) {
6543 		printf("%s: could not send power command (error %d)\n",
6544 		    DEVNAME(sc), err);
6545 		goto err;
6546 	}
6547 
6548 	if (sc->sc_nvm.lar_enabled) {
6549 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
6550 		if (err) {
6551 			printf("%s: could not init LAR (error %d)\n",
6552 			    DEVNAME(sc), err);
6553 			goto err;
6554 		}
6555 	}
6556 
6557 	err = iwx_config_umac_scan(sc);
6558 	if (err) {
6559 		printf("%s: could not configure scan (error %d)\n",
6560 		    DEVNAME(sc), err);
6561 		goto err;
6562 	}
6563 
6564 	err = iwx_disable_beacon_filter(sc);
6565 	if (err) {
6566 		printf("%s: could not disable beacon filter (error %d)\n",
6567 		    DEVNAME(sc), err);
6568 		goto err;
6569 	}
6570 
6571 err:
6572 	iwx_nic_unlock(sc);
6573 	return err;
6574 }
6575 
6576 /* Allow multicast from our BSSID. */
6577 int
6578 iwx_allow_mcast(struct iwx_softc *sc)
6579 {
6580 	struct ieee80211com *ic = &sc->sc_ic;
6581 	struct ieee80211_node *ni = ic->ic_bss;
6582 	struct iwx_mcast_filter_cmd *cmd;
6583 	size_t size;
6584 	int err;
6585 
6586 	size = roundup(sizeof(*cmd), 4);
6587 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
6588 	if (cmd == NULL)
6589 		return ENOMEM;
6590 	cmd->filter_own = 1;
6591 	cmd->port_id = 0;
6592 	cmd->count = 0;
6593 	cmd->pass_all = 1;
6594 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6595 
6596 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
6597 	    0, size, cmd);
6598 	free(cmd, M_DEVBUF, size);
6599 	return err;
6600 }
6601 
/*
 * Bring the interface up: initialize and configure the firmware,
 * then begin an initial scan (except in monitor mode, which goes
 * straight to RUN).  Sleeps waiting for the SCAN state transition;
 * must be called with ioctl_rwl write-locked (asserted below).
 */
int
iwx_init(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int err, generation;

	rw_assert_wrlock(&sc->ioctl_rwl);

	/* A generation bump invalidates any concurrent sleepers. */
	generation = ++sc->sc_generation;

	KASSERT(sc->task_refs.refs == 0);
	refcnt_init(&sc->task_refs);

	err = iwx_init_hw(sc);
	if (err) {
		if (generation == sc->sc_generation)
			iwx_stop(ifp);
		return err;
	}

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwx_setup_ht_rates(sc);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
		return 0;
	}

	ieee80211_begin_scan(ifp);

	/*
	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
	 * Wait until the transition to SCAN state has completed.
	 */
	do {
		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
		    SEC_TO_NSEC(1));
		if (generation != sc->sc_generation)
			return ENXIO;
		if (err)
			return err;
	} while (ic->ic_state != IEEE80211_S_SCAN);

	return 0;
}
6652 
/*
 * ifnet start routine: drain pending management and data frames into
 * the hardware Tx path.  Management frames are sent regardless of
 * state; data frames only while associated (RUN state).
 */
void
iwx_start(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac = EDCA_AC_BE; /* XXX */

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* Destination node was stashed by net80211. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		/* Encapsulate the frame for 802.11 transmission. */
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (iwx_tx(sc, m, ni, ac) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the transmit watchdog (see iwx_watchdog()). */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
6720 
/*
 * Bring the interface down: cancel pending tasks, wait for running
 * ones to finish, stop the device, and reset all driver soft state.
 * Must be called with ioctl_rwl write-locked (asserted below).
 */
void
iwx_stop(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int i, s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	iwx_del_task(sc, systq, &sc->ba_task);
	iwx_del_task(sc, systq, &sc->htprot_task);
	KASSERT(sc->task_refs.refs >= 1);
	refcnt_finalize(&sc->task_refs, "iwxstop");

	iwx_stop_device(sc);

	/* Reset soft state. */

	/* Invalidate any sleepers waiting on the old generation. */
	sc->sc_generation++;
	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
		sc->sc_cmd_resp_pkt[i] = NULL;
		sc->sc_cmd_resp_len[i] = 0;
	}
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	in->in_phyctxt = NULL;

	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;

	sc->sc_rx_ba_sessions = 0;

	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);

	ifp->if_timer = sc->sc_tx_timer = 0;

	splx(s);
}
6772 
/*
 * Interface watchdog.  Counts down the Tx timer armed by iwx_start();
 * on expiry, reports a device timeout, optionally dumps the firmware
 * error log, and schedules a full re-init.
 */
void
iwx_watchdog(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;

	ifp->if_timer = 0;
	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			printf("%s: device timeout\n", DEVNAME(sc));
#ifdef IWX_DEBUG
			iwx_nic_error(sc);
#endif
			/* Trigger a firmware restart unless shutting down. */
			if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
				task_add(systq, &sc->init_task);
			ifp->if_oerrors++;
			return;
		}
		ifp->if_timer = 1;
	}

	ieee80211_watchdog(ifp);
}
6795 
/*
 * ifnet ioctl handler.  Serialized via ioctl_rwl because iwx_init()
 * sleeps; a generation change observed after acquiring the lock means
 * the device was stopped/restarted in the meantime and the caller's
 * view is stale (ENXIO).
 */
int
iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwx_softc *sc = ifp->if_softc;
	int s, err = 0, generation = sc->sc_generation;

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	if (err == 0 && generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		return ENXIO;
	}
	if (err)
		return err;
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwx_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwx_stop(ifp);
		}
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* ENETRESET from net80211 means "restart to apply changes". */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwx_stop(ifp);
			err = iwx_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
6848 
6849 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
6850 /*
6851  * Note: This structure is read from the device with IO accesses,
6852  * and the reading already does the endian conversion. As it is
6853  * read with uint32_t-sized accesses, any members with a different size
6854  * need to be ordered correctly though!
6855  */
6856 struct iwx_error_event_table {
6857 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6858 	uint32_t error_id;		/* type of error */
6859 	uint32_t trm_hw_status0;	/* TRM HW status */
6860 	uint32_t trm_hw_status1;	/* TRM HW status */
6861 	uint32_t blink2;		/* branch link */
6862 	uint32_t ilink1;		/* interrupt link */
6863 	uint32_t ilink2;		/* interrupt link */
6864 	uint32_t data1;		/* error-specific data */
6865 	uint32_t data2;		/* error-specific data */
6866 	uint32_t data3;		/* error-specific data */
6867 	uint32_t bcon_time;		/* beacon timer */
6868 	uint32_t tsf_low;		/* network timestamp function timer */
6869 	uint32_t tsf_hi;		/* network timestamp function timer */
6870 	uint32_t gp1;		/* GP1 timer register */
6871 	uint32_t gp2;		/* GP2 timer register */
6872 	uint32_t fw_rev_type;	/* firmware revision type */
6873 	uint32_t major;		/* uCode version major */
6874 	uint32_t minor;		/* uCode version minor */
6875 	uint32_t hw_ver;		/* HW Silicon version */
6876 	uint32_t brd_ver;		/* HW board version */
6877 	uint32_t log_pc;		/* log program counter */
6878 	uint32_t frame_ptr;		/* frame pointer */
6879 	uint32_t stack_ptr;		/* stack pointer */
6880 	uint32_t hcmd;		/* last host command header */
6881 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
6882 				 * rxtx_flag */
6883 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
6884 				 * host_flag */
6885 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
6886 				 * enc_flag */
6887 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
6888 				 * time_flag */
6889 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
6890 				 * wico interrupt */
6891 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
6892 	uint32_t wait_event;		/* wait event() caller address */
6893 	uint32_t l2p_control;	/* L2pControlField */
6894 	uint32_t l2p_duration;	/* L2pDurationField */
6895 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
6896 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
6897 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
6898 				 * (LMPM_PMG_SEL) */
6899 	uint32_t u_timestamp;	/* indicate when the date and time of the
6900 				 * compilation */
6901 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
6902 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6903 
6904 /*
6905  * UMAC error struct - relevant starting from family 8000 chip.
6906  * Note: This structure is read from the device with IO accesses,
6907  * and the reading already does the endian conversion. As it is
6908  * read with u32-sized accesses, any members with a different size
6909  * need to be ordered correctly though!
6910  */
6911 struct iwx_umac_error_event_table {
6912 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6913 	uint32_t error_id;	/* type of error */
6914 	uint32_t blink1;	/* branch link */
6915 	uint32_t blink2;	/* branch link */
6916 	uint32_t ilink1;	/* interrupt link */
6917 	uint32_t ilink2;	/* interrupt link */
6918 	uint32_t data1;		/* error-specific data */
6919 	uint32_t data2;		/* error-specific data */
6920 	uint32_t data3;		/* error-specific data */
6921 	uint32_t umac_major;
6922 	uint32_t umac_minor;
6923 	uint32_t frame_pointer;	/* core register 27*/
6924 	uint32_t stack_pointer;	/* core register 28 */
6925 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
6926 	uint32_t nic_isr_pref;	/* ISR status register */
6927 } __packed;
6928 
/* Error log layout constants, used to sanity-check the 'valid' field. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6931 
/*
 * Dump the UMAC error event table from device memory to the console.
 * The table address was provided by the firmware at load time.
 */
void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Firmware error tables live above 0x800000 in device memory. */
	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
			sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
		iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
6976 
/* Mask off the CPU identifier bits of a firmware assertion code. */
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Firmware assertion codes and their names, searched by
 * iwx_desc_lookup().  The final entry is the catch-all returned for
 * unrecognized codes.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
7003 
7004 const char *
7005 iwx_desc_lookup(uint32_t num)
7006 {
7007 	int i;
7008 
7009 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
7010 		if (advanced_lookup[i].num ==
7011 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
7012 			return advanced_lookup[i].name;
7013 
7014 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7015 	return advanced_lookup[i].name;
7016 }
7017 
7018 /*
7019  * Support for dumping the error log seemed like a good idea ...
7020  * but it's mostly hex junk and the only sensible thing is the
7021  * hw/ucode revision (which we know anyway).  Since it's here,
7022  * I'll just leave it in, just in case e.g. the Intel guys want to
7023  * help us decipher some "ADVANCED_SYSASSERT" later.
7024  */
7025 void
7026 iwx_nic_error(struct iwx_softc *sc)
7027 {
7028 	struct iwx_error_event_table table;
7029 	uint32_t base;
7030 
7031 	printf("%s: dumping device error log\n", DEVNAME(sc));
7032 	base = sc->sc_uc.uc_lmac_error_event_table[0];
7033 	if (base < 0x800000) {
7034 		printf("%s: Invalid error log pointer 0x%08x\n",
7035 		    DEVNAME(sc), base);
7036 		return;
7037 	}
7038 
7039 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
7040 		printf("%s: reading errlog failed\n", DEVNAME(sc));
7041 		return;
7042 	}
7043 
7044 	if (!table.valid) {
7045 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
7046 		return;
7047 	}
7048 
7049 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
7050 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
7051 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
7052 		    sc->sc_flags, table.valid);
7053 	}
7054 
7055 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
7056 	    iwx_desc_lookup(table.error_id));
7057 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
7058 	    table.trm_hw_status0);
7059 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
7060 	    table.trm_hw_status1);
7061 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
7062 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
7063 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
7064 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
7065 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
7066 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
7067 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
7068 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
7069 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
7070 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
7071 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
7072 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
7073 	    table.fw_rev_type);
7074 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
7075 	    table.major);
7076 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
7077 	    table.minor);
7078 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
7079 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
7080 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
7081 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
7082 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
7083 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
7084 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
7085 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
7086 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
7087 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
7088 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
7089 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
7090 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
7091 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
7092 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
7093 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
7094 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
7095 
7096 	if (sc->sc_uc.uc_umac_error_event_table)
7097 		iwx_nic_umac_error(sc);
7098 }
7099 #endif
7100 
/*
 * Sync the portion of an RX DMA buffer which holds a fixed-size response
 * structure located immediately after the packet header, and point
 * _var_ at that structure.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * As above, but for variable-length response data; _len_ is the number
 * of bytes following the packet header which must be synced.
 * Previously this synced sizeof(len) (4 bytes, silently capturing a
 * variable named 'len' from the expansion site) instead of the _len_
 * argument, which could leave part of the response data unsynced.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
7114 
7115 int
7116 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
7117 {
7118 	int qid, idx, code;
7119 
7120 	qid = pkt->hdr.qid & ~0x80;
7121 	idx = pkt->hdr.idx;
7122 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7123 
7124 	return (!(qid == 0 && idx == 0 && code == 0) &&
7125 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
7126 }
7127 
/*
 * Process one RX buffer which may contain several concatenated packets
 * (frames and/or command responses), dispatching each to its handler.
 * Received frames are queued on 'ml' for delivery to the network stack.
 * Ownership of data->m may be transferred to the stack; in that case a
 * fresh mbuf is put on the RX ring via iwx_rx_addbuf().
 */
void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	/* Smallest possible packet: length word plus header. */
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	size_t remain = IWX_RBUF_SIZE;
	int qid, idx, code, handled = 1;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* Walk all packets in the buffer; 'offset' tracks our position. */
	m0 = data->m;
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/* Stop at the first invalid packet header. */
		if (!iwx_rx_pkt_valid(pkt))
			break;

		/* Sanity-check the length against the remaining space. */
		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		if (len < sizeof(pkt->hdr) ||
		    len > (IWX_RBUF_SIZE - offset - minsz))
			break;

		/*
		 * On the first MPDU in this buffer, replace m0 on the RX
		 * ring so we are free to hand m0 to the network stack.
		 */
		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				ifp->if_ierrors++;
				break;
			}
			KASSERT(data->m != m0);
		}

		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = remain - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			if (nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
				if (m == NULL) {
					ifp->if_ierrors++;
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
			}

			/* Track how much buffer space is left for frames. */
			if (offset + minsz < remain)
				remain -= offset;
			else
				remain = minsz;
 			break;
		}

		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			iwx_rx_bmiss(sc, pkt, data);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;

			DPRINTF(("%s: firmware alive\n", __func__));
			/*
			 * Record the debug/error table pointers which the
			 * firmware reports in its ALIVE response; these are
			 * used later by iwx_nic_error().
			 */
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sched_base = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake up whoever is sleeping in iwx_load_firmware(). */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_STATISTICS_NOTIFICATION: {
			struct iwx_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwx_get_noise(&stats->rx.general);
			break;
		}

		case IWX_DTS_MEASUREMENT_NOTIFICATION:
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
			break;

		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
		    IWX_CT_KILL_NOTIFICATION): {
			struct iwx_ct_kill_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			printf("%s: device at critical temperature (%u degC), "
			    "stopping device\n",
			    DEVNAME(sc), le16toh(notif->temperature));
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			task_add(systq, &sc->init_task);
			break;
		}

		/*
		 * Synchronous command responses: copy the packet into the
		 * response buffer set up by iwx_send_cmd(), if any.
		 */
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			/* Discard failed or implausibly sized responses. */
			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
				    sc->sc_cmd_resp_len[idx]);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			struct iwx_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwx_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Only track the time event we ourselves scheduled. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
		    break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		default:
			handled = 0;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
	}

	/* Free m0 unless it still belongs to the RX ring or the stack. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
7441 
/*
 * Process all RX ring entries which the firmware has filled since our
 * last visit, then tell the hardware how far we got.
 */
void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* closed_rb_num is the firmware's RX ring write pointer. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		iwx_rx_pkt(sc, data, &ml);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}
	/* Pass all received frames to the network stack in one batch. */
	if_input(&sc->sc_ic.ic_if, &ml);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
7467 
/*
 * Legacy (INTx/MSI) interrupt handler.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;

	/* Mask all interrupts while we figure out what happened. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear each consumed ICT slot as we go. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		/* All-ones or 0xa5a5a5a. reads mean the device is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* not ours; re-enable and bail */
	}

	/* Acknowledge the interrupt causes we are about to service. */
	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
		int i;

		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));

	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		handled |= IWX_CSR_INT_BIT_RF_KILL;
		iwx_check_rfkill(sc);
		task_add(systq, &sc->init_task);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
#if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
		int i;

		iwx_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		printf("driver status:\n");
		for (i = 0; i < IWX_MAX_QUEUES; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			printf("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		printf("  rx ring: cur=%d\n", sc->rxq.cur);
		printf("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]);
#endif

		printf("%s: fatal firmware error\n", DEVNAME(sc));
		/* Schedule a device reset unless we are shutting down. */
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
			task_add(systq, &sc->init_task);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		handled |= IWX_CSR_INT_BIT_HW_ERR;
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			task_add(systq, &sc->init_task);
		}
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
		handled |= IWX_CSR_INT_BIT_FH_TX;

		/* Wake up whoever sleeps in iwx_ctxt_info_init(). */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

 out_ena:
	iwx_restore_interrupts(sc);
 out:
	return rv;
}
7617 
/*
 * MSI-X interrupt handler. Only vector 0 is used; all causes are
 * multiplexed onto it. Always returns 1 (interrupt claimed).
 */
int
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	/* Read and acknowledge the FH and HW interrupt causes. */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
#if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
		int i;

		iwx_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		printf("driver status:\n");
		for (i = 0; i < IWX_MAX_QUEUES; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			printf("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		printf("  rx ring: cur=%d\n", sc->rxq.cur);
		printf("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]);
#endif

		printf("%s: fatal firmware error\n", DEVNAME(sc));
		/* Schedule a device reset unless we are shutting down. */
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
			task_add(systq, &sc->init_task);
		return 1;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
		task_add(systq, &sc->init_task);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			task_add(systq, &sc->init_task);
		}
		return 1;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		int i;

		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
	return 1;
}
7704 
typedef void *iwx_match_t;

/* PCI IDs of supported 22500-series (AX200/AX201) devices. */
static const struct pci_matchid iwx_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
};

/*
 * PCI subsystem IDs of known AX201 devices. The main product ID is
 * shared with other chips, so iwx_match() checks this list as well.
 */
static const struct pci_matchid iwx_subsystem_id_ax201[] = {
	{ PCI_VENDOR_INTEL,	0x0070 },
	{ PCI_VENDOR_INTEL,	0x0074 },
	{ PCI_VENDOR_INTEL,	0x0078 },
	{ PCI_VENDOR_INTEL,	0x007c },
	{ PCI_VENDOR_INTEL,	0x0310 },
	{ PCI_VENDOR_INTEL,	0x2074 },
	{ PCI_VENDOR_INTEL,	0x4070 },
	/* TODO: There are more ax201 devices with "main" product ID 0x06f0 */
};
7722 
7723 int
7724 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
7725 {
7726 	struct pci_attach_args *pa = aux;
7727 	pcireg_t subid;
7728 	pci_vendor_id_t svid;
7729 	pci_product_id_t spid;
7730 	int i;
7731 
7732 	if (!pci_matchbyid(pa, iwx_devices, nitems(iwx_devices)))
7733 		return 0;
7734 
7735 	/*
7736 	 * Some PCI product IDs are shared among devices which use distinct
7737 	 * chips or firmware. We need to match the subsystem ID as well to
7738 	 * ensure that we have in fact found a supported device.
7739 	 */
7740 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
7741 	svid = PCI_VENDOR(subid);
7742 	spid = PCI_PRODUCT(subid);
7743 
7744 	switch (PCI_PRODUCT(pa->pa_id)) {
7745 	case PCI_PRODUCT_INTEL_WL_22500_1: /* AX200 */
7746 		return 1; /* match any device */
7747 	case PCI_PRODUCT_INTEL_WL_22500_2: /* AX201 */
7748 		for (i = 0; i < nitems(iwx_subsystem_id_ax201); i++) {
7749 			if (svid == iwx_subsystem_id_ax201[i].pm_vid &&
7750 			    spid == iwx_subsystem_id_ax201[i].pm_pid)
7751 				return 1;
7752 
7753 		}
7754 		break;
7755 	default:
7756 		break;
7757 	}
7758 
7759 	return 0;
7760 }
7761 
/*
 * One-time firmware-dependent initialization: load the init firmware
 * once to learn the NVM contents (MAC address, supported bands and
 * channels), then set up net80211 state accordingly. On subsequent
 * calls only the MAC address is refreshed. Returns 0 or an errno.
 */
int
iwx_preinit(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;
	/* Set once the first firmware load has succeeded. */
	static int attached;

	err = iwx_prepare_card_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	if (attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
		return 0;
	}

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	/* Run the init firmware just long enough to read the NVM. */
	err = iwx_run_init_mvm_ucode(sc, 1);
	iwx_stop_device(sc);
	if (err)
		return err;

	/* Print version info and MAC address on first successful fw load. */
	attached = 1;
	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwx_setup_ht_rates(sc);

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	/* Configure channel information obtained from firmware. */
	ieee80211_channel_init(ifp);

	/* Configure MAC address. */
	err = if_setlladdr(ifp, ic->ic_myaddr);
	if (err)
		printf("%s: could not set MAC address (error %d)\n",
		    DEVNAME(sc), err);

	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);

	return 0;
}
7821 
/*
 * Deferred attachment hook, scheduled by iwx_attach() via
 * config_mountroot(); runs iwx_preinit() once the system is up and
 * firmware files can be loaded from the filesystem.
 */
void
iwx_attach_hook(struct device *self)
{
	struct iwx_softc *sc = (void *)self;

	/* Firmware load requires root fs; must not run during autoconf. */
	KASSERT(!cold);

	iwx_preinit(sc);
}
7831 
7832 void
7833 iwx_attach(struct device *parent, struct device *self, void *aux)
7834 {
7835 	struct iwx_softc *sc = (void *)self;
7836 	struct pci_attach_args *pa = aux;
7837 	pci_intr_handle_t ih;
7838 	pcireg_t reg, memtype;
7839 	struct ieee80211com *ic = &sc->sc_ic;
7840 	struct ifnet *ifp = &ic->ic_if;
7841 	const char *intrstr;
7842 	int err;
7843 	int txq_i, i;
7844 
7845 	sc->sc_pct = pa->pa_pc;
7846 	sc->sc_pcitag = pa->pa_tag;
7847 	sc->sc_dmat = pa->pa_dmat;
7848 
7849 	rw_init(&sc->ioctl_rwl, "iwxioctl");
7850 
7851 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7852 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7853 	if (err == 0) {
7854 		printf("%s: PCIe capability structure not found!\n",
7855 		    DEVNAME(sc));
7856 		return;
7857 	}
7858 
7859 	/* Clear device-specific "PCI retry timeout" register (41h). */
7860 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7861 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7862 
7863 	/* Enable bus-mastering and hardware bug workaround. */
7864 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7865 	reg |= PCI_COMMAND_MASTER_ENABLE;
7866 	/* if !MSI */
7867 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
7868 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
7869 	}
7870 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7871 
7872 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7873 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7874 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
7875 	if (err) {
7876 		printf("%s: can't map mem space\n", DEVNAME(sc));
7877 		return;
7878 	}
7879 
7880 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
7881 		sc->sc_msix = 1;
7882 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
7883 		printf("%s: can't map interrupt\n", DEVNAME(sc));
7884 		return;
7885 	}
7886 
7887 	intrstr = pci_intr_string(sc->sc_pct, ih);
7888 	if (sc->sc_msix)
7889 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
7890 		    iwx_intr_msix, sc, DEVNAME(sc));
7891 	else
7892 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
7893 		    iwx_intr, sc, DEVNAME(sc));
7894 
7895 	if (sc->sc_ih == NULL) {
7896 		printf("\n");
7897 		printf("%s: can't establish interrupt", DEVNAME(sc));
7898 		if (intrstr != NULL)
7899 			printf(" at %s", intrstr);
7900 		printf("\n");
7901 		return;
7902 	}
7903 	printf(", %s\n", intrstr);
7904 
7905 	/* Clear pending interrupts. */
7906 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
7907 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
7908 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
7909 
7910 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
7911 	switch (PCI_PRODUCT(pa->pa_id)) {
7912 	case PCI_PRODUCT_INTEL_WL_22500_1:
7913 		sc->sc_fwname = "iwx-cc-a0-48";
7914 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
7915 		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
7916 		sc->sc_integrated = 1;
7917 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
7918 		sc->sc_low_latency_xtal = 0;
7919 		sc->sc_xtal_latency = 0;
7920 		sc->sc_tx_with_siso_diversity = 0;
7921 		sc->sc_uhb_supported = 0;
7922 		break;
7923 	case PCI_PRODUCT_INTEL_WL_22500_2:
7924 		sc->sc_fwname = "iwx-QuZ-a0-hr-b0-48";
7925 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
7926 		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
7927 		sc->sc_integrated = 1;
7928 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
7929 		sc->sc_low_latency_xtal = 0;
7930 		sc->sc_xtal_latency = 5000;
7931 		sc->sc_tx_with_siso_diversity = 0;
7932 		sc->sc_uhb_supported = 0;
7933 		break;
7934 	default:
7935 		printf("%s: unknown adapter type\n", DEVNAME(sc));
7936 		return;
7937 	}
7938 
7939 	/*
7940 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
7941 	 * changed, and now the revision step also includes bit 0-1 (no more
7942 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
7943 	 * in the old format.
7944 	 */
7945 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7946 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7947 
7948 	if (iwx_prepare_card_hw(sc) != 0) {
7949 		printf("%s: could not initialize hardware\n",
7950 		    DEVNAME(sc));
7951 		return;
7952 	}
7953 
7954 	/*
7955 	 * In order to recognize C step the driver should read the
7956 	 * chip version id located at the AUX bus MISC address.
7957 	 */
7958 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
7959 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7960 	DELAY(2);
7961 
7962 	err = iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
7963 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7964 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7965 			   25000);
7966 	if (!err) {
7967 		printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
7968 		return;
7969 	}
7970 
7971 	if (iwx_nic_lock(sc)) {
7972 		uint32_t hw_step = iwx_read_prph(sc, IWX_WFPM_CTRL_REG);
7973 		hw_step |= IWX_ENABLE_WFPM;
7974 		iwx_write_prph(sc, IWX_WFPM_CTRL_REG, hw_step);
7975 		hw_step = iwx_read_prph(sc, IWX_AUX_MISC_REG);
7976 		hw_step = (hw_step >> IWX_HW_STEP_LOCATION_BITS) & 0xF;
7977 		if (hw_step == 0x3)
7978 			sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7979 					(IWX_SILICON_C_STEP << 2);
7980 		iwx_nic_unlock(sc);
7981 	} else {
7982 		printf("%s: Failed to lock the nic\n", DEVNAME(sc));
7983 		return;
7984 	}
7985 
7986 	/* Allocate DMA memory for loading firmware. */
7987 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
7988 	    sizeof(struct iwx_context_info), 0);
7989 	if (err) {
7990 		printf("%s: could not allocate memory for loading firmware\n",
7991 		    DEVNAME(sc));
7992 		return;
7993 	}
7994 
7995 	/*
7996 	 * Allocate DMA memory for firmware transfers.
7997 	 * Must be aligned on a 16-byte boundary.
7998 	 */
7999 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
8000 	    sc->sc_fwdmasegsz, 16);
8001 	if (err) {
8002 		printf("%s: could not allocate memory for firmware transfers\n",
8003 		    DEVNAME(sc));
8004 		goto fail0;
8005 	}
8006 
8007 	/* Allocate interrupt cause table (ICT).*/
8008 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
8009 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
8010 	if (err) {
8011 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
8012 		goto fail1;
8013 	}
8014 
8015 	/* TX scheduler rings must be aligned on a 1KB boundary. */
8016 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
8017 	    nitems(sc->txq) * sizeof(struct iwx_agn_scd_bc_tbl), 1024);
8018 	if (err) {
8019 		printf("%s: could not allocate TX scheduler rings\n",
8020 		    DEVNAME(sc));
8021 		goto fail3;
8022 	}
8023 
8024 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
8025 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8026 		if (err) {
8027 			printf("%s: could not allocate TX ring %d\n",
8028 			    DEVNAME(sc), txq_i);
8029 			goto fail4;
8030 		}
8031 	}
8032 
8033 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
8034 	if (err) {
8035 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
8036 		goto fail4;
8037 	}
8038 
8039 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
8040 	if (sc->sc_nswq == NULL)
8041 		goto fail4;
8042 
8043 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
8044 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
8045 	ic->ic_state = IEEE80211_S_INIT;
8046 
8047 	/* Set device capabilities. */
8048 	ic->ic_caps =
8049 	    IEEE80211_C_WEP |		/* WEP */
8050 	    IEEE80211_C_RSN |		/* WPA/RSN */
8051 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
8052 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
8053 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
8054 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
8055 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
8056 
8057 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8058 	ic->ic_htcaps |=
8059 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
8060 	ic->ic_htxcaps = 0;
8061 	ic->ic_txbfcaps = 0;
8062 	ic->ic_aselcaps = 0;
8063 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8064 
8065 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
8066 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8067 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8068 
8069 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
8070 		sc->sc_phyctxt[i].id = i;
8071 	}
8072 
8073 	/* IBSS channel undefined for now. */
8074 	ic->ic_ibss_chan = &ic->ic_channels[1];
8075 
8076 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
8077 
8078 	ifp->if_softc = sc;
8079 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8080 	ifp->if_ioctl = iwx_ioctl;
8081 	ifp->if_start = iwx_start;
8082 	ifp->if_watchdog = iwx_watchdog;
8083 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8084 
8085 	if_attach(ifp);
8086 	ieee80211_ifattach(ifp);
8087 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
8088 
8089 #if NBPFILTER > 0
8090 	iwx_radiotap_attach(sc);
8091 #endif
8092 	task_set(&sc->init_task, iwx_init_task, sc);
8093 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
8094 	task_set(&sc->ba_task, iwx_ba_task, sc);
8095 	task_set(&sc->htprot_task, iwx_htprot_task, sc);
8096 
8097 	ic->ic_node_alloc = iwx_node_alloc;
8098 	ic->ic_bgscan_start = iwx_bgscan;
8099 	ic->ic_set_key = iwx_set_key;
8100 	ic->ic_delete_key = iwx_delete_key;
8101 
8102 	/* Override 802.11 state transition machine. */
8103 	sc->sc_newstate = ic->ic_newstate;
8104 	ic->ic_newstate = iwx_newstate;
8105 	ic->ic_update_htprot = iwx_update_htprot;
8106 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
8107 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
8108 #ifdef notyet
8109 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
8110 	ic->ic_ampdu_tx_stop = iwx_ampdu_tx_stop;
8111 #endif
8112 	/*
8113 	 * We cannot read the MAC address without loading the
8114 	 * firmware from disk. Postpone until mountroot is done.
8115 	 */
8116 	config_mountroot(self, iwx_attach_hook);
8117 
8118 	return;
8119 
8120 fail4:	while (--txq_i >= 0)
8121 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
8122 	iwx_free_rx_ring(sc, &sc->rxq);
8123 	iwx_dma_contig_free(&sc->sched_dma);
8124 fail3:	if (sc->ict_dma.vaddr != NULL)
8125 		iwx_dma_contig_free(&sc->ict_dma);
8126 
8127 fail1:	iwx_dma_contig_free(&sc->fw_dma);
8128 fail0:	iwx_dma_contig_free(&sc->ctxt_info_dma);
8129 	return;
8130 }
8131 
8132 #if NBPFILTER > 0
/*
 * Attach a bpf tap for radiotap capture and pre-initialize the fixed
 * parts of the rx/tx radiotap headers kept in the softc.
 */
void
iwx_radiotap_attach(struct iwx_softc *sc)
{
	/*
	 * Register a separate bpf tap delivering 802.11 frames prefixed
	 * with radiotap headers; headroom covers a frame header plus the
	 * maximum radiotap header length.
	 */
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	/* Constant fields of the receive radiotap header (little-endian). */
	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);

	/* Constant fields of the transmit radiotap header (little-endian). */
	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
}
8147 #endif
8148 
/*
 * Task which stops the interface and, unless a fatal error or rfkill
 * condition is pending, brings it back up.  Runs in process context;
 * scheduled via sc->init_task (e.g. from iwx_activate() on wakeup).
 */
void
iwx_init_task(void *arg1)
{
	struct iwx_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	/* Snapshot the generation count to detect a concurrent reset. */
	int generation = sc->sc_generation;
	/* With a hardware error or rfkill pending, re-init is pointless. */
	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));

	rw_enter_write(&sc->ioctl_rwl);
	/* Bail out if the device was reset while we slept on the lock. */
	if (generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		splx(s);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		iwx_stop(ifp);
	else
		/* Interface is already down; clear the error condition. */
		sc->sc_flags &= ~IWX_FLAG_HW_ERR;

	/* Restart only if the interface is up but not currently running. */
	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwx_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}
8176 
/*
 * Re-initialize PCI configuration and interrupt state after the system
 * resumes.  Returns 0 on success, non-zero if the card could not be
 * prepared (see iwx_prepare_card_hw()).
 */
int
iwx_resume(struct iwx_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
	iwx_conf_msix_hw(sc, 0);

	/* Re-arm the rfkill interrupt and sample the current switch state. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return iwx_prepare_card_hw(sc);
}
8194 
8195 int
8196 iwx_activate(struct device *self, int act)
8197 {
8198 	struct iwx_softc *sc = (struct iwx_softc *)self;
8199 	struct ifnet *ifp = &sc->sc_ic.ic_if;
8200 	int err = 0;
8201 
8202 	switch (act) {
8203 	case DVACT_QUIESCE:
8204 		if (ifp->if_flags & IFF_RUNNING) {
8205 			rw_enter_write(&sc->ioctl_rwl);
8206 			iwx_stop(ifp);
8207 			rw_exit(&sc->ioctl_rwl);
8208 		}
8209 		break;
8210 	case DVACT_RESUME:
8211 		err = iwx_resume(sc);
8212 		if (err)
8213 			printf("%s: could not initialize hardware\n",
8214 			    DEVNAME(sc));
8215 		break;
8216 	case DVACT_WAKEUP:
8217 		/* Hardware should be up at this point. */
8218 		if (iwx_set_hw_ready(sc))
8219 			task_add(systq, &sc->init_task);
8220 		break;
8221 	}
8222 
8223 	return 0;
8224 }
8225 
/*
 * Autoconf driver glue: no device instances yet, driver name "iwx",
 * device class DV_IFNET (network interface).
 */
struct cfdriver iwx_cd = {
	NULL, "iwx", DV_IFNET
};
8229 
/*
 * Autoconf attachment glue: softc size, match and attach functions,
 * no detach function, and the activate (suspend/resume) hook.
 */
struct cfattach iwx_ca = {
	sizeof(struct iwx_softc), iwx_match, iwx_attach,
	NULL, iwx_activate
};
8234