/*	$OpenBSD: if_iwx.c,v 1.7 2020/02/29 09:42:15 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_mira.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWX_DEBUG
#define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
int iwx_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif
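
/*
 * The debug macros take their printf(9) arguments as a single
 * parenthesized list so that the whole call, arguments included,
 * compiles away when IWX_DEBUG is not defined.  For example:
 *
 *	DPRINTFN(2, ("%s: tlv type %u\n", DEVNAME(sc), tlv_type));
 */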

#include <dev/pci/if_iwxreg.h>
#include <dev/pci/if_iwxvar.h>

const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWX_NUM_2GHZ_CHANNELS	14

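/*
 * The "rate" values below are in units of 500 kbit/s, the convention
 * net80211 uses for rate sets: 2 is 1 Mbit/s CCK, 108 is 54 Mbit/s
 * OFDM.  Entries with IWX_RATE_INVM_PLCP have no legacy PLCP code and
 * are reachable only as HT MCS rates.
 */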
const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

struct iwx_nvm_section {
	uint16_t length;
	uint8_t *data;
};

uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_is_mimo_ht_plcp(uint8_t);
int	iwx_is_mimo_mcs(int);
int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
int	iwx_apply_debug_destination(struct iwx_softc *);
int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
void	iwx_ctxt_info_free(struct iwx_softc *);
void	iwx_ctxt_info_free_paging(struct iwx_softc *);
int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
	    struct iwx_context_info_dram *);
int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
	    uint8_t *, size_t);
int	iwx_set_default_calib(struct iwx_softc *, const void *);
void	iwx_fw_info_free(struct iwx_fw_info *);
int	iwx_read_firmware(struct iwx_softc *);
uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
int	iwx_nic_lock(struct iwx_softc *);
void	iwx_nic_assert_locked(struct iwx_softc *);
void	iwx_nic_unlock(struct iwx_softc *);
void	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
	    uint32_t);
void	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
	    bus_size_t);
void	iwx_dma_contig_free(struct iwx_dma_info *);
int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_disable_rx_dma(struct iwx_softc *);
void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_enable_rfkill_int(struct iwx_softc *);
int	iwx_check_rfkill(struct iwx_softc *);
void	iwx_enable_interrupts(struct iwx_softc *);
void	iwx_enable_fwload_interrupt(struct iwx_softc *);
void	iwx_restore_interrupts(struct iwx_softc *);
void	iwx_disable_interrupts(struct iwx_softc *);
void	iwx_ict_reset(struct iwx_softc *);
int	iwx_set_hw_ready(struct iwx_softc *);
int	iwx_prepare_card_hw(struct iwx_softc *);
void	iwx_apm_config(struct iwx_softc *);
int	iwx_apm_init(struct iwx_softc *);
void	iwx_apm_stop(struct iwx_softc *);
int	iwx_allow_mcast(struct iwx_softc *);
void	iwx_init_msix_hw(struct iwx_softc *);
void	iwx_conf_msix_hw(struct iwx_softc *, int);
int	iwx_start_hw(struct iwx_softc *);
void	iwx_stop_device(struct iwx_softc *);
void	iwx_nic_config(struct iwx_softc *);
int	iwx_nic_rx_init(struct iwx_softc *);
int	iwx_nic_init(struct iwx_softc *);
int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
void	iwx_post_alive(struct iwx_softc *);
void	iwx_protect_session(struct iwx_softc *, struct iwx_node *, uint32_t,
	    uint32_t);
void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
int	iwx_nvm_read_chunk(struct iwx_softc *, uint16_t, uint16_t, uint16_t,
	    uint8_t *, uint16_t *);
int	iwx_nvm_read_section(struct iwx_softc *, uint16_t, uint8_t *,
	    uint16_t *, size_t);
void	iwx_init_channel_map(struct iwx_softc *, const uint16_t * const,
	    const uint8_t *nvm_channels, int nchan);
void	iwx_setup_ht_rates(struct iwx_softc *);
void	iwx_htprot_task(void *);
void	iwx_update_htprot(struct ieee80211com *, struct ieee80211_node *);
int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int);
#ifdef notyet
int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
#endif
void	iwx_ba_task(void *);

int	iwx_parse_nvm_data(struct iwx_softc *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, int);
void	iwx_set_hw_address_8000(struct iwx_softc *, struct iwx_nvm_data *,
	    const uint16_t *, const uint16_t *);
int	iwx_parse_nvm_sections(struct iwx_softc *, struct iwx_nvm_section *);
int	iwx_nvm_init(struct iwx_softc *);
int	iwx_load_firmware(struct iwx_softc *);
int	iwx_start_fw(struct iwx_softc *);
int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
int	iwx_load_ucode_wait_alive(struct iwx_softc *);
int	iwx_send_dqa_cmd(struct iwx_softc *);
int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
int	iwx_config_ltr(struct iwx_softc *);
void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
int	iwx_rx_addbuf(struct iwx_softc *, int, int);
int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_enable_ht_cck_fallback(struct iwx_softc *, struct iwx_node *);
void	iwx_rx_tx_cmd_single(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_node *);
void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
void	iwx_phy_ctxt_cmd_hdr(struct iwx_softc *, struct iwx_phy_ctxt *,
	    struct iwx_phy_context_cmd *, uint32_t, uint32_t);
void	iwx_phy_ctxt_cmd_data(struct iwx_softc *, struct iwx_phy_context_cmd *,
	    struct ieee80211_channel *, uint8_t, uint8_t);
int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t);
int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
	    uint32_t *);
int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
void	iwx_cmd_done(struct iwx_softc *, int, int, int);
const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
	    struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
void	iwx_tx_update_byte_tbl(struct iwx_tx_ring *, uint16_t, uint16_t);
int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *, int);
int	iwx_flush_tx_path(struct iwx_softc *);
int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
	    struct iwx_beacon_filter_cmd *);
int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_power_cmd *);
int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
int	iwx_power_update_device(struct iwx_softc *);
int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
int	iwx_disable_beacon_filter(struct iwx_softc *);
int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
int	iwx_add_aux_sta(struct iwx_softc *);
int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
int	iwx_config_umac_scan(struct iwx_softc *);
int	iwx_umac_scan(struct iwx_softc *, int);
uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
int	iwx_rval2ridx(int);
void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_ctx_cmd *, uint32_t);
void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_data_sta *, int);
int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
int	iwx_clear_statistics(struct iwx_softc *);
int	iwx_update_quotas(struct iwx_softc *, struct iwx_node *, int);
void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
int	iwx_scan(struct iwx_softc *);
int	iwx_bgscan(struct ieee80211com *);
int	iwx_umac_scan_abort(struct iwx_softc *);
int	iwx_scan_abort(struct iwx_softc *);
int	iwx_enable_data_tx_queues(struct iwx_softc *);
int	iwx_auth(struct iwx_softc *);
int	iwx_deauth(struct iwx_softc *);
int	iwx_assoc(struct iwx_softc *);
int	iwx_disassoc(struct iwx_softc *);
int	iwx_run(struct iwx_softc *);
int	iwx_run_stop(struct iwx_softc *);
struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
void	iwx_calib_timeout(void *);
int	iwx_media_change(struct ifnet *);
void	iwx_newstate_task(void *);
int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwx_endscan(struct iwx_softc *);
void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwx_sf_config(struct iwx_softc *, int);
int	iwx_send_bt_init_conf(struct iwx_softc *);
int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
int	iwx_init_hw(struct iwx_softc *);
int	iwx_init(struct ifnet *);
void	iwx_start(struct ifnet *);
void	iwx_stop(struct ifnet *);
void	iwx_watchdog(struct ifnet *);
int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwx_desc_lookup(uint32_t);
void	iwx_nic_error(struct iwx_softc *);
void	iwx_nic_umac_error(struct iwx_softc *);
int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
	    struct mbuf_list *);
void	iwx_notif_intr(struct iwx_softc *);
int	iwx_intr(void *);
int	iwx_intr_msix(void *);
int	iwx_match(struct device *, void *, void *);
int	iwx_preinit(struct iwx_softc *);
void	iwx_attach_hook(struct device *);
void	iwx_attach(struct device *, struct device *, void *);
void	iwx_init_task(void *);
int	iwx_activate(struct device *, int);
int	iwx_resume(struct iwx_softc *);

#if NBPFILTER > 0
void	iwx_radiotap_attach(struct iwx_softc *);
#endif

#ifdef notyet
uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}
#endif

int
iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
}

int
iwx_is_mimo_mcs(int mcs)
{
	int ridx = iwx_mcs2ridx[mcs];
	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
}

int
iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwx_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* Free paging memory. */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

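/*
 * Firmware images carry LMAC, UMAC, and paging sections back-to-back.
 * Separator entries, recognizable by their fws_devoff values
 * (IWX_CPU1_CPU2_SEPARATOR_SECTION, IWX_PAGING_SEPARATOR_SECTION),
 * mark the group boundaries:
 *
 *	[LMAC 0..n] SEP [UMAC 0..m] SEP [paging 0..k]
 *
 * iwx_get_num_sections() counts the sections of one group, starting
 * at the given index and stopping at the next separator.
 */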
int
iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
{
	int i = 0;

	while (start < fws->fw_count &&
	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	KASSERT(dram->paging == NULL);

	lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	umac_cnt = iwx_get_num_sections(fws, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwx_get_num_sections(fws, lmac_cnt + umac_cnt + 2);

	dram->fw = mallocarray(umac_cnt + lmac_cnt, sizeof(*dram->fw),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw)
		return ENOMEM;
	dram->paging = mallocarray(paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging)
		return ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[dram->fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %llu\n",
		    __func__, i,
		    (unsigned long long)dram->fw[dram->fw_cnt].paddr,
		    (unsigned long long)dram->fw[dram->fw_cnt].size));
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[dram->fw_cnt + 1], &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[dram->fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %llu\n",
		    __func__, i,
		    (unsigned long long)dram->fw[dram->fw_cnt].paddr,
		    (unsigned long long)dram->fw[dram->fw_cnt].size));
		dram->fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory is not stored in dram->fw like the umac and lmac
	 * sections are; it is stored separately because its release timing
	 * differs: while fw memory can be released on "alive", the paging
	 * memory can be freed only when the device goes down.
	 * Given that, the logic here for accessing the fw image is a bit
	 * different - fw_cnt does not change, so the loop counter is added
	 * to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %llu\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
		dram->paging_cnt++;
	}

	return 0;
}

int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err = 0;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size));
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10))));

	return 0;
}

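/*
 * The debug destination TLV encodes the monitor buffer size as a
 * power-of-two exponent biased by 11: a size_power value of N requests
 * a buffer of 2^(N + 11) bytes, and 0 selects the 2^26 (64 MB)
 * maximum.  iwx_alloc_fw_monitor_block() falls back to smaller powers
 * of two, down to 2^11, if a large allocation fails.
 */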
int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		DPRINTF(("%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power));
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}

int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			iwx_set_bits_prph(sc, addr, (1 << val));
			break;
		case PRPH_CLEARBIT:
			iwx_clear_bits_prph(sc, addr, (1 << val));
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		/* base_reg and end_reg were already byte-swapped above. */
		iwx_write_prph(sc, base_reg,
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
}

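/*
 * Devices served by this driver load their firmware by themselves.
 * The driver assembles a "context info" structure in DMA memory which
 * describes the DMA addresses of all firmware sections, the default
 * RX queue, and the TX command queue, points the device at it via
 * IWX_CSR_CTXT_INFO_BA, and then triggers the firmware self-load with
 * a write to IWX_UREG_CPU_INIT_RUN.
 */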
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0, rb_size;
	int err;

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
	    sizeof(*ctxt_info), 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}
	ctxt_info = sc->ctxt_info_dma.vaddr;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
	else
		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_CMD_QUEUE_SIZE);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err)
			return err;
	}

	/* kick FW self load */
	IWX_WRITE_8(sc, IWX_CSR_CTXT_INFO_BA, sc->ctxt_info_dma.paddr);
	if (!iwx_nic_lock(sc))
		return EBUSY;
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw) {
		KASSERT(dram->fw_cnt == 0);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF, dram->fw_cnt * sizeof(dram->fw[0]));
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

void
iwx_ctxt_info_free(struct iwx_softc *sc)
{
	iwx_dma_contig_free(&sc->ctxt_info_dma);
	iwx_ctxt_info_free_fw_img(sc);
}

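/*
 * The payload of each IWX_UCODE_TLV_SEC_* TLV is a 32-bit device load
 * offset followed by the raw section data.  Note that fws_data below
 * points into the loaded firmware image rather than at a copy, so
 * fw_rawdata must remain valid for as long as the sections are in use.
 */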
int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */

struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;

int
iwx_set_default_calib(struct iwx_softc *sc, const void *data)
{
	const struct iwx_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwx_fw_info_free(struct iwx_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000

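/*
 * Firmware files begin with a struct iwx_tlv_ucode_header (a zero
 * marker word, the IWX_TLV_UCODE_MAGIC value, and version
 * information), followed by a sequence of TLV records.  Each record
 * carries a 32-bit type, a 32-bit payload length, and the payload
 * itself, padded to a 4-byte boundary.
 */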
int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
	fw->fw_status = IWX_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwx_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			fw->dbg_dest_ver = (uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;

			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			DPRINTF(("Found debug configuration: %d\n", conf->id));
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			struct iwx_umac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
				le32toh(dbg_ptrs->error_info_addr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			struct iwx_lmac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
				le32toh(dbg_ptrs->error_event_table_ptr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		/* undocumented TLVs found in ax200-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

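/*
 * Periphery (PRPH) registers are not mapped into the PCI BAR directly;
 * they are reached indirectly through the HBUS target access
 * registers.  The (3 << 24) OR'ed into the address word apparently
 * requests a full 4-byte access; the exact encoding is undocumented
 * here and is taken on faith from iwlwifi.
 */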
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}

void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}

void
iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
{
	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwx_nic_lock(sc)) {
		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
		iwx_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwx_nic_lock(sc)) {
		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
		}
		iwx_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	return iwx_write_mem(sc, addr, &val, 1);
}

int
iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

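/*
 * Acquire a keep-awake reference on the NIC.  Setting MAC_ACCESS_REQ
 * keeps the device from entering a low-power state while registers
 * are being accessed; the poll waits up to 150ms for the MAC clock to
 * become ready.  Calls may nest: sc_nic_locks counts outstanding
 * references, and the request bit is only cleared when the last one
 * is dropped in iwx_nic_unlock().
 */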
int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	DELAY(2);

	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}

void
iwx_nic_assert_locked(struct iwx_softc *sc)
{
	uint32_t reg = IWX_READ(sc, IWX_CSR_GP_CNTRL);
	if ((reg & IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
		panic("%s: mac clock not ready", DEVNAME(sc));
	if (reg & IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
		panic("%s: mac gone to sleep", DEVNAME(sc));
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}

void
iwx_nic_unlock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}

void
iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwx_nic_lock(sc)) {
		val = iwx_read_prph(sc, reg) & mask;
		val |= bits;
		iwx_write_prph(sc, reg, val);
		iwx_nic_unlock(sc);
	}
}

void
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	iwx_set_bits_mask_prph(sc, reg, bits, ~0);
}

void
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
}

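/*
 * Allocate a physically contiguous DMA buffer with the canonical
 * bus_dma(9) sequence: bus_dmamap_create() for the map,
 * bus_dmamem_alloc() for the memory, bus_dmamem_map() to map it into
 * kernel virtual address space, and bus_dmamap_load() to obtain the
 * bus address.  On failure, iwx_dma_contig_free() undoes whatever
 * steps had already succeeded.
 */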
int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwx_dma_contig_free(dma);
	return err;
}

void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

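/*
 * The multi-queue RX hardware uses two descriptor rings plus a status
 * area: the "free" ring holds 64-bit bus addresses of empty buffers
 * offered to the device, the "used" ring is written by the device as
 * buffers are filled, and the status area holds the device's write
 * pointers.
 */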
int
iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
	    size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}

	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
		struct iwx_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwx_free_rx_ring(sc, ring);
	return err;
}

void
iwx_disable_rx_dma(struct iwx_softc *sc)
{
	int ntries;

	if (iwx_nic_lock(sc)) {
		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
			    IWX_RXF_DMA_IDLE)
				break;
			DELAY(10);
		}
		iwx_nic_unlock(sc);
	}
}

void
iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	ring->cur = 0;
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
}

void
iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	int i;

	iwx_dma_contig_free(&ring->free_desc_dma);
	iwx_dma_contig_free(&ring->stat_dma);
	iwx_dma_contig_free(&ring->used_desc_dma);

	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
		struct iwx_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

1633 int
1634 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
1635 {
1636 	bus_addr_t paddr;
1637 	bus_size_t size;
1638 	int i, err, qlen;
1639 
1640 	ring->qid = qid;
1641 	ring->queued = 0;
1642 	ring->cur = 0;
1643 	ring->tail = 0;
1644 
1645 	if (qid == IWX_DQA_CMD_QUEUE)
1646 		qlen = IWX_CMD_QUEUE_SIZE;
1647 	else
1648 		qlen = IWX_TX_RING_COUNT;
1649 
1650 	/* Allocate TX descriptors (256-byte aligned). */
1651 	size = qlen * sizeof (struct iwx_tfh_tfd);
1652 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1653 	if (err) {
1654 		printf("%s: could not allocate TX ring DMA memory\n",
1655 		    DEVNAME(sc));
1656 		goto fail;
1657 	}
1658 	ring->desc = ring->desc_dma.vaddr;
1659 
1660 	/*
1661 	 * There is no need to allocate DMA buffers for unused rings.
1662 	 * The hardware supports up to 31 Tx rings which is more
1663 	 * than we currently need.
1664 	 *
1665 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1666 	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
1667 	 * are sc->tqx[IWX_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1668 	 * in order to provide one queue per EDCA category.
1669 	 *
1670 	 * Tx aggregation will require additional queues (one queue per TID
1671 	 * for which aggregation is enabled) but we do not implement this yet.
1672 	 */
1673 	if (qid > IWX_DQA_MAX_MGMT_QUEUE)
1674 		return 0;
1675 
1676 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
1677 	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
1678 	if (err) {
1679 		printf("%s: could not allocate byte count table DMA memory\n",
1680 		    DEVNAME(sc));
1681 		goto fail;
1682 	}
1683 
1684 	size = qlen * sizeof(struct iwx_device_cmd);
1685 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
1686 	    IWX_FIRST_TB_SIZE_ALIGN);
1687 	if (err) {
1688 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1689 		goto fail;
1690 	}
1691 	ring->cmd = ring->cmd_dma.vaddr;
1692 
1693 	paddr = ring->cmd_dma.paddr;
1694 	for (i = 0; i < qlen; i++) {
1695 		struct iwx_tx_data *data = &ring->data[i];
1696 		size_t mapsize;
1697 
1698 		data->cmd_paddr = paddr;
1699 		paddr += sizeof(struct iwx_device_cmd);
1700 
1701 		/* FW commands may require more mapped space than packets. */
1702 		if (qid == IWX_DQA_CMD_QUEUE)
1703 			mapsize = (sizeof(struct iwx_cmd_header) +
1704 			    IWX_MAX_CMD_PAYLOAD_SIZE);
1705 		else
1706 			mapsize = MCLBYTES;
1707 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1708 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1709 		    &data->map);
1710 		if (err) {
1711 			printf("%s: could not create TX buf DMA map\n",
1712 			    DEVNAME(sc));
1713 			goto fail;
1714 		}
1715 	}
1716 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1717 	return 0;
1718 
1719 fail:	iwx_free_tx_ring(sc, ring);
1720 	return err;
1721 }
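
/*
 * Illustrative sketch, not driver code: with the DQA layout described
 * in iwx_alloc_tx_ring() above, the Tx ring used for a data frame in
 * EDCA access category 'ac' would be looked up as follows.  The helper
 * name is hypothetical.
 */
#if 0
static struct iwx_tx_ring *
iwx_dqa_txq_for_ac(struct iwx_softc *sc, int ac)
{
	/* sc->txq[0] is the command queue; EDCA queues are sc->txq[5:8]. */
	return &sc->txq[IWX_DQA_MIN_MGMT_QUEUE + ac];
}
#endif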
1722 
1723 void
1724 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1725 {
1726 	int i, qlen;
1727 
1728 	if (ring->qid == IWX_DQA_CMD_QUEUE)
1729 		qlen = IWX_CMD_QUEUE_SIZE;
1730 	else
1731 		qlen = IWX_TX_RING_COUNT;
1732 
1733 	for (i = 0; i < qlen; i++) {
1734 		struct iwx_tx_data *data = &ring->data[i];
1735 
1736 		if (data->m != NULL) {
1737 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1738 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1739 			bus_dmamap_unload(sc->sc_dmat, data->map);
1740 			m_freem(data->m);
1741 			data->m = NULL;
1742 		}
1743 	}
1744 	/* Clear TX descriptors. */
1745 	memset(ring->desc, 0, ring->desc_dma.size);
1746 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1747 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1748 	sc->qfullmsk &= ~(1 << ring->qid);
1749 	ring->queued = 0;
1750 	ring->cur = 0;
1751 	ring->tail = 0;
1752 }
1753 
1754 void
1755 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1756 {
1757 	int i, qlen;
1758 
1759 	iwx_dma_contig_free(&ring->desc_dma);
1760 	iwx_dma_contig_free(&ring->cmd_dma);
1761 	iwx_dma_contig_free(&ring->bc_tbl);
1762 
1763 	if (ring->qid == IWX_DQA_CMD_QUEUE)
1764 		qlen = IWX_CMD_QUEUE_SIZE;
1765 	else
1766 		qlen = IWX_TX_RING_COUNT;
1767 
1768 	for (i = 0; i < qlen; i++) {
1769 		struct iwx_tx_data *data = &ring->data[i];
1770 
1771 		if (data->m != NULL) {
1772 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1773 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1774 			bus_dmamap_unload(sc->sc_dmat, data->map);
1775 			m_freem(data->m);
1776 			data->m = NULL;
1777 		}
1778 		if (data->map != NULL)
1779 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1780 	}
1781 }
1782 
1783 void
1784 iwx_enable_rfkill_int(struct iwx_softc *sc)
1785 {
1786 	if (!sc->sc_msix) {
1787 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1788 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1789 	} else {
1790 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1791 		    sc->sc_fh_init_mask);
1792 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1793 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1794 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1795 	}
1796 
1797 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1798 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1799 }
1800 
1801 int
1802 iwx_check_rfkill(struct iwx_softc *sc)
1803 {
1804 	uint32_t v;
1805 	int s;
1806 	int rv;
1807 
1808 	s = splnet();
1809 
1810 	/*
1811 	 * "documentation" is not really helpful here:
1812 	 *  27:	HW_RF_KILL_SW
1813 	 *	Indicates state of (platform's) hardware RF-Kill switch
1814 	 *
1815 	 * But apparently when it's off, it's on ...
1816 	 */
1817 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1818 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1819 	if (rv) {
1820 		sc->sc_flags |= IWX_FLAG_RFKILL;
1821 	} else {
1822 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1823 	}
1824 
1825 	splx(s);
1826 	return rv;
1827 }
1828 
1829 void
1830 iwx_enable_interrupts(struct iwx_softc *sc)
1831 {
1832 	if (!sc->sc_msix) {
1833 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1834 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1835 	} else {
1836 		/*
1837 		 * fh/hw_mask keeps all the unmasked causes.
1838 		 * Unlike MSI, in MSI-X a cause is enabled when its mask bit is unset.
1839 		 */
1840 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1841 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1842 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1843 		    ~sc->sc_fh_mask);
1844 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1845 		    ~sc->sc_hw_mask);
1846 	}
1847 }
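
/*
 * Sketch (illustrative, not driver code): because the MSI-X mask
 * registers are active-low, unmasking one additional HW cause amounts
 * to setting its bit in sc_hw_mask and writing back the complement.
 * The helper name is hypothetical.
 */
#if 0
static void
iwx_msix_unmask_hw_cause(struct iwx_softc *sc, uint32_t cause)
{
	sc->sc_hw_mask |= cause;
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~sc->sc_hw_mask);
}
#endif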
1848 
1849 void
1850 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1851 {
1852 	if (!sc->sc_msix) {
1853 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1854 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1855 	} else {
1856 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1857 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1858 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1859 		/*
1860 		 * Leave all the FH causes enabled to get the ALIVE
1861 		 * notification.
1862 		 */
1863 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1864 		    ~sc->sc_fh_init_mask);
1865 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1866 	}
1867 }
1868 
1869 void
1870 iwx_restore_interrupts(struct iwx_softc *sc)
1871 {
1872 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1873 }
1874 
1875 void
1876 iwx_disable_interrupts(struct iwx_softc *sc)
1877 {
1878 	int s = splnet();
1879 
1880 	if (!sc->sc_msix) {
1881 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
1882 
1883 		/* acknowledge all interrupts */
1884 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
1885 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
1886 	} else {
1887 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1888 		    sc->sc_fh_init_mask);
1889 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1890 		    sc->sc_hw_init_mask);
1891 	}
1892 
1893 	splx(s);
1894 }
1895 
1896 void
1897 iwx_ict_reset(struct iwx_softc *sc)
1898 {
1899 	iwx_disable_interrupts(sc);
1900 
1901 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
1902 	sc->ict_cur = 0;
1903 
1904 	/* Set physical address of ICT (4KB aligned). */
1905 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
1906 	    IWX_CSR_DRAM_INT_TBL_ENABLE
1907 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
1908 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
1909 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
1910 
1911 	/* Switch to ICT interrupt mode in driver. */
1912 	sc->sc_flags |= IWX_FLAG_USE_ICT;
1913 
1914 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
1915 	iwx_enable_interrupts(sc);
1916 }
1917 
1918 #define IWX_HW_READY_TIMEOUT 50
1919 int
1920 iwx_set_hw_ready(struct iwx_softc *sc)
1921 {
1922 	int ready;
1923 
1924 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
1925 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1926 
1927 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
1928 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1929 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1930 	    IWX_HW_READY_TIMEOUT);
1931 	if (ready)
1932 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
1933 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
1934 
1935 	return ready;
1936 }
1937 #undef IWX_HW_READY_TIMEOUT
1938 
1939 int
1940 iwx_prepare_card_hw(struct iwx_softc *sc)
1941 {
1942 	int t = 0;
1943 
1944 	if (iwx_set_hw_ready(sc))
1945 		return 0;
1946 
1947 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
1948 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1949 	DELAY(1000);
1950 
1951 
1953 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
1954 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
1955 
1956 	do {
1957 		if (iwx_set_hw_ready(sc))
1958 			return 0;
1959 		DELAY(200);
1960 		t += 200;
1961 	} while (t < 150000);
1962 
1963 	return ETIMEDOUT;
1964 }
1965 
1966 void
1967 iwx_apm_config(struct iwx_softc *sc)
1968 {
1969 	pcireg_t lctl, cap;
1970 
1971 	/*
1972 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1973 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1974 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1975 	 *    costs negligible amount of power savings.
1976 	 * If not (unlikely), enable L0S, so there is at least some
1977 	 *    power savings, even without L1.
1978 	 */
1979 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1980 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1981 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1982 		IWX_SETBITS(sc, IWX_CSR_GIO_REG,
1983 		    IWX_CSR_GIO_REG_VAL_L0S_ENABLED);
1984 	} else {
1985 		IWX_CLRBITS(sc, IWX_CSR_GIO_REG,
1986 		    IWX_CSR_GIO_REG_VAL_L0S_ENABLED);
1987 	}
1988 
1989 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1990 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1991 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1992 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1993 	    DEVNAME(sc),
1994 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1995 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1996 }
1997 
1998 /*
1999  * Start up NIC's basic functionality after it has been reset
2000  * e.g. after platform boot or shutdown.
2001  * NOTE:  This does not load uCode nor start the embedded processor
2002  */
2003 int
2004 iwx_apm_init(struct iwx_softc *sc)
2005 {
2006 	int err = 0;
2007 
2008 	/*
2009 	 * Disable L0s without affecting L1;
2010 	 *  don't wait for ICH L0s (ICH bug W/A)
2011 	 */
2012 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2013 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2014 
2015 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2016 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2017 
2018 	/*
2019 	 * Enable HAP INTA (interrupt from management bus) to
2020 	 * wake device's PCI Express link L1a -> L0s
2021 	 */
2022 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2023 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2024 
2025 	iwx_apm_config(sc);
2026 
2027 	/*
2028 	 * Set "initialization complete" bit to move adapter from
2029 	 * D0U* --> D0A* (powered-up active) state.
2030 	 */
2031 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2032 
2033 	/*
2034 	 * Wait for clock stabilization; once stabilized, access to
2035 	 * device-internal resources is supported, e.g. iwx_write_prph()
2036 	 * and accesses to uCode SRAM.
2037 	 */
2038 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2039 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2040 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2041 		printf("%s: timeout waiting for clock stabilization\n",
2042 		    DEVNAME(sc));
2043 		err = ETIMEDOUT;
2044 		goto out;
2045 	}
2046  out:
2047 	if (err)
2048 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2049 	return err;
2050 }
2051 
2052 void
2053 iwx_apm_stop(struct iwx_softc *sc)
2054 {
2055 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2056 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2057 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2058 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2059 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2060 	DELAY(1000);
2061 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2062 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2063 	DELAY(5000);
2064 
2065 	/* stop device's busmaster DMA activity */
2066 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2067 
2068 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2069 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2070 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2071 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2072 
2073 	/*
2074 	 * Clear "initialization complete" bit to move adapter from
2075 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2076 	 */
2077 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2078 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2079 }
2080 
2081 void
2082 iwx_init_msix_hw(struct iwx_softc *sc)
2083 {
2084 	iwx_conf_msix_hw(sc, 0);
2085 
2086 	if (!sc->sc_msix)
2087 		return;
2088 
2089 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2090 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2091 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2092 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2093 }
2094 
2095 void
2096 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2097 {
2098 	int vector = 0;
2099 
2100 	if (!sc->sc_msix) {
2101 		/* Newer chips default to MSIX. */
2102 		if (!stopped && iwx_nic_lock(sc)) {
2103 			iwx_write_prph(sc, IWX_UREG_CHICK,
2104 			    IWX_UREG_CHICK_MSI_ENABLE);
2105 			iwx_nic_unlock(sc);
2106 		}
2107 		return;
2108 	}
2109 
2110 	if (!stopped && iwx_nic_lock(sc)) {
2111 		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
2112 		iwx_nic_unlock(sc);
2113 	}
2114 
2115 	/* Disable all interrupts */
2116 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2117 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2118 
2119 	/* Map fallback-queue (command/mgmt) to a single vector */
2120 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2121 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2122 	/* Map RSS queue (data) to the same vector */
2123 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2124 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2125 
2126 	/* Enable interrupts for the RX queue causes */
2127 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2128 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2129 
2130 	/* Map non-RX causes to the same vector */
2131 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2132 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2133 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2134 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2135 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2136 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2137 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2138 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2139 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2140 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2141 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2142 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2143 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
2144 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2145 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2146 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2147 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2148 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2149 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2150 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2151 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2152 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2153 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2154 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2155 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2156 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2157 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2158 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2159 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2160 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2161 
2162 	/* Enable interrupts for the non-RX causes */
2163 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2164 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2165 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2166 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2167 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2168 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2169 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2170 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2171 	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
2172 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2173 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2174 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2175 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2176 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2177 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2178 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2179 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2180 }
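
/*
 * Sketch (illustrative, not driver code): the per-cause IVAR writes in
 * iwx_conf_msix_hw() could equivalently be driven from a table, since
 * every cause is mapped to the same vector with auto-clear disabled.
 * The helper name is hypothetical.
 */
#if 0
static void
iwx_conf_msix_ivars(struct iwx_softc *sc, int vector)
{
	static const int causes[] = {
		IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM,
		IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM,
		IWX_MSIX_IVAR_CAUSE_S2D,
		IWX_MSIX_IVAR_CAUSE_FH_ERR,
		IWX_MSIX_IVAR_CAUSE_REG_ALIVE,
		IWX_MSIX_IVAR_CAUSE_REG_WAKEUP,
		IWX_MSIX_IVAR_CAUSE_REG_IML,
		IWX_MSIX_IVAR_CAUSE_REG_CT_KILL,
		IWX_MSIX_IVAR_CAUSE_REG_RF_KILL,
		IWX_MSIX_IVAR_CAUSE_REG_PERIODIC,
		IWX_MSIX_IVAR_CAUSE_REG_SW_ERR,
		IWX_MSIX_IVAR_CAUSE_REG_SCD,
		IWX_MSIX_IVAR_CAUSE_REG_FH_TX,
		IWX_MSIX_IVAR_CAUSE_REG_HW_ERR,
		IWX_MSIX_IVAR_CAUSE_REG_HAP,
	};
	int i;

	for (i = 0; i < nitems(causes); i++)
		IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(causes[i]),
		    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
}
#endif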
2181 
2182 int
2183 iwx_start_hw(struct iwx_softc *sc)
2184 {
2185 	int err;
2186 
2187 	err = iwx_prepare_card_hw(sc);
2188 	if (err)
2189 		return err;
2190 
2191 	/* Reset the entire device */
2192 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2193 	DELAY(5000);
2194 
2195 	err = iwx_apm_init(sc);
2196 	if (err)
2197 		return err;
2198 
2199 	iwx_init_msix_hw(sc);
2200 
2201 	iwx_enable_rfkill_int(sc);
2202 	iwx_check_rfkill(sc);
2203 
2204 	return 0;
2205 }
2206 
2207 
2208 void
2209 iwx_stop_device(struct iwx_softc *sc)
2210 {
2211 	int qid;
2212 
2213 	iwx_disable_interrupts(sc);
2214 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2215 
2216 	iwx_disable_rx_dma(sc);
2217 	iwx_reset_rx_ring(sc, &sc->rxq);
2218 	for (qid = 0; qid < nitems(sc->txq); qid++)
2219 		iwx_reset_tx_ring(sc, &sc->txq[qid]);
2220 
2221 	/* Make sure (redundant) we've released our request to stay awake */
2222 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2223 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2224 	if (sc->sc_nic_locks > 0)
2225 		printf("%s: %d active NIC locks forcefully cleared\n",
2226 		    DEVNAME(sc), sc->sc_nic_locks);
2227 	sc->sc_nic_locks = 0;
2228 
2229 	/* Stop the device, and put it in low power state */
2230 	iwx_apm_stop(sc);
2231 
2232 	/* Reset the on-board processor. */
2233 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2234 	DELAY(5000);
2235 
2236 	/*
2237 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2238 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2239 	 * that enables radio won't fire on the correct irq, and the
2240 	 * driver won't be able to handle the interrupt.
2241 	 * Configure the IVAR table again after reset.
2242 	 */
2243 	iwx_conf_msix_hw(sc, 1);
2244 
2245 	/*
2246 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2247 	 * Clear the interrupt again.
2248 	 */
2249 	iwx_disable_interrupts(sc);
2250 
2251 	/* Even though we stop the HW we still want the RF kill interrupt. */
2252 	iwx_enable_rfkill_int(sc);
2253 	iwx_check_rfkill(sc);
2254 
2255 	iwx_prepare_card_hw(sc);
2256 
2257 	iwx_ctxt_info_free_paging(sc);
2258 }
2259 
2260 void
2261 iwx_nic_config(struct iwx_softc *sc)
2262 {
2263 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2264 	uint32_t mask, val, reg_val = 0;
2265 
2266 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2267 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2268 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2269 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2270 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2271 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2272 
2273 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2274 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2275 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2276 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2277 
2278 	/* radio configuration */
2279 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2280 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2281 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2282 
2283 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2284 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2285 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2286 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2287 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2288 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2289 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2290 
2291 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2292 	val &= ~mask;
2293 	val |= reg_val;
2294 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2295 }
2296 
2297 int
2298 iwx_nic_rx_init(struct iwx_softc *sc)
2299 {
2300 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2301 
2302 	/*
2303 	 * We don't configure the RFH; the firmware will do that.
2304 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2305 	 */
2306 	return 0;
2307 }
2308 
2309 int
2310 iwx_nic_init(struct iwx_softc *sc)
2311 {
2312 	int err;
2313 
2314 	iwx_apm_init(sc);
2315 	iwx_nic_config(sc);
2316 
2317 	err = iwx_nic_rx_init(sc);
2318 	if (err)
2319 		return err;
2320 
2321 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2322 
2323 	return 0;
2324 }
2325 
2326 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2327 const uint8_t iwx_ac_to_tx_fifo[] = {
2328 	IWX_TX_FIFO_BE,
2329 	IWX_TX_FIFO_BK,
2330 	IWX_TX_FIFO_VI,
2331 	IWX_TX_FIFO_VO,
2332 };
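
/*
 * Example: the table is indexed directly by net80211's EDCA_AC_*
 * constants, which evaluate to BE(0), BK(1), VI(2), VO(3) and thus
 * match the order above, e.g. iwx_ac_to_tx_fifo[EDCA_AC_VO] yields
 * IWX_TX_FIFO_VO.
 */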
2333 
2334 int
2335 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2336     int num_slots)
2337 {
2338 	struct iwx_tx_queue_cfg_cmd cmd;
2339 	struct iwx_rx_packet *pkt;
2340 	struct iwx_tx_queue_cfg_rsp *resp;
2341 	struct iwx_host_cmd hcmd = {
2342 		.id = IWX_SCD_QUEUE_CFG,
2343 		.flags = IWX_CMD_WANT_RESP,
2344 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2345 	};
2346 	struct iwx_tx_ring *ring = &sc->txq[qid];
2347 	int err, fwqid;
2348 	uint32_t wr_idx;
2349 	size_t resp_len;
2350 
2351 	iwx_reset_tx_ring(sc, ring);
2352 
2353 	memset(&cmd, 0, sizeof(cmd));
2354 	cmd.sta_id = sta_id;
2355 	cmd.tid = tid;
2356 	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2357 	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2358 	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2359 	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2360 
2361 	hcmd.data[0] = &cmd;
2362 	hcmd.len[0] = sizeof(cmd);
2363 
2364 	err = iwx_send_cmd(sc, &hcmd);
2365 	if (err)
2366 		return err;
2367 
2368 	pkt = hcmd.resp_pkt;
2369 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2370 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2371 		err = EIO;
2372 		goto out;
2373 	}
2374 
2375 	resp_len = iwx_rx_packet_payload_len(pkt);
2376 	if (resp_len != sizeof(*resp)) {
2377 		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2378 		err = EIO;
2379 		goto out;
2380 	}
2381 
2382 	resp = (void *)pkt->data;
2383 	fwqid = le16toh(resp->queue_number);
2384 	wr_idx = le16toh(resp->write_pointer);
2385 
2386 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2387 	if (fwqid != qid) {
2388 		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2389 		err = EIO;
2390 		goto out;
2391 	}
2392 
2393 	if (wr_idx != ring->cur) {
2394 		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
2395 		err = EIO;
2396 		goto out;
2397 	}
2398 out:
2399 	iwx_free_resp(sc, &hcmd);
2400 	return err;
2401 }
2402 
2403 void
2404 iwx_post_alive(struct iwx_softc *sc)
2405 {
2406 	iwx_ict_reset(sc);
2407 	iwx_ctxt_info_free(sc);
2408 }
2409 
2410 /*
2411  * For the high priority TE use a time event type that has similar priority to
2412  * the FW's action scan priority.
2413  */
2414 #define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2415 #define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2416 
2417 int
2418 iwx_send_time_event_cmd(struct iwx_softc *sc,
2419     const struct iwx_time_event_cmd *cmd)
2420 {
2421 	struct iwx_rx_packet *pkt;
2422 	struct iwx_time_event_resp *resp;
2423 	struct iwx_host_cmd hcmd = {
2424 		.id = IWX_TIME_EVENT_CMD,
2425 		.flags = IWX_CMD_WANT_RESP,
2426 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2427 	};
2428 	uint32_t resp_len;
2429 	int err;
2430 
2431 	hcmd.data[0] = cmd;
2432 	hcmd.len[0] = sizeof(*cmd);
2433 	err = iwx_send_cmd(sc, &hcmd);
2434 	if (err)
2435 		return err;
2436 
2437 	pkt = hcmd.resp_pkt;
2438 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2439 		err = EIO;
2440 		goto out;
2441 	}
2442 
2443 	resp_len = iwx_rx_packet_payload_len(pkt);
2444 	if (resp_len != sizeof(*resp)) {
2445 		err = EIO;
2446 		goto out;
2447 	}
2448 
2449 	resp = (void *)pkt->data;
2450 	if (le32toh(resp->status) == 0)
2451 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2452 	else
2453 		err = EIO;
2454 out:
2455 	iwx_free_resp(sc, &hcmd);
2456 	return err;
2457 }
2458 
2459 void
2460 iwx_protect_session(struct iwx_softc *sc, struct iwx_node *in,
2461     uint32_t duration, uint32_t max_delay)
2462 {
2463 	struct iwx_time_event_cmd time_cmd;
2464 
2465 	/* Do nothing if a time event is already scheduled. */
2466 	if (sc->sc_flags & IWX_FLAG_TE_ACTIVE)
2467 		return;
2468 
2469 	memset(&time_cmd, 0, sizeof(time_cmd));
2470 
2471 	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_ADD);
2472 	time_cmd.id_and_color =
2473 	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2474 	time_cmd.id = htole32(IWX_TE_BSS_STA_AGGRESSIVE_ASSOC);
2475 
2476 	time_cmd.apply_time = htole32(0);
2477 
2478 	time_cmd.max_frags = IWX_TE_V2_FRAG_NONE;
2479 	time_cmd.max_delay = htole32(max_delay);
2480 	/* TODO: why set interval to the beacon interval if not periodic? */
2481 	time_cmd.interval = htole32(1);
2482 	time_cmd.duration = htole32(duration);
2483 	time_cmd.repeat = 1;
2484 	time_cmd.policy = htole16(IWX_TE_V2_NOTIF_HOST_EVENT_START |
2485 	    IWX_TE_V2_NOTIF_HOST_EVENT_END |
2486 	    IWX_T2_V2_START_IMMEDIATELY);
2488 
2489 	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
2490 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
2491 
2492 	DELAY(100);
2493 }
2494 
2495 void
2496 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
2497 {
2498 	struct iwx_time_event_cmd time_cmd;
2499 
2500 	/* Do nothing if the time event has already ended. */
2501 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
2502 		return;
2503 
2504 	memset(&time_cmd, 0, sizeof(time_cmd));
2505 
2506 	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_REMOVE);
2507 	time_cmd.id_and_color =
2508 	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2509 	time_cmd.id = htole32(sc->sc_time_event_uid);
2510 
2511 	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
2512 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
2513 
2514 	DELAY(100);
2515 }
2516 
2517 /*
2518  * NVM read access and content parsing.  We do not support
2519  * external NVM or writing NVM.
2520  */
2521 
2522 /* list of NVM sections we are allowed/need to read */
2523 const int iwx_nvm_to_read[] = {
2524 	IWX_NVM_SECTION_TYPE_SW,
2525 	IWX_NVM_SECTION_TYPE_REGULATORY,
2526 	IWX_NVM_SECTION_TYPE_CALIBRATION,
2527 	IWX_NVM_SECTION_TYPE_PRODUCTION,
2528 	IWX_NVM_SECTION_TYPE_REGULATORY_SDP,
2529 	IWX_NVM_SECTION_TYPE_HW_8000,
2530 	IWX_NVM_SECTION_TYPE_MAC_OVERRIDE,
2531 	IWX_NVM_SECTION_TYPE_PHY_SKU,
2532 };
2533 
2534 #define IWX_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2535 
2536 #define IWX_NVM_WRITE_OPCODE 1
2537 #define IWX_NVM_READ_OPCODE 0
2538 
2539 int
2540 iwx_nvm_read_chunk(struct iwx_softc *sc, uint16_t section, uint16_t offset,
2541     uint16_t length, uint8_t *data, uint16_t *len)
2542 {
2544 	struct iwx_nvm_access_cmd nvm_access_cmd = {
2545 		.offset = htole16(offset),
2546 		.length = htole16(length),
2547 		.type = htole16(section),
2548 		.op_code = IWX_NVM_READ_OPCODE,
2549 	};
2550 	struct iwx_nvm_access_resp *nvm_resp;
2551 	struct iwx_rx_packet *pkt;
2552 	struct iwx_host_cmd cmd = {
2553 		.id = IWX_NVM_ACCESS_CMD,
2554 		.flags = (IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL),
2555 		.resp_pkt_len = IWX_CMD_RESP_MAX,
2556 		.data = { &nvm_access_cmd, },
2557 	};
2558 	int err, offset_read;
2559 	size_t bytes_read;
2560 	uint8_t *resp_data;
2561 
2562 	cmd.len[0] = sizeof(struct iwx_nvm_access_cmd);
2563 
2564 	err = iwx_send_cmd(sc, &cmd);
2565 	if (err)
2566 		return err;
2567 
2568 	pkt = cmd.resp_pkt;
2569 	if (pkt->hdr.flags & IWX_CMD_FAILED_MSK) {
2570 		err = EIO;
2571 		goto exit;
2572 	}
2573 
2574 	/* Extract NVM response */
2575 	nvm_resp = (void *)pkt->data;
2576 	if (nvm_resp == NULL)
2577 		return EIO;
2578 
2579 	err = le16toh(nvm_resp->status);
2580 	bytes_read = le16toh(nvm_resp->length);
2581 	offset_read = le16toh(nvm_resp->offset);
2582 	resp_data = nvm_resp->data;
2583 	if (err) {
2584 		err = EINVAL;
2585 		goto exit;
2586 	}
2587 
2588 	if (offset_read != offset) {
2589 		err = EINVAL;
2590 		goto exit;
2591 	}
2592 
2593 	if (bytes_read > length) {
2594 		err = EINVAL;
2595 		goto exit;
2596 	}
2597 
2598 	memcpy(data + offset, resp_data, bytes_read);
2599 	*len = bytes_read;
2600 
2601  exit:
2602 	iwx_free_resp(sc, &cmd);
2603 	return err;
2604 }
2605 
2606 /*
2607  * Reads an NVM section completely.
2608  * NICs prior to the 7000 family don't have a real NVM; they just read
2609  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2610  * by uCode, we must manually check in this case that we don't
2611  * overflow and try to read more than the EEPROM size.
2612  */
2613 int
2614 iwx_nvm_read_section(struct iwx_softc *sc, uint16_t section, uint8_t *data,
2615     uint16_t *len, size_t max_len)
2616 {
2617 	uint16_t chunklen, seglen;
2618 	int err = 0;
2619 
2620 	chunklen = seglen = IWX_NVM_DEFAULT_CHUNK_SIZE;
2621 	*len = 0;
2622 
2623 	/* Read NVM chunks until exhausted (reading less than requested) */
2624 	while (seglen == chunklen && *len < max_len) {
2625 		err = iwx_nvm_read_chunk(sc,
2626 		    section, *len, chunklen, data, &seglen);
2627 		if (err)
2628 			return err;
2629 
2630 		*len += seglen;
2631 	}
2632 
2633 	return err;
2634 }
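
/*
 * Worked example (hypothetical 5000-byte section): with the 2048-byte
 * IWX_NVM_DEFAULT_CHUNK_SIZE the loop above issues reads at offsets
 * 0, 2048 and 4096; the last read returns only 904 bytes, so
 * seglen != chunklen and the loop terminates with *len == 5000.
 */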
2635 
2636 uint8_t
2637 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2638 {
2639 	uint8_t tx_ant;
2640 
2641 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2642 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2643 
2644 	if (sc->sc_nvm.valid_tx_ant)
2645 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2646 
2647 	return tx_ant;
2648 }
2649 
2650 uint8_t
2651 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2652 {
2653 	uint8_t rx_ant;
2654 
2655 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2656 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2657 
2658 	if (sc->sc_nvm.valid_rx_ant)
2659 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2660 
2661 	return rx_ant;
2662 }
2663 
2664 void
2665 iwx_init_channel_map(struct iwx_softc *sc, const uint16_t * const nvm_ch_flags,
2666     const uint8_t *nvm_channels, int nchan)
2667 {
2668 	struct ieee80211com *ic = &sc->sc_ic;
2669 	struct iwx_nvm_data *data = &sc->sc_nvm;
2670 	int ch_idx;
2671 	struct ieee80211_channel *channel;
2672 	uint16_t ch_flags;
2673 	int is_5ghz;
2674 	int flags, hw_value;
2675 
2676 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2677 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2678 
2679 		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS &&
2680 		    !data->sku_cap_band_52GHz_enable)
2681 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
2682 
2683 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID))
2684 			continue;
2685 
2686 		hw_value = nvm_channels[ch_idx];
2687 		channel = &ic->ic_channels[hw_value];
2688 
2689 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
2690 		if (!is_5ghz) {
2691 			flags = IEEE80211_CHAN_2GHZ;
2692 			channel->ic_flags
2693 			    = IEEE80211_CHAN_CCK
2694 			    | IEEE80211_CHAN_OFDM
2695 			    | IEEE80211_CHAN_DYN
2696 			    | IEEE80211_CHAN_2GHZ;
2697 		} else {
2698 			flags = IEEE80211_CHAN_5GHZ;
2699 			channel->ic_flags =
2700 			    IEEE80211_CHAN_A;
2701 		}
2702 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2703 
2704 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
2705 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2706 
2707 		if (data->sku_cap_11n_enable)
2708 			channel->ic_flags |= IEEE80211_CHAN_HT;
2709 	}
2710 }
2711 
2712 void
2713 iwx_setup_ht_rates(struct iwx_softc *sc)
2714 {
2715 	struct ieee80211com *ic = &sc->sc_ic;
2716 	uint8_t rx_ant;
2717 
2718 	/* TX is supported with the same MCS as RX. */
2719 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2720 
2721 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2722 
2723 	if (sc->sc_nvm.sku_cap_mimo_disable)
2724 		return;
2725 
2726 	rx_ant = iwx_fw_valid_rx_ant(sc);
2727 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2728 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2729 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2730 }
2731 
2732 #define IWX_MAX_RX_BA_SESSIONS 16
2733 
2734 void
2735 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2736     uint16_t ssn, uint16_t winsize, int start)
2737 {
2738 	struct ieee80211com *ic = &sc->sc_ic;
2739 	struct iwx_add_sta_cmd cmd;
2740 	struct iwx_node *in = (void *)ni;
2741 	int err, s;
2742 	uint32_t status;
2743 
2744 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
2745 		ieee80211_addba_req_refuse(ic, ni, tid);
2746 		return;
2747 	}
2748 
2749 	memset(&cmd, 0, sizeof(cmd));
2750 
2751 	cmd.sta_id = IWX_STATION_ID;
2752 	cmd.mac_id_n_color
2753 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2754 	cmd.add_modify = IWX_STA_MODE_MODIFY;
2755 
2756 	if (start) {
2757 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2758 		cmd.add_immediate_ba_ssn = htole16(ssn);
2759 		cmd.rx_ba_window = htole16(winsize);
2760 	} else {
2761 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2762 	}
2763 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
2764 	    IWX_STA_MODIFY_REMOVE_BA_TID;
2765 
2766 	status = IWX_ADD_STA_SUCCESS;
2767 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
2768 	    &status);
2769 
2770 	s = splnet();
2771 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) == IWX_ADD_STA_SUCCESS) {
2772 		if (start) {
2773 			sc->sc_rx_ba_sessions++;
2774 			ieee80211_addba_req_accept(ic, ni, tid);
2775 		} else if (sc->sc_rx_ba_sessions > 0)
2776 			sc->sc_rx_ba_sessions--;
2777 	} else if (start)
2778 		ieee80211_addba_req_refuse(ic, ni, tid);
2779 
2780 	splx(s);
2781 }
2782 
2783 void
2784 iwx_htprot_task(void *arg)
2785 {
2786 	struct iwx_softc *sc = arg;
2787 	struct ieee80211com *ic = &sc->sc_ic;
2788 	struct iwx_node *in = (void *)ic->ic_bss;
2789 	int err, s = splnet();
2790 
2791 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
2792 		refcnt_rele_wake(&sc->task_refs);
2793 		splx(s);
2794 		return;
2795 	}
2796 
2797 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
2798 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
2799 	if (err)
2800 		printf("%s: could not change HT protection: error %d\n",
2801 		    DEVNAME(sc), err);
2802 
2803 	refcnt_rele_wake(&sc->task_refs);
2804 	splx(s);
2805 }
2806 
2807 /*
2808  * This function is called by upper layer when HT protection settings in
2809  * beacons have changed.
2810  */
2811 void
2812 iwx_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2813 {
2814 	struct iwx_softc *sc = ic->ic_softc;
2815 
2816 	/* assumes that ni == ic->ic_bss */
2817 	iwx_add_task(sc, systq, &sc->htprot_task);
2818 }
2819 
2820 void
2821 iwx_ba_task(void *arg)
2822 {
2823 	struct iwx_softc *sc = arg;
2824 	struct ieee80211com *ic = &sc->sc_ic;
2825 	struct ieee80211_node *ni = ic->ic_bss;
2826 	int s = splnet();
2827 
2828 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
2829 		refcnt_rele_wake(&sc->task_refs);
2830 		splx(s);
2831 		return;
2832 	}
2833 
2834 	if (sc->ba_start)
2835 		iwx_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
2836 		    sc->ba_winsize, 1);
2837 	else
2838 		iwx_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);
2839 
2840 	refcnt_rele_wake(&sc->task_refs);
2841 	splx(s);
2842 }
2843 
2844 /*
2845  * This function is called by upper layer when an ADDBA request is received
2846  * from another STA and before the ADDBA response is sent.
2847  */
2848 int
2849 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2850     uint8_t tid)
2851 {
2852 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2853 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
2854 
2855 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS)
2856 		return ENOSPC;
2857 
2858 	sc->ba_start = 1;
2859 	sc->ba_tid = tid;
2860 	sc->ba_ssn = htole16(ba->ba_winstart);
2861 	sc->ba_winsize = htole16(ba->ba_winsize);
2862 	iwx_add_task(sc, systq, &sc->ba_task);
2863 
2864 	return EBUSY;
2865 }
2866 
2867 /*
2868  * This function is called by upper layer on teardown of an HT-immediate
2869  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2870  */
2871 void
2872 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2873     uint8_t tid)
2874 {
2875 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
2876 
2877 	sc->ba_start = 0;
2878 	sc->ba_tid = tid;
2879 	iwx_add_task(sc, systq, &sc->ba_task);
2880 }
2881 
2882 void
2883 iwx_set_hw_address_8000(struct iwx_softc *sc, struct iwx_nvm_data *data,
2884     const uint16_t *mac_override, const uint16_t *nvm_hw)
2885 {
2886 	const uint8_t *hw_addr;
2887 
2888 	if (mac_override) {
2889 		static const uint8_t reserved_mac[] = {
2890 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2891 		};
2892 
2893 		hw_addr = (const uint8_t *)(mac_override +
2894 				 IWX_MAC_ADDRESS_OVERRIDE_8000);
2895 
2896 		/*
2897 		 * Store the MAC address from MAO section.
2898 		 * No byte swapping is required in the MAO section.
2899 		 */
2900 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
2901 
2902 		/*
2903 		 * Force the use of the OTP MAC address in case of reserved MAC
2904 		 * address in the NVM, or if the address is given but invalid.
2905 		 */
2906 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
2907 		    (memcmp(etherbroadcastaddr, data->hw_addr,
2908 		    sizeof(etherbroadcastaddr)) != 0) &&
2909 		    (memcmp(etheranyaddr, data->hw_addr,
2910 		    sizeof(etheranyaddr)) != 0) &&
2911 		    !ETHER_IS_MULTICAST(data->hw_addr))
2912 			return;
2913 	}
2914 
2915 	if (nvm_hw) {
2916 		/* Read the mac address from WFMP registers. */
2917 		uint32_t mac_addr0, mac_addr1;
2918 
2919 		if (!iwx_nic_lock(sc))
2920 			goto out;
2921 		mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
2922 		mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
2923 		iwx_nic_unlock(sc);
2924 
2925 		hw_addr = (const uint8_t *)&mac_addr0;
2926 		data->hw_addr[0] = hw_addr[3];
2927 		data->hw_addr[1] = hw_addr[2];
2928 		data->hw_addr[2] = hw_addr[1];
2929 		data->hw_addr[3] = hw_addr[0];
2930 
2931 		hw_addr = (const uint8_t *)&mac_addr1;
2932 		data->hw_addr[4] = hw_addr[1];
2933 		data->hw_addr[5] = hw_addr[0];
2934 
2935 		return;
2936 	}
2937 out:
2938 	printf("%s: mac address not found\n", DEVNAME(sc));
2939 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2940 }
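
/*
 * Byte-order sketch: mac_addr0/mac_addr1 are stored little-endian by
 * the htole32() calls above, so for hypothetical register values
 * IWX_WFMP_MAC_ADDR_0 == 0x00a0c912 and IWX_WFMP_MAC_ADDR_1 ==
 * 0x00003456 the reassembly yields the address 00:a0:c9:12:34:56.
 */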
2941 
2942 int
2943 iwx_parse_nvm_data(struct iwx_softc *sc, const uint16_t *nvm_hw,
2944     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
2945     const uint16_t *mac_override, const uint16_t *phy_sku,
2946     const uint16_t *regulatory, int n_regulatory)
2947 {
2948 	struct iwx_nvm_data *data = &sc->sc_nvm;
2949 	uint32_t sku, radio_cfg;
2950 	uint16_t lar_config, lar_offset;
2951 
2952 	data->nvm_version = le16_to_cpup(nvm_sw + IWX_NVM_VERSION);
2953 
2954 	radio_cfg = le32_to_cpup((uint32_t *)(phy_sku + IWX_RADIO_CFG_8000));
2955 	data->radio_cfg_type = IWX_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2956 	data->radio_cfg_step = IWX_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2957 	data->radio_cfg_dash = IWX_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2958 	data->radio_cfg_pnum = IWX_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2959 	data->valid_tx_ant = IWX_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2960 	data->valid_rx_ant = IWX_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2961 
2962 	sku = le32_to_cpup((uint32_t *)(phy_sku + IWX_SKU_8000));
2963 	data->sku_cap_band_24GHz_enable = sku & IWX_NVM_SKU_CAP_BAND_24GHZ;
2964 	data->sku_cap_band_52GHz_enable = sku & IWX_NVM_SKU_CAP_BAND_52GHZ;
2965 	data->sku_cap_11n_enable = sku & IWX_NVM_SKU_CAP_11N_ENABLE;
2966 	data->sku_cap_mimo_disable = sku & IWX_NVM_SKU_CAP_MIMO_DISABLE;
2967 
2968 	lar_offset = data->nvm_version < 0xE39 ?
2969 			       IWX_NVM_LAR_OFFSET_8000_OLD :
2970 			       IWX_NVM_LAR_OFFSET_8000;
2971 
2972 	lar_config = le16_to_cpup(regulatory + lar_offset);
2973 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWX_N_HW_ADDRS_8000);
2974 	iwx_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2975 
2976 	iwx_init_channel_map(sc, &regulatory[IWX_NVM_CHANNELS_8000],
2977 	    iwx_nvm_channels_8000,
2978 	    MIN(n_regulatory, nitems(iwx_nvm_channels_8000)));
2979 
2980 	data->calib_version = 255;   /* TODO:
2981 					this value will prevent some checks
2982 					from failing; we need to check whether
2983 					this field is still needed, and if it
2984 					is, where it lives in the NVM */
2985 
2986 	return 0;
2987 }
2988 
2989 int
2990 iwx_parse_nvm_sections(struct iwx_softc *sc, struct iwx_nvm_section *sections)
2991 {
2992 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
2993 	const uint16_t *regulatory = NULL;
2994 	int n_regulatory = 0;
2995 
2996 	/* Checking for required sections */
2997 
2998 	/* SW and REGULATORY sections are mandatory */
2999 	if (!sections[IWX_NVM_SECTION_TYPE_SW].data ||
3000 	    !sections[IWX_NVM_SECTION_TYPE_REGULATORY].data) {
3001 		return ENOENT;
3002 	}
3003 	/* MAC_OVERRIDE or at least HW section must exist */
3004 	if (!sections[IWX_NVM_SECTION_TYPE_HW_8000].data &&
3005 	    !sections[IWX_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3006 		return ENOENT;
3007 	}
3008 
3009 	/* PHY_SKU section is mandatory in B0 */
3010 	if (!sections[IWX_NVM_SECTION_TYPE_PHY_SKU].data) {
3011 		return ENOENT;
3012 	}
3013 
3014 	regulatory = (const uint16_t *)
3015 	    sections[IWX_NVM_SECTION_TYPE_REGULATORY].data;
3016 	n_regulatory = sections[IWX_NVM_SECTION_TYPE_REGULATORY].length;
3017 	hw = (const uint16_t *)
3018 	    sections[IWX_NVM_SECTION_TYPE_HW_8000].data;
3019 	mac_override = (const uint16_t *)
3020 	    sections[IWX_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3022 	phy_sku = (const uint16_t *)
3023 	    sections[IWX_NVM_SECTION_TYPE_PHY_SKU].data;
3024 
3025 	sw = (const uint16_t *)sections[IWX_NVM_SECTION_TYPE_SW].data;
3026 	calib = (const uint16_t *)
3027 	    sections[IWX_NVM_SECTION_TYPE_CALIBRATION].data;
3028 
3029 	/* XXX should pass in the length of every section */
3030 	return iwx_parse_nvm_data(sc, hw, sw, calib, mac_override,
3031 	    phy_sku, regulatory, n_regulatory);
3032 }
3033 
3034 int
3035 iwx_nvm_init(struct iwx_softc *sc)
3036 {
3037 	struct iwx_nvm_section nvm_sections[IWX_NVM_NUM_OF_SECTIONS];
3038 	int i, section, err;
3039 	uint16_t len;
3040 	uint8_t *buf;
3041 	const size_t bufsz = sc->sc_nvm_max_section_size;
3042 
3043 	memset(nvm_sections, 0, sizeof(nvm_sections));
3044 
3045 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3046 	if (buf == NULL)
3047 		return ENOMEM;
3048 
3049 	for (i = 0; i < nitems(iwx_nvm_to_read); i++) {
3050 		section = iwx_nvm_to_read[i];
3051 		KASSERT(section < nitems(nvm_sections));
3052 
3053 		err = iwx_nvm_read_section(sc, section, buf, &len, bufsz);
3054 		if (err) {
3055 			err = 0;
3056 			continue;
3057 		}
3058 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3059 		if (nvm_sections[section].data == NULL) {
3060 			err = ENOMEM;
3061 			break;
3062 		}
3063 		memcpy(nvm_sections[section].data, buf, len);
3064 		nvm_sections[section].length = len;
3065 	}
3066 	free(buf, M_DEVBUF, bufsz);
3067 	if (err == 0)
3068 		err = iwx_parse_nvm_sections(sc, nvm_sections);
3069 
3070 	for (i = 0; i < IWX_NVM_NUM_OF_SECTIONS; i++) {
3071 		if (nvm_sections[i].data != NULL)
3072 			free(nvm_sections[i].data, M_DEVBUF,
3073 			    nvm_sections[i].length);
3074 	}
3075 
3076 	return err;
3077 }
3078 
3079 int
3080 iwx_load_firmware(struct iwx_softc *sc)
3081 {
3082 	struct iwx_fw_sects *fws;
3083 	int err, w;
3084 
3085 	sc->sc_uc.uc_intr = 0;
3086 
3087 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3088 	err = iwx_ctxt_info_init(sc, fws);
3089 	if (err) {
3090 		printf("%s: could not init context info\n", DEVNAME(sc));
3091 		return err;
3092 	}
3093 
3094 	/* wait for the firmware to load */
3095 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
3096 		err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", MSEC_TO_NSEC(100));
3097 	}
3098 	if (err || !sc->sc_uc.uc_ok)
3099 		printf("%s: could not load firmware\n", DEVNAME(sc));
3100 	if (!sc->sc_uc.uc_ok)
3101 		return EINVAL;
3102 
3103 	return err;
3104 }
3105 
3106 int
3107 iwx_start_fw(struct iwx_softc *sc)
3108 {
3109 	int err;
3110 
3111 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3112 
3113 	err = iwx_nic_init(sc);
3114 	if (err) {
3115 		printf("%s: unable to init nic\n", DEVNAME(sc));
3116 		return err;
3117 	}
3118 
3119 	/* make sure rfkill handshake bits are cleared */
3120 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3121 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3122 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3123 
3124 	/* clear (again), then enable firmware load interrupt */
3125 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3126 	iwx_enable_fwload_interrupt(sc);
3127 
3128 	return iwx_load_firmware(sc);
3129 }
3130 
3131 int
3132 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3133 {
3134 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3135 		.valid = htole32(valid_tx_ant),
3136 	};
3137 
3138 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3139 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3140 }
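
/*
 * Usage sketch (assumed call pattern, not copied from this file): the
 * valid antenna mask would typically come from iwx_fw_valid_tx_ant(),
 * e.g.
 *
 *	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
 */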
3141 
3142 int
3143 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3144 {
3145 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3146 
3147 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3148 	phy_cfg_cmd.calib_control.event_trigger =
3149 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3150 	phy_cfg_cmd.calib_control.flow_trigger =
3151 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3152 
3153 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3154 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3155 }
3156 
3157 int
3158 iwx_send_dqa_cmd(struct iwx_softc *sc)
3159 {
3160 	struct iwx_dqa_enable_cmd dqa_cmd = {
3161 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3162 	};
3163 	uint32_t cmd_id;
3164 
3165 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3166 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3167 }
3168 
3169 int
3170 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
3171 {
3172 	int err;
3173 
3174 	err = iwx_read_firmware(sc);
3175 	if (err)
3176 		return err;
3177 
3178 	err = iwx_start_fw(sc);
3179 	if (err)
3180 		return err;
3181 
3182 	iwx_post_alive(sc);
3183 
3184 	return 0;
3185 }
3186 
3187 int
3188 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
3189 {
3190 	const int wait_flags = IWX_INIT_COMPLETE;
3191 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
3192 	struct iwx_init_extended_cfg_cmd init_cfg = {
3193 		.init_flags = htole32(IWX_INIT_NVM),
3194 	};
3195 	int err;
3196 
3197 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
3198 		printf("%s: radio is disabled by hardware switch\n",
3199 		    DEVNAME(sc));
3200 		return EPERM;
3201 	}
3202 
3203 	sc->sc_init_complete = 0;
3204 	err = iwx_load_ucode_wait_alive(sc);
3205 	if (err) {
3206 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3207 		return err;
3208 	}
3209 
3210 	/*
3211 	 * Send init config command to mark that we are sending NVM
3212 	 * access commands
3213 	 */
3214 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
3215 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
3216 	if (err)
3217 		return err;
3218 
3219 	if (readnvm) {
3220 		err = iwx_nvm_init(sc);
3221 		if (err) {
3222 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3223 			return err;
3224 		}
3225 	}
3226 
3227 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3228 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
3229 	if (err)
3230 		return err;
3231 
3232 	/* Wait for the init complete notification from the firmware. */
3233 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3234 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
3235 		    SEC_TO_NSEC(2));
3236 		if (err)
3237 			return err;
3238 	}
3239 
3240 	if (readnvm && IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3241 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3242 		    sc->sc_nvm.hw_addr);
3243 	return 0;
3244 }
3245 
3246 int
3247 iwx_config_ltr(struct iwx_softc *sc)
3248 {
3249 	struct iwx_ltr_config_cmd cmd = {
3250 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3251 	};
3252 
3253 	if (!sc->sc_ltr_enabled)
3254 		return 0;
3255 
3256 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3257 }
3258 
3259 void
3260 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
3261 {
3262 	struct iwx_rx_data *data = &ring->data[idx];
3263 
3264 	((uint64_t *)ring->desc)[idx] =
3265 	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
3266 	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3267 	    idx * sizeof(uint64_t), sizeof(uint64_t),
3268 	    BUS_DMASYNC_PREWRITE);
3269 }
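
/*
 * Layout note: each free RB descriptor written above is a single
 * 64-bit word holding the buffer's DMA address with the RB index in
 * the low 12 bits, which assumes RX buffer addresses are at least
 * 4KB-aligned; e.g. a buffer at 0x12340000 in slot 5 is encoded as
 * 0x12340005.
 */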
3270 
3271 int
3272 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
3273 {
3274 	struct iwx_rx_ring *ring = &sc->rxq;
3275 	struct iwx_rx_data *data = &ring->data[idx];
3276 	struct mbuf *m;
3277 	int err;
3278 	int fatal = 0;
3279 
3280 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3281 	if (m == NULL)
3282 		return ENOBUFS;
3283 
3284 	if (size <= MCLBYTES) {
3285 		MCLGET(m, M_DONTWAIT);
3286 	} else {
3287 		MCLGETI(m, M_DONTWAIT, NULL, IWX_RBUF_SIZE);
3288 	}
3289 	if ((m->m_flags & M_EXT) == 0) {
3290 		m_freem(m);
3291 		return ENOBUFS;
3292 	}
3293 
3294 	if (data->m != NULL) {
3295 		bus_dmamap_unload(sc->sc_dmat, data->map);
3296 		fatal = 1;
3297 	}
3298 
3299 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3300 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3301 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3302 	if (err) {
3303 		/* XXX */
3304 		if (fatal)
3305 			panic("%s: could not load RX mbuf", DEVNAME(sc));
3306 		m_freem(m);
3307 		return err;
3308 	}
3309 	data->m = m;
3310 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3311 
3312 	/* Update RX descriptor. */
3313 	iwx_update_rx_desc(sc, ring, idx);
3314 
3315 	return 0;
3316 }
3317 
3318 int
3319 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3320     struct iwx_rx_mpdu_desc *desc)
3321 {
3322 	int energy_a, energy_b;
3323 
3324 	energy_a = desc->v1.energy_a;
3325 	energy_b = desc->v1.energy_b;
3326 	energy_a = energy_a ? -energy_a : -256;
3327 	energy_b = energy_b ? -energy_b : -256;
3328 	return MAX(energy_a, energy_b);
3329 }
3330 
3331 void
3332 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3333     struct iwx_rx_data *data)
3334 {
3335 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3336 
3337 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3338 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3339 
3340 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3341 }
3342 
3343 /*
3344  * Retrieve the average noise (in dBm) among receivers.
3345  */
3346 int
3347 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3348 {
3349 	int i, total, nbant, noise;
3350 
3351 	total = nbant = noise = 0;
3352 	for (i = 0; i < 3; i++) {
3353 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3354 		if (noise) {
3355 			total += noise;
3356 			nbant++;
3357 		}
3358 	}
3359 
3360 	/* There should be at least one antenna but check anyway. */
3361 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3362 }
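
/*
 * Worked example: beacon_silence_rssi values of 45, 48 and 0 (third
 * antenna not reporting) give total == 93 and nbant == 2, so the
 * returned noise floor is 93 / 2 - 107 = -61 dBm.
 */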
3363 
3364 void
3365 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3366      int is_shortpre, int rate_n_flags, uint32_t device_timestamp,
3367      struct ieee80211_rxinfo *rxi, struct mbuf_list *ml)
3368 {
3369 	struct ieee80211com *ic = &sc->sc_ic;
3370 	struct ieee80211_frame *wh;
3371 	struct ieee80211_node *ni;
3372 	struct ieee80211_channel *bss_chan;
3373 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3374 
3375 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3376 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3377 
3378 	wh = mtod(m, struct ieee80211_frame *);
3379 	ni = ieee80211_find_rxnode(ic, wh);
3380 	if (ni == ic->ic_bss) {
3381 		/*
3382 		 * We may switch ic_bss's channel during scans.
3383 		 * Record the current channel so we can restore it later.
3384 		 */
3385 		bss_chan = ni->ni_chan;
3386 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3387 	}
3388 	ni->ni_chan = &ic->ic_channels[chanidx];
3389 
3390 #if NBPFILTER > 0
3391 	if (sc->sc_drvbpf != NULL) {
3392 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3393 		uint16_t chan_flags;
3394 
3395 		tap->wr_flags = 0;
3396 		if (is_shortpre)
3397 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3398 		tap->wr_chan_freq =
3399 		    htole16(ic->ic_channels[chanidx].ic_freq);
3400 		chan_flags = ic->ic_channels[chanidx].ic_flags;
3401 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3402 			chan_flags &= ~IEEE80211_CHAN_HT;
3403 		tap->wr_chan_flags = htole16(chan_flags);
3404 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3405 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3406 		tap->wr_tsft = device_timestamp;
3407 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3408 			uint8_t mcs = (rate_n_flags &
3409 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3410 			    IWX_RATE_HT_MCS_NSS_MSK));
3411 			tap->wr_rate = (0x80 | mcs);
3412 		} else {
3413 			uint8_t rate = (rate_n_flags &
3414 			    IWX_RATE_LEGACY_RATE_MSK);
3415 			switch (rate) {
3416 			/* CCK rates. */
3417 			case  10: tap->wr_rate =   2; break;
3418 			case  20: tap->wr_rate =   4; break;
3419 			case  55: tap->wr_rate =  11; break;
3420 			case 110: tap->wr_rate =  22; break;
3421 			/* OFDM rates. */
3422 			case 0xd: tap->wr_rate =  12; break;
3423 			case 0xf: tap->wr_rate =  18; break;
3424 			case 0x5: tap->wr_rate =  24; break;
3425 			case 0x7: tap->wr_rate =  36; break;
3426 			case 0x9: tap->wr_rate =  48; break;
3427 			case 0xb: tap->wr_rate =  72; break;
3428 			case 0x1: tap->wr_rate =  96; break;
3429 			case 0x3: tap->wr_rate = 108; break;
3430 			/* Unknown rate: should not happen. */
3431 			default:  tap->wr_rate =   0;
3432 			}
3433 		}
3434 
3435 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
3436 		    m, BPF_DIRECTION_IN);
3437 	}
3438 #endif
3439 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
3440 	/*
3441 	 * ieee80211_inputm() might have changed our BSS.
3442 	 * Restore ic_bss's channel if we are still in the same BSS.
3443 	 */
3444 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
3445 		ni->ni_chan = bss_chan;
3446 	ieee80211_release_node(ic, ni);
3447 }
3448 
3449 void
3450 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
3451     size_t maxlen, struct mbuf_list *ml)
3452 {
3453 	struct ieee80211com *ic = &sc->sc_ic;
3454 	struct ieee80211_rxinfo rxi;
3455 	struct iwx_rx_mpdu_desc *desc;
3456 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
3457 	int rssi;
3458 	uint8_t chanidx;
3459 	uint16_t phy_info;
3460 
3461 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
3462 
3463 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
3464 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3465 		m_freem(m);
3466 		return; /* drop */
3467 	}
3468 
3469 	len = le16toh(desc->mpdu_len);
3470 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
3471 		/* Allow control frames in monitor mode. */
3472 		if (len < sizeof(struct ieee80211_frame_cts)) {
3473 			ic->ic_stats.is_rx_tooshort++;
3474 			IC2IFP(ic)->if_ierrors++;
3475 			m_freem(m);
3476 			return;
3477 		}
3478 	} else if (len < sizeof(struct ieee80211_frame)) {
3479 		ic->ic_stats.is_rx_tooshort++;
3480 		IC2IFP(ic)->if_ierrors++;
3481 		m_freem(m);
3482 		return;
3483 	}
3484 	if (len > maxlen - sizeof(*desc)) {
3485 		IC2IFP(ic)->if_ierrors++;
3486 		m_freem(m);
3487 		return;
3488 	}
3489 
3490 	m->m_data = pktdata + sizeof(*desc);
3491 	m->m_pkthdr.len = m->m_len = len;
3492 
3493 	/* Account for padding following the frame header. */
3494 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
3495 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3496 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3497 		if (type == IEEE80211_FC0_TYPE_CTL) {
3498 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
3499 			case IEEE80211_FC0_SUBTYPE_CTS:
3500 				hdrlen = sizeof(struct ieee80211_frame_cts);
3501 				break;
3502 			case IEEE80211_FC0_SUBTYPE_ACK:
3503 				hdrlen = sizeof(struct ieee80211_frame_ack);
3504 				break;
3505 			default:
3506 				hdrlen = sizeof(struct ieee80211_frame_min);
3507 				break;
3508 			}
3509 		} else
3510 			hdrlen = ieee80211_get_hdrlen(wh);
3511 		memmove(m->m_data + 2, m->m_data, hdrlen);
3512 		m_adj(m, 2);
3513 	}
3514 
3515 	phy_info = le16toh(desc->phy_info);
3516 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
3517 	chanidx = desc->v1.channel;
3518 	device_timestamp = desc->v1.gp2_on_air_rise;
3519 
3520 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
3521 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
3522 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
3523 
3524 	memset(&rxi, 0, sizeof(rxi));
3525 	rxi.rxi_rssi = rssi;
3526 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
3527 
3528 	iwx_rx_frame(sc, m, chanidx,
3529 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
3530 	    rate_n_flags, device_timestamp, &rxi, ml);
3531 }
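
/*
 * Normalization sketch: assuming IWX_MIN_DBM is -100 (as in similar
 * Intel drivers), a frame received at -60 dBm yields
 * rxi_rssi = (0 - (-100)) + (-60) = 40, which is then clipped to
 * ic_max_rssi.
 */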
3532 
3533 void
3534 iwx_enable_ht_cck_fallback(struct iwx_softc *sc, struct iwx_node *in)
3535 {
3536 	struct ieee80211com *ic = &sc->sc_ic;
3537 	struct ieee80211_node *ni = &in->in_ni;
3538 	struct ieee80211_rateset *rs = &ni->ni_rates;
3539 	uint8_t rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
3540 	uint8_t min_rval = ieee80211_min_basic_rate(ic);
3541 	int i;
3542 
3543 	/* Are CCK frames forbidden in our BSS? */
3544 	if (IWX_RVAL_IS_OFDM(min_rval))
3545 		return;
3546 
3547 	in->ht_force_cck = 1;
3548 
3549 	ieee80211_mira_cancel_timeouts(&in->in_mn);
3550 	ieee80211_mira_node_init(&in->in_mn);
3551 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
3552 
3553 	/* Choose initial CCK Tx rate. */
3554 	ni->ni_txrate = 0;
3555 	for (i = 0; i < rs->rs_nrates; i++) {
3556 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
3557 		if (rval == min_rval) {
3558 			ni->ni_txrate = i;
3559 			break;
3560 		}
3561 	}
3562 }
3563 
3564 void
3565 iwx_rx_tx_cmd_single(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3566     struct iwx_node *in)
3567 {
3568 	struct ieee80211com *ic = &sc->sc_ic;
3569 	struct ieee80211_node *ni = &in->in_ni;
3570 	struct ifnet *ifp = IC2IFP(ic);
3571 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
3572 	int status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
3573 	int txfail;
3574 
3575 	KASSERT(tx_resp->frame_count == 1);
3576 
3577 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
3578 	    status != IWX_TX_STATUS_DIRECT_DONE);
3579 
3580 	/* Update rate control statistics. */
3581 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck) {
3582 		in->in_amn.amn_txcnt++;
3583 		if (in->ht_force_cck) {
3584 			/*
3585 			 * We want to move back to OFDM quickly if possible.
3586 			 * Only show actual Tx failures to AMRR, not retries.
3587 			 */
3588 			if (txfail)
3589 				in->in_amn.amn_retrycnt++;
3590 		} else if (tx_resp->failure_frame > 0)
3591 			in->in_amn.amn_retrycnt++;
3592 	} else if (ic->ic_fixed_mcs == -1) {
3593 		in->in_mn.frames += tx_resp->frame_count;
3594 		in->in_mn.ampdu_size = le16toh(tx_resp->byte_cnt);
3595 		in->in_mn.agglen = tx_resp->frame_count;
3596 		if (tx_resp->failure_frame > 0)
3597 			in->in_mn.retries += tx_resp->failure_frame;
3598 		if (txfail)
3599 			in->in_mn.txfail += tx_resp->frame_count;
3600 		if (ic->ic_state == IEEE80211_S_RUN && !in->ht_force_cck) {
3601 			int otxmcs = ni->ni_txmcs;
3602 
3603 			ieee80211_mira_choose(&in->in_mn, ic, &in->in_ni);
3604 
3605 			/* Fall back to CCK rates if MCS 0 is failing. */
3606 			if (txfail && IEEE80211_IS_CHAN_2GHZ(ni->ni_chan) &&
3607 			    otxmcs == 0 && ni->ni_txmcs == 0)
3608 				iwx_enable_ht_cck_fallback(sc, in);
3609 		}
3610 	}
3611 
3612 	if (txfail)
3613 		ifp->if_oerrors++;
3614 }
3615 
3616 void
3617 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
3618 {
3619 	struct ieee80211com *ic = &sc->sc_ic;
3620 
3621 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
3622 	    BUS_DMASYNC_POSTWRITE);
3623 	bus_dmamap_unload(sc->sc_dmat, txd->map);
3624 	m_freem(txd->m);
3625 	txd->m = NULL;
3626 
3627 	KASSERT(txd->in);
3628 	ieee80211_release_node(ic, &txd->in->in_ni);
3629 	txd->in = NULL;
3630 
3631 	KASSERT(txd->done == 0);
3632 	txd->done = 1;
3633 }
3634 
3635 void
3636 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3637     struct iwx_rx_data *data)
3638 {
3639 	struct ieee80211com *ic = &sc->sc_ic;
3640 	struct ifnet *ifp = IC2IFP(ic);
3641 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
3642 	int idx = cmd_hdr->idx;
3643 	int qid = cmd_hdr->qid;
3644 	struct iwx_tx_ring *ring = &sc->txq[qid];
3645 	struct iwx_tx_data *txd;
3646 
3647 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
3648 	    BUS_DMASYNC_POSTREAD);
3649 
3650 	sc->sc_tx_timer = 0;
3651 
3652 	txd = &ring->data[idx];
3653 	if (txd->done)
3654 		return;
3655 
3656 	iwx_rx_tx_cmd_single(sc, pkt, txd->in);
3657 	iwx_txd_done(sc, txd);
3658 
3659 	/*
3660 	 * XXX Sometimes we miss Tx completion interrupts.
3661 	 * We cannot check Tx success/failure for affected frames; just free
3662 	 * the associated mbuf and release the associated node reference.
3663 	 */
3664 	while (ring->tail != idx) {
3665 		txd = &ring->data[ring->tail];
3666 		if (!txd->done) {
3667 			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
3668 			    __func__, ring->tail, idx));
3669 			iwx_txd_done(sc, txd);
3670 			ring->queued--;
3671 		}
3672 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
3673 	}
3674 
3675 	if (--ring->queued < IWX_TX_RING_LOMARK) {
3676 		sc->qfullmsk &= ~(1 << ring->qid);
3677 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
3678 			ifq_clr_oactive(&ifp->if_snd);
3679 			/*
3680 			 * We are in interrupt context, but net80211 itself
3681 			 * runs parts of its Tx path from interrupt context,
3682 			 * so restarting output from here is acceptable.
3683 			 */
3684 			(*ifp->if_start)(ifp);
3685 		}
3686 	}
3687 }
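
/*
 * Flow-control note for the watermark logic above: iwx_tx() sets the
 * ring's bit in sc->qfullmsk once ring->queued exceeds IWX_TX_RING_HIMARK,
 * and the completion path here clears it again once the count drops below
 * IWX_TX_RING_LOMARK. The gap between the two watermarks provides
 * hysteresis so the interface queue is not stopped and restarted on every
 * completed frame.
 */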
3688 
3689 void
3690 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3691     struct iwx_rx_data *data)
3692 {
3693 	struct ieee80211com *ic = &sc->sc_ic;
3694 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
3695 	uint32_t missed;
3696 
3697 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
3698 	    (ic->ic_state != IEEE80211_S_RUN))
3699 		return;
3700 
3701 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3702 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
3703 
3704 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
3705 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
3706 		if (ic->ic_if.if_flags & IFF_DEBUG)
3707 			printf("%s: receiving no beacons from %s; checking if "
3708 			    "this AP is still responding to probe requests\n",
3709 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
3710 		/*
3711 		 * Rather than go directly to scan state, try to send a
3712 		 * directed probe request first. If that fails then the
3713 		 * state machine will drop us into scanning after timing
3714 		 * out waiting for a probe response.
3715 		 */
3716 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
3717 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
3718 	}
3720 }
3721 
3722 int
3723 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
3724 {
3725 	struct iwx_binding_cmd cmd;
3726 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
3727 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
3728 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
3729 	uint32_t status;
3730 
3731 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
3732 		panic("binding already added");
3733 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
3734 		panic("binding already removed");
3735 
3736 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
3737 		return EINVAL;
3738 
3739 	memset(&cmd, 0, sizeof(cmd));
3740 
3741 	cmd.id_and_color
3742 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3743 	cmd.action = htole32(action);
3744 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3745 
3746 	cmd.macs[0] = htole32(mac_id);
3747 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
3748 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
3749 
3750 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel))
3751 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
3752 	else
3753 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
3754 
3755 	status = 0;
3756 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
3757 	    &cmd, &status);
3758 	if (err == 0 && status != 0)
3759 		err = EIO;
3760 
3761 	return err;
3762 }
3763 
3764 void
3765 iwx_phy_ctxt_cmd_hdr(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
3766     struct iwx_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3767 {
3768 	memset(cmd, 0, sizeof(struct iwx_phy_context_cmd));
3769 
3770 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
3771 	    ctxt->color));
3772 	cmd->action = htole32(action);
3773 	cmd->apply_time = htole32(apply_time);
3774 }
3775 
3776 void
3777 iwx_phy_ctxt_cmd_data(struct iwx_softc *sc, struct iwx_phy_context_cmd *cmd,
3778     struct ieee80211_channel *chan, uint8_t chains_static,
3779     uint8_t chains_dynamic)
3780 {
3781 	struct ieee80211com *ic = &sc->sc_ic;
3782 	uint8_t active_cnt, idle_cnt;
3783 
3784 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
3785 		cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3786 		    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
3787 		cmd->ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
3788 		cmd->ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
3789 		cmd->ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
3790 	} else {
3791 		struct iwx_fw_channel_info_v1 *ci_v1;
3792 		ci_v1 = (struct iwx_fw_channel_info_v1 *)&cmd->ci;
3793 		ci_v1->band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3794 		    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
3795 		ci_v1->channel = ieee80211_chan2ieee(ic, chan);
3796 		ci_v1->width = IWX_PHY_VHT_CHANNEL_MODE20;
3797 		ci_v1->ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
3798 	}
3799 	/* Set the Rx chains. */
3800 	idle_cnt = chains_static;
3801 	active_cnt = chains_dynamic;
3802 
3803 	cmd->rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
3804 					IWX_PHY_RX_CHAIN_VALID_POS);
3805 	cmd->rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
3806 	cmd->rxchain_info |= htole32(active_cnt <<
3807 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
3808 
3809 	cmd->txchain_info = htole32(iwx_fw_valid_tx_ant(sc));
3810 }
3811 
3812 int
3813 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
3814     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3815     uint32_t apply_time)
3816 {
3817 	struct iwx_phy_context_cmd cmd;
3818 	size_t len;
3819 
3820 	iwx_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3821 
3822 	/*
3823 	 * Intel resized the fw_channel_info struct and neglected to resize
3824 	 * the phy_context_cmd struct which contains it, so the command
3825 	 * length has to be adjusted at run-time to match what the
3826 	 * firmware expects.
3826 	 */
3827 	iwx_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3828 	    chains_static, chains_dynamic);
3829 	len = sizeof(struct iwx_phy_context_cmd);
3830 	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
3831 		len -= (sizeof(struct iwx_fw_channel_info) -
3832 		    sizeof(struct iwx_fw_channel_info_v1));
3833 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, len, &cmd);
3834 }
3835 
3836 int
3837 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
3838 {
3839 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
3840 	struct iwx_tfh_tfd *desc;
3841 	struct iwx_tx_data *txdata;
3842 	struct iwx_device_cmd *cmd;
3843 	struct mbuf *m;
3844 	bus_addr_t paddr;
3845 	uint64_t addr;
3846 	int err = 0, i, paylen, off, s;
3847 	int idx, code, async, group_id;
3848 	size_t hdrlen, datasz;
3849 	uint8_t *data;
3850 	int generation = sc->sc_generation;
3851 
3852 	code = hcmd->id;
3853 	async = hcmd->flags & IWX_CMD_ASYNC;
3854 	idx = ring->cur;
3855 
3856 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
3857 		paylen += hcmd->len[i];
3858 	}
3859 
3860 	/* If this command waits for a response, allocate a response buffer. */
3861 	hcmd->resp_pkt = NULL;
3862 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
3863 		uint8_t *resp_buf;
3864 		KASSERT(!async);
3865 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
3866 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
3867 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
3868 			return ENOSPC;
3869 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
3870 		    M_NOWAIT | M_ZERO);
3871 		if (resp_buf == NULL)
3872 			return ENOMEM;
3873 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
3874 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
3875 	} else {
3876 		sc->sc_cmd_resp_pkt[idx] = NULL;
3877 	}
3878 
3879 	s = splnet();
3880 
3881 	desc = &ring->desc[idx];
3882 	txdata = &ring->data[idx];
3883 
3884 	group_id = iwx_cmd_groupid(code);
3885 	if (group_id != 0) {
3886 		hdrlen = sizeof(cmd->hdr_wide);
3887 		datasz = sizeof(cmd->data_wide);
3888 	} else {
3889 		hdrlen = sizeof(cmd->hdr);
3890 		datasz = sizeof(cmd->data);
3891 	}
3892 
3893 	if (paylen > datasz) {
3894 		/* Command is too large to fit in pre-allocated space. */
3895 		size_t totlen = hdrlen + paylen;
3896 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
3897 			printf("%s: firmware command too long (%zd bytes)\n",
3898 			    DEVNAME(sc), totlen);
3899 			err = EINVAL;
3900 			goto out;
3901 		}
3902 		m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
3903 		if (m == NULL) {
3904 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
3905 			    DEVNAME(sc), totlen);
3906 			err = ENOMEM;
3907 			goto out;
3908 		}
3909 		cmd = mtod(m, struct iwx_device_cmd *);
3910 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
3911 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3912 		if (err) {
3913 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
3914 			    DEVNAME(sc), totlen);
3915 			m_freem(m);
3916 			goto out;
3917 		}
3918 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
3919 		paddr = txdata->map->dm_segs[0].ds_addr;
3920 	} else {
3921 		cmd = &ring->cmd[idx];
3922 		paddr = txdata->cmd_paddr;
3923 	}
3924 
3925 	if (group_id != 0) {
3926 		cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
3927 		cmd->hdr_wide.group_id = group_id;
3928 		cmd->hdr_wide.qid = ring->qid;
3929 		cmd->hdr_wide.idx = idx;
3930 		cmd->hdr_wide.length = htole16(paylen);
3931 		cmd->hdr_wide.version = iwx_cmd_version(code);
3932 		data = cmd->data_wide;
3933 	} else {
3934 		cmd->hdr.code = code;
3935 		cmd->hdr.flags = 0;
3936 		cmd->hdr.qid = ring->qid;
3937 		cmd->hdr.idx = idx;
3938 		data = cmd->data;
3939 	}
3940 
3941 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
3942 		if (hcmd->len[i] == 0)
3943 			continue;
3944 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
3945 		off += hcmd->len[i];
3946 	}
3947 	KASSERT(off == paylen);
3948 
3949 	desc->tbs[0].tb_len = htole16(hdrlen + paylen);
3950 	addr = htole64((uint64_t)paddr);
3951 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
3952 	desc->num_tbs = 1;
3953 
3954 	if (paylen > datasz) {
3955 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
3956 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
3957 	} else {
3958 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3959 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
3960 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
3961 	}
3962 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3963 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
3964 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
3965 	/* Kick command ring. */
3966 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
3967 	ring->queued++;
3968 	ring->cur = (ring->cur + 1) % IWX_CMD_QUEUE_SIZE;
3969 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
3970 
3971 	if (!async) {
3972 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
3973 		if (err == 0) {
3974 			/* If the hardware is no longer up, return an error. */
3975 			if (generation != sc->sc_generation) {
3976 				err = ENXIO;
3977 				goto out;
3978 			}
3979 
3980 			/* Response buffer will be freed in iwx_free_resp(). */
3981 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
3982 			sc->sc_cmd_resp_pkt[idx] = NULL;
3983 		} else if (generation == sc->sc_generation) {
3984 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
3985 			    sc->sc_cmd_resp_len[idx]);
3986 			sc->sc_cmd_resp_pkt[idx] = NULL;
3987 		}
3988 	}
3989  out:
3990 	splx(s);
3991 
3992 	return err;
3993 }
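
/*
 * Usage sketch for the synchronous command path above, assuming a softc
 * pointer sc and a payload struct arg in scope. The command ID
 * IWX_EXAMPLE_CMD is a hypothetical placeholder; iwx_send_cmd_status()
 * below shows the same pattern as used by real callers.
 */
#if 0
	struct iwx_host_cmd hcmd = {
		.id = IWX_EXAMPLE_CMD,	/* hypothetical command ID */
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = IWX_CMD_RESP_MAX,
		.len = { sizeof(arg), },
		.data = { &arg, },
	};
	err = iwx_send_cmd(sc, &hcmd);
	if (err == 0) {
		/* ... inspect hcmd.resp_pkt ... */
		iwx_free_resp(sc, &hcmd);
	}
#endif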
3994 
3995 int
3996 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
3997     uint16_t len, const void *data)
3998 {
3999 	struct iwx_host_cmd cmd = {
4000 		.id = id,
4001 		.len = { len, },
4002 		.data = { data, },
4003 		.flags = flags,
4004 	};
4005 
4006 	return iwx_send_cmd(sc, &cmd);
4007 }
4008 
4009 int
4010 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
4011     uint32_t *status)
4012 {
4013 	struct iwx_rx_packet *pkt;
4014 	struct iwx_cmd_response *resp;
4015 	int err, resp_len;
4016 
4017 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
4018 	cmd->flags |= IWX_CMD_WANT_RESP;
4019 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
4020 
4021 	err = iwx_send_cmd(sc, cmd);
4022 	if (err)
4023 		return err;
4024 
4025 	pkt = cmd->resp_pkt;
4026 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
4027 		return EIO;
4028 
4029 	resp_len = iwx_rx_packet_payload_len(pkt);
4030 	if (resp_len != sizeof(*resp)) {
4031 		iwx_free_resp(sc, cmd);
4032 		return EIO;
4033 	}
4034 
4035 	resp = (void *)pkt->data;
4036 	*status = le32toh(resp->status);
4037 	iwx_free_resp(sc, cmd);
4038 	return err;
4039 }
4040 
4041 int
4042 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
4043     const void *data, uint32_t *status)
4044 {
4045 	struct iwx_host_cmd cmd = {
4046 		.id = id,
4047 		.len = { len, },
4048 		.data = { data, },
4049 	};
4050 
4051 	return iwx_send_cmd_status(sc, &cmd, status);
4052 }
4053 
4054 void
4055 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
4056 {
4057 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
4058 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
4059 	hcmd->resp_pkt = NULL;
4060 }
4061 
4062 void
4063 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
4064 {
4065 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
4066 	struct iwx_tx_data *data;
4067 
4068 	if (qid != IWX_DQA_CMD_QUEUE) {
4069 		return;	/* Not a command ack. */
4070 	}
4071 
4072 	data = &ring->data[idx];
4073 
4074 	if (data->m != NULL) {
4075 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4076 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4077 		bus_dmamap_unload(sc->sc_dmat, data->map);
4078 		m_freem(data->m);
4079 		data->m = NULL;
4080 	}
4081 	wakeup(&ring->desc[idx]);
4082 
4083 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
4084 	if (ring->queued == 0) {
4085 		if (code != IWX_NVM_ACCESS_CMD)
4086 			DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
4087 				DEVNAME(sc), code));
4088 	} else if (ring->queued > 0)
4089 		ring->queued--;
4090 }
4091 
4092 /*
4093  * Fill in various bits for management frames, and leave them
4094  * unfilled for data frames (the firmware takes care of that).
4095  * Return the selected TX rate.
4096  */
4097 const struct iwx_rate *
4098 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
4099     struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
4100 {
4101 	struct ieee80211com *ic = &sc->sc_ic;
4102 	struct ieee80211_node *ni = &in->in_ni;
4103 	struct ieee80211_rateset *rs = &ni->ni_rates;
4104 	const struct iwx_rate *rinfo;
4105 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4106 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
4107 	int ridx, rate_flags;
4108 	uint32_t flags = 0;
4109 
4110 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4111 	    type != IEEE80211_FC0_TYPE_DATA) {
4112 		/* for non-data, use the lowest supported rate */
4113 		ridx = min_ridx;
4114 	} else if (ic->ic_fixed_mcs != -1) {
4115 		ridx = sc->sc_fixed_ridx;
4116 	} else if (ic->ic_fixed_rate != -1) {
4117 		ridx = sc->sc_fixed_ridx;
4118 	} else if ((ni->ni_flags & IEEE80211_NODE_HT) && !in->ht_force_cck) {
4119 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
4120 	} else {
4121 		uint8_t rval;
4122 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
4123 		ridx = iwx_rval2ridx(rval);
4124 		if (ridx < min_ridx)
4125 			ridx = min_ridx;
4126 	}
4127 
4128 	flags = (IWX_TX_FLAGS_CMD_RATE | IWX_TX_FLAGS_ENCRYPT_DIS);
4129 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
4130 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
4131 		flags |= IWX_TX_FLAGS_HIGH_PRI;
4132 	tx->flags = htole32(flags);
4133 
4134 	rinfo = &iwx_rates[ridx];
4135 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
4136 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
4137 	else
4138 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
4139 	if (IWX_RIDX_IS_CCK(ridx))
4140 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
4141 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4142 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
4143 		rate_flags |= IWX_RATE_MCS_HT_MSK;
4144 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4145 	} else
4146 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4147 
4148 	return rinfo;
4149 }
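
/*
 * Sketch of rate_n_flags words produced by iwx_tx_fill_cmd() above: an
 * HT SISO frame on antenna A carries
 * IWX_RATE_MCS_ANT_A_MSK | IWX_RATE_MCS_HT_MSK | rinfo->ht_plcp, while
 * a CCK 1 Mbps management frame carries
 * IWX_RATE_MCS_ANT_A_MSK | IWX_RATE_MCS_CCK_MSK | rinfo->plcp.
 */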
4150 
4151 #if 0
4152 /*
4153  * Necessary only for block ack mode.
4154  */
4155 void
4156 iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, uint16_t byte_cnt,
4157     uint16_t num_tbs)
4158 {
4159 	uint8_t filled_tfd_size, num_fetch_chunks;
4160 	uint16_t len = byte_cnt;
4161 	uint16_t bc_ent;
4162 	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
4163 
4164 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
4165 			  num_tbs * sizeof(struct iwx_tfh_tb);
4166 	/*
4167 	 * filled_tfd_size contains the number of filled bytes in the TFD.
4168 	 * Dividing it by 64 will give the number of chunks to fetch
4169 	 * to SRAM: 0 for one chunk, 1 for two, and so on.
4170 	 * If, for example, the TFD contains only 3 TBs then 32 bytes
4171 	 * of the TFD are used, and only one chunk of 64 bytes should
4172 	 * be fetched.
4173 	 */
4174 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
4175 
4176 	/* Before AX210, the HW expects the byte count in dwords (DW). */
4177 	len = howmany(len, 4);
4178 	bc_ent = htole16(len | (num_fetch_chunks << 12));
4179 	scd_bc_tbl->tfd_offset[txq->cur] = bc_ent;
4180 }
4181 #endif
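
/*
 * Worked example for the chunk arithmetic above: if a TFD with 3 TBs
 * occupies 32 bytes, howmany(32, 64) - 1 = 0 selects a single 64-byte
 * fetch chunk, and a 1500-byte frame becomes howmany(1500, 4) = 375
 * dwords, giving bc_ent = 375 | (0 << 12).
 */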
4182 
4183 int
4184 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4185 {
4186 	struct ieee80211com *ic = &sc->sc_ic;
4187 	struct iwx_node *in = (void *)ni;
4188 	struct iwx_tx_ring *ring;
4189 	struct iwx_tx_data *data;
4190 	struct iwx_tfh_tfd *desc;
4191 	struct iwx_device_cmd *cmd;
4192 	struct iwx_tx_cmd_gen2 *tx;
4193 	struct ieee80211_frame *wh;
4194 	struct ieee80211_key *k = NULL;
4195 	const struct iwx_rate *rinfo;
4196 	uint64_t paddr;
4197 	u_int hdrlen;
4198 	bus_dma_segment_t *seg;
4199 	uint16_t num_tbs;
4200 	uint8_t type;
4201 	int i, totlen, err, pad;
4202 
4203 	wh = mtod(m, struct ieee80211_frame *);
4204 	hdrlen = ieee80211_get_hdrlen(wh);
4205 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4206 
4207 	/*
4208 	 * Map EDCA categories to Tx data queues.
4209 	 *
4210 	 * We use static data queue assignments even in DQA mode. We do not
4211 	 * need to share Tx queues between stations because we only implement
4212 	 * client mode; the firmware's station table contains only one entry
4213 	 * which represents our access point.
4214 	 *
4215 	 * Tx aggregation will require additional queues (one queue per TID
4216 	 * for which aggregation is enabled) but we do not implement this yet.
4217 	 */
4218 	ring = &sc->txq[IWX_DQA_MIN_MGMT_QUEUE + ac];
4219 	desc = &ring->desc[ring->cur];
4220 	memset(desc, 0, sizeof(*desc));
4221 	data = &ring->data[ring->cur];
4222 
4223 	cmd = &ring->cmd[ring->cur];
4224 	cmd->hdr.code = IWX_TX_CMD;
4225 	cmd->hdr.flags = 0;
4226 	cmd->hdr.qid = ring->qid;
4227 	cmd->hdr.idx = ring->cur;
4228 
4229 	tx = (void *)cmd->data;
4230 	memset(tx, 0, sizeof(*tx));
4231 
4232 	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
4233 
4234 #if NBPFILTER > 0
4235 	if (sc->sc_drvbpf != NULL) {
4236 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
4237 		uint16_t chan_flags;
4238 
4239 		tap->wt_flags = 0;
4240 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4241 		chan_flags = ni->ni_chan->ic_flags;
4242 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4243 			chan_flags &= ~IEEE80211_CHAN_HT;
4244 		tap->wt_chan_flags = htole16(chan_flags);
4245 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4246 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4247 		    type == IEEE80211_FC0_TYPE_DATA &&
4248 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
4249 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4250 		} else
4251 			tap->wt_rate = rinfo->rate;
4252 		tap->wt_hwqueue = ac;
4253 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
4254 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4255 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4256 
4257 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
4258 		    m, BPF_DIRECTION_OUT);
4259 	}
4260 #endif
4261 
4262 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4263 		k = ieee80211_get_txkey(ic, wh, ni);
4264 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
4265 			return ENOBUFS;
4266 		/* 802.11 header may have moved. */
4267 		wh = mtod(m, struct ieee80211_frame *);
4268 	}
4269 	totlen = m->m_pkthdr.len;
4270 
4271 	if (hdrlen & 3) {
4272 		/* First segment length must be a multiple of 4. */
4273 		pad = 4 - (hdrlen & 3);
4274 		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
4275 	} else
4276 		pad = 0;
4277 
4278 	tx->len = htole16(totlen);
4279 
4280 	/* Copy 802.11 header in TX command. */
4281 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
4282 
4283 	/* Trim 802.11 header. */
4284 	m_adj(m, hdrlen);
4285 
4286 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4287 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4288 	if (err && err != EFBIG) {
4289 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
4290 		m_freem(m);
4291 		return err;
4292 	}
4293 	if (err) {
4294 		/* Too many DMA segments, linearize mbuf. */
4295 		if (m_defrag(m, M_DONTWAIT)) {
4296 			m_freem(m);
4297 			return ENOBUFS;
4298 		}
4299 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4300 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4301 		if (err) {
4302 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
4303 			    err);
4304 			m_freem(m);
4305 			return err;
4306 		}
4307 	}
4308 	data->m = m;
4309 	data->in = in;
4310 	data->done = 0;
4311 
4312 	/* Fill TX descriptor. */
4313 	num_tbs = 2 + data->map->dm_nsegs;
4314 	desc->num_tbs = htole16(num_tbs);
4315 
4316 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
4317 	paddr = htole64(data->cmd_paddr);
4318 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
4319 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
4320 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
4321 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
4322 	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
4323 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
4324 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
4325 
4326 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
4327 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
4328 
4329 	/* Other DMA segments are for data payload. */
4330 	seg = data->map->dm_segs;
4331 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4332 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
4333 		paddr = htole64(seg->ds_addr);
4334 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
4335 		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
4336 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
4337 	}
4338 
4339 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4340 	    BUS_DMASYNC_PREWRITE);
4341 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4342 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4343 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
4344 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4345 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4346 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
4347 
4348 #if 0
4349 	iwx_tx_update_byte_tbl(ring, totlen, num_tbs);
4350 #endif
4351 
4352 	/* Kick TX ring. */
4353 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
4354 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
4355 
4356 	/* Mark TX ring as full if we reach a certain threshold. */
4357 	if (++ring->queued > IWX_TX_RING_HIMARK) {
4358 		sc->qfullmsk |= 1 << ring->qid;
4359 	}
4360 
4361 	return 0;
4362 }
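
/*
 * Resulting TB layout for the descriptor built above: TB0 maps the
 * first IWX_FIRST_TB_SIZE bytes of the Tx command, TB1 the remainder of
 * the command header, Tx command, 802.11 header and pad, and TB2..n map
 * the mbuf's payload DMA segments; hence num_tbs = 2 + dm_nsegs.
 */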
4363 
4364 int
4365 iwx_flush_tx_path(struct iwx_softc *sc)
4366 {
4367 	struct iwx_tx_path_flush_cmd flush_cmd = {
4368 		.sta_id = htole32(IWX_STATION_ID),
4369 		.tid_mask = htole16(0xffff),
4370 	};
4371 	int err;
4372 
4373 	err = iwx_send_cmd_pdu(sc, IWX_TXPATH_FLUSH, 0,
4374 	    sizeof(flush_cmd), &flush_cmd);
4375 	if (err)
4376 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
4377 	return err;
4378 }
4379 
4380 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
4381 
4382 int
4383 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
4384     struct iwx_beacon_filter_cmd *cmd)
4385 {
4386 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
4387 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
4388 }
4389 
4390 int
4391 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
4392 {
4393 	struct iwx_beacon_filter_cmd cmd = {
4394 		IWX_BF_CMD_CONFIG_DEFAULTS,
4395 		.bf_enable_beacon_filter = htole32(1),
4396 		.ba_enable_beacon_abort = htole32(enable),
4397 	};
4398 
4399 	if (!sc->sc_bf.bf_enabled)
4400 		return 0;
4401 
4402 	sc->sc_bf.ba_enabled = enable;
4403 	return iwx_beacon_filter_send_cmd(sc, &cmd);
4404 }
4405 
4406 void
4407 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
4408     struct iwx_mac_power_cmd *cmd)
4409 {
4410 	struct ieee80211com *ic = &sc->sc_ic;
4411 	struct ieee80211_node *ni = &in->in_ni;
4412 	int dtim_period, dtim_msec, keep_alive;
4413 
4414 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
4415 	    in->in_color));
4416 	if (ni->ni_dtimperiod)
4417 		dtim_period = ni->ni_dtimperiod;
4418 	else
4419 		dtim_period = 1;
4420 
4421 	/*
4422 	 * Regardless of power management state the driver must set the
4423 	 * keep-alive period. The firmware uses it to send keep-alive NDPs
4424 	 * immediately after association. Ensure the keep-alive period is
4425 	 * at least 3 * DTIM.
4426 	 */
4427 	dtim_msec = dtim_period * ni->ni_intval;
4428 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
4429 	keep_alive = roundup(keep_alive, 1000) / 1000;
4430 	cmd->keep_alive_seconds = htole16(keep_alive);
4431 
4432 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
4433 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4434 }
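
/*
 * Worked example for the keep-alive computation above: with a typical
 * beacon interval of 100 and a DTIM period of 1, 3 * dtim_msec = 300,
 * so the IWX_POWER_KEEP_ALIVE_PERIOD_SEC floor dominates:
 * MAX(300, 25000) rounded up is 25 seconds.
 */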
4435 
4436 int
4437 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
4438 {
4439 	int err;
4440 	int ba_enable;
4441 	struct iwx_mac_power_cmd cmd;
4442 
4443 	memset(&cmd, 0, sizeof(cmd));
4444 
4445 	iwx_power_build_cmd(sc, in, &cmd);
4446 
4447 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
4448 	    sizeof(cmd), &cmd);
4449 	if (err != 0)
4450 		return err;
4451 
4452 	ba_enable = !!(cmd.flags &
4453 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4454 	return iwx_update_beacon_abort(sc, in, ba_enable);
4455 }
4456 
4457 int
4458 iwx_power_update_device(struct iwx_softc *sc)
4459 {
4460 	struct iwx_device_power_cmd cmd = { };
4461 	struct ieee80211com *ic = &sc->sc_ic;
4462 
4463 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
4464 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4465 
4466 	return iwx_send_cmd_pdu(sc,
4467 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4468 }
4469 
4470 int
4471 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
4472 {
4473 	struct iwx_beacon_filter_cmd cmd = {
4474 		IWX_BF_CMD_CONFIG_DEFAULTS,
4475 		.bf_enable_beacon_filter = htole32(1),
4476 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
4477 	};
4478 	int err;
4479 
4480 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
4481 	if (err == 0)
4482 		sc->sc_bf.bf_enabled = 1;
4483 
4484 	return err;
4485 }
4486 
4487 int
4488 iwx_disable_beacon_filter(struct iwx_softc *sc)
4489 {
4490 	struct iwx_beacon_filter_cmd cmd;
4491 	int err;
4492 
4493 	memset(&cmd, 0, sizeof(cmd));
4494 
4495 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
4496 	if (err == 0)
4497 		sc->sc_bf.bf_enabled = 0;
4498 
4499 	return err;
4500 }
4501 
4502 int
4503 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
4504 {
4505 	struct iwx_add_sta_cmd add_sta_cmd;
4506 	int err;
4507 	uint32_t status;
4508 	struct ieee80211com *ic = &sc->sc_ic;
4509 
4510 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
4511 		panic("STA already added");
4512 
4513 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4514 
4515 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4516 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
4517 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
4518 	} else {
4519 		add_sta_cmd.sta_id = IWX_STATION_ID;
4520 		add_sta_cmd.station_type = IWX_STA_LINK;
4521 	}
4522 	add_sta_cmd.mac_id_n_color
4523 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4524 	if (!update) {
4525 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
4526 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
4527 			    etheranyaddr);
4528 		else
4529 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
4530 			    in->in_ni.ni_bssid);
4531 	}
4532 	add_sta_cmd.add_modify = update ? 1 : 0;
4533 	add_sta_cmd.station_flags_msk
4534 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
4535 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
4536 	if (update)
4537 		add_sta_cmd.modify_mask |= (IWX_STA_MODIFY_TID_DISABLE_TX);
4538 
4539 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4540 		add_sta_cmd.station_flags_msk
4541 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
4542 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
4543 
4544 		add_sta_cmd.station_flags
4545 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
4546 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4547 		case IEEE80211_AMPDU_PARAM_SS_2:
4548 			add_sta_cmd.station_flags
4549 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
4550 			break;
4551 		case IEEE80211_AMPDU_PARAM_SS_4:
4552 			add_sta_cmd.station_flags
4553 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
4554 			break;
4555 		case IEEE80211_AMPDU_PARAM_SS_8:
4556 			add_sta_cmd.station_flags
4557 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
4558 			break;
4559 		case IEEE80211_AMPDU_PARAM_SS_16:
4560 			add_sta_cmd.station_flags
4561 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
4562 			break;
4563 		default:
4564 			break;
4565 		}
4566 	}
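
	/*
	 * Note on the switch above: the AMPDU parameter set's "minimum
	 * MPDU start spacing" subfield maps directly onto the firmware's
	 * density flags, e.g. a 4 usec spacing
	 * (IEEE80211_AMPDU_PARAM_SS_4) selects
	 * IWX_STA_FLG_AGG_MPDU_DENS_4US.
	 */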
4567 
4568 	status = IWX_ADD_STA_SUCCESS;
4569 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
4570 	    &add_sta_cmd, &status);
4571 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
4572 		err = EIO;
4573 
4574 	return err;
4575 }
4576 
4577 int
4578 iwx_add_aux_sta(struct iwx_softc *sc)
4579 {
4580 	struct iwx_add_sta_cmd cmd;
4581 	int err, qid = IWX_DQA_AUX_QUEUE;
4582 	uint32_t status;
4583 
4584 	memset(&cmd, 0, sizeof(cmd));
4585 	cmd.sta_id = IWX_AUX_STA_ID;
4586 	cmd.station_type = IWX_STA_AUX_ACTIVITY;
4587 	cmd.mac_id_n_color =
4588 	    htole32(IWX_FW_CMD_ID_AND_COLOR(IWX_MAC_INDEX_AUX, 0));
4589 	cmd.tid_disable_tx = htole16(0xffff);
4590 
4591 	status = IWX_ADD_STA_SUCCESS;
4592 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
4593 	    &status);
4594 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
4595 		return EIO;
4596 
4597 	return iwx_enable_txq(sc, IWX_AUX_STA_ID, qid, IWX_MGMT_TID,
4598 	    IWX_TX_RING_COUNT);
4599 }
4600 
4601 int
4602 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
4603 {
4604 	struct ieee80211com *ic = &sc->sc_ic;
4605 	struct iwx_rm_sta_cmd rm_sta_cmd;
4606 	int err;
4607 
4608 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
4609 		panic("sta already removed");
4610 
4611 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
4612 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
4613 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
4614 	else
4615 		rm_sta_cmd.sta_id = IWX_STATION_ID;
4616 
4617 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
4618 	    &rm_sta_cmd);
4619 
4620 	return err;
4621 }
4622 
4623 uint8_t
4624 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
4625     struct iwx_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
4626 {
4627 	struct ieee80211com *ic = &sc->sc_ic;
4628 	struct ieee80211_channel *c;
4629 	uint8_t nchan;
4630 
4631 	for (nchan = 0, c = &ic->ic_channels[1];
4632 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4633 	    nchan < sc->sc_capa_n_scan_channels;
4634 	    c++) {
4635 		if (c->ic_flags == 0)
4636 			continue;
4637 
4638 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
4639 		chan->iter_count = 1;
4640 		chan->iter_interval = htole16(0);
4641 		if (n_ssids != 0 && !bgscan)
4642 			chan->flags = htole32(1 << 0); /* select SSID 0 */
4643 		chan++;
4644 		nchan++;
4645 	}
4646 
4647 	return nchan;
4648 }
4649 
4650 int
4651 iwx_fill_probe_req_v1(struct iwx_softc *sc, struct iwx_scan_probe_req_v1 *preq1)
4652 {
4653 	struct iwx_scan_probe_req preq2;
4654 	int err, i;
4655 
4656 	err = iwx_fill_probe_req(sc, &preq2);
4657 	if (err)
4658 		return err;
4659 
4660 	preq1->mac_header = preq2.mac_header;
4661 	for (i = 0; i < nitems(preq1->band_data); i++)
4662 		preq1->band_data[i] = preq2.band_data[i];
4663 	preq1->common_data = preq2.common_data;
4664 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
4665 	return 0;
4666 }
4667 
4668 int
4669 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
4670 {
4671 	struct ieee80211com *ic = &sc->sc_ic;
4672 	struct ifnet *ifp = IC2IFP(ic);
4673 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
4674 	struct ieee80211_rateset *rs;
4675 	size_t remain = sizeof(preq->buf);
4676 	uint8_t *frm, *pos;
4677 
4678 	memset(preq, 0, sizeof(*preq));
4679 
4680 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
4681 		return ENOBUFS;
4682 
4683 	/*
4684 	 * Build a probe request frame.  Most of the following code is a
4685 	 * copy & paste of what is done in net80211.
4686 	 */
4687 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4688 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4689 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4690 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
4691 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4692 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4693 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4694 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
4695 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
4696 
4697 	frm = (uint8_t *)(wh + 1);
4698 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
4699 
4700 	/* Tell the firmware where the MAC header is. */
4701 	preq->mac_header.offset = 0;
4702 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
4703 	remain -= frm - (uint8_t *)wh;
4704 
4705 	/* Fill in 2GHz IEs and tell firmware where they are. */
4706 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4707 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4708 		if (remain < 4 + rs->rs_nrates)
4709 			return ENOBUFS;
4710 	} else if (remain < 2 + rs->rs_nrates)
4711 		return ENOBUFS;
4712 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
4713 	pos = frm;
4714 	frm = ieee80211_add_rates(frm, rs);
4715 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4716 		frm = ieee80211_add_xrates(frm, rs);
4717 	preq->band_data[0].len = htole16(frm - pos);
4718 	remain -= frm - pos;
4719 
4720 	if (isset(sc->sc_enabled_capa,
4721 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
4722 		if (remain < 3)
4723 			return ENOBUFS;
4724 		*frm++ = IEEE80211_ELEMID_DSPARMS;
4725 		*frm++ = 1;
4726 		*frm++ = 0;
4727 		remain -= 3;
4728 	}
4729 
4730 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
4731 		/* Fill in 5GHz IEs. */
4732 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4733 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4734 			if (remain < 4 + rs->rs_nrates)
4735 				return ENOBUFS;
4736 		} else if (remain < 2 + rs->rs_nrates)
4737 			return ENOBUFS;
4738 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
4739 		pos = frm;
4740 		frm = ieee80211_add_rates(frm, rs);
4741 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4742 			frm = ieee80211_add_xrates(frm, rs);
4743 		preq->band_data[1].len = htole16(frm - pos);
4744 		remain -= frm - pos;
4745 	}
4746 
4747 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
4748 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
4749 	pos = frm;
4750 	if (ic->ic_flags & IEEE80211_F_HTON) {
4751 		if (remain < 28)
4752 			return ENOBUFS;
4753 		frm = ieee80211_add_htcaps(frm, ic);
4754 		/* XXX add WME info? */
4755 	}
4756 	preq->common_data.len = htole16(frm - pos);
4757 
4758 	return 0;
4759 }
4760 
4761 int
4762 iwx_config_umac_scan(struct iwx_softc *sc)
4763 {
4764 	struct ieee80211com *ic = &sc->sc_ic;
4765 	struct iwx_scan_config *scan_config;
4766 	int err, nchan;
4767 	size_t cmd_size;
4768 	struct ieee80211_channel *c;
4769 	struct iwx_host_cmd hcmd = {
4770 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
4771 		.flags = 0,
4772 	};
4773 	static const uint32_t rates = (IWX_SCAN_CONFIG_RATE_1M |
4774 	    IWX_SCAN_CONFIG_RATE_2M | IWX_SCAN_CONFIG_RATE_5M |
4775 	    IWX_SCAN_CONFIG_RATE_11M | IWX_SCAN_CONFIG_RATE_6M |
4776 	    IWX_SCAN_CONFIG_RATE_9M | IWX_SCAN_CONFIG_RATE_12M |
4777 	    IWX_SCAN_CONFIG_RATE_18M | IWX_SCAN_CONFIG_RATE_24M |
4778 	    IWX_SCAN_CONFIG_RATE_36M | IWX_SCAN_CONFIG_RATE_48M |
4779 	    IWX_SCAN_CONFIG_RATE_54M);
4780 
4781 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
4782 
4783 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
4784 	if (scan_config == NULL)
4785 		return ENOMEM;
4786 
4787 	scan_config->tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
4788 	scan_config->rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
4789 	scan_config->legacy_rates = htole32(rates |
4790 	    IWX_SCAN_CONFIG_SUPPORTED_RATE(rates));
4791 
4792 	/* These timings correspond to iwlwifi's UNASSOC scan. */
4793 	scan_config->dwell.active = 10;
4794 	scan_config->dwell.passive = 110;
4795 	scan_config->dwell.fragmented = 44;
4796 	scan_config->dwell.extended = 90;
4797 	scan_config->out_of_channel_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
4798 	scan_config->out_of_channel_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
4799 	scan_config->suspend_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
4800 	scan_config->suspend_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
4801 
4802 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
4803 
4804 	scan_config->bcast_sta_id = IWX_AUX_STA_ID;
4805 	scan_config->channel_flags = 0;
4806 
4807 	for (c = &ic->ic_channels[1], nchan = 0;
4808 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4809 	    nchan < sc->sc_capa_n_scan_channels; c++) {
4810 		if (c->ic_flags == 0)
4811 			continue;
4812 		scan_config->channel_array[nchan++] =
4813 		    ieee80211_mhz2ieee(c->ic_freq, 0);
4814 	}
4815 
4816 	scan_config->flags = htole32(IWX_SCAN_CONFIG_FLAG_ACTIVATE |
4817 	    IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
4818 	    IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
4819 	    IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
4820 	    IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
4821 	    IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
4822 	    IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
4823 	    IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
4824 	    IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
4825 	    IWX_SCAN_CONFIG_N_CHANNELS(nchan) |
4826 	    IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
4827 
4828 	hcmd.data[0] = scan_config;
4829 	hcmd.len[0] = cmd_size;
4830 
4831 	err = iwx_send_cmd(sc, &hcmd);
4832 	free(scan_config, M_DEVBUF, cmd_size);
4833 	return err;
4834 }
4835 
4836 int
4837 iwx_umac_scan_size(struct iwx_softc *sc)
4838 {
4839 	int base_size = IWX_SCAN_REQ_UMAC_SIZE_V1;
4840 	int tail_size;
4841 
4842 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
4843 		base_size = IWX_SCAN_REQ_UMAC_SIZE_V8;
4844 	else if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
4845 		base_size = IWX_SCAN_REQ_UMAC_SIZE_V7;
4846 #ifdef notyet
4847 	else if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
4848 		base_size = IWX_SCAN_REQ_UMAC_SIZE_V6;
4849 #endif
4850 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
4851 		tail_size = sizeof(struct iwx_scan_req_umac_tail_v2);
4852 	else
4853 		tail_size = sizeof(struct iwx_scan_req_umac_tail_v1);
4854 
4855 	return base_size + sizeof(struct iwx_scan_channel_cfg_umac) *
4856 	    sc->sc_capa_n_scan_channels + tail_size;
4857 }
4858 
4859 struct iwx_scan_umac_chan_param *
4860 iwx_get_scan_req_umac_chan_param(struct iwx_softc *sc,
4861     struct iwx_scan_req_umac *req)
4862 {
4863 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
4864 		return &req->v8.channel;
4865 
4866 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
4867 		return &req->v7.channel;
4868 #ifdef notyet
4869 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
4870 		return &req->v6.channel;
4871 #endif
4872 	return &req->v1.channel;
4873 }
4874 
4875 void *
4876 iwx_get_scan_req_umac_data(struct iwx_softc *sc, struct iwx_scan_req_umac *req)
4877 {
4878 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
4879 		return (void *)&req->v8.data;
4880 
4881 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
4882 		return (void *)&req->v7.data;
4883 #ifdef notyet
4884 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
4885 		return (void *)&req->v6.data;
4886 #endif
4887 	return (void *)&req->v1.data;
4889 }
4890 
4891 /* adaptive dwell max budget time [TU] for full scan */
4892 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
4893 /* adaptive dwell max budget time [TU] for directed scan */
4894 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
4895 /* adaptive dwell default high band APs number */
4896 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
4897 /* adaptive dwell default low band APs number */
4898 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
4899 /* adaptive dwell default APs number in social channels (1, 6, 11) */
4900 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
4901 
4902 int
4903 iwx_umac_scan(struct iwx_softc *sc, int bgscan)
4904 {
4905 	struct ieee80211com *ic = &sc->sc_ic;
4906 	struct iwx_host_cmd hcmd = {
4907 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
4908 		.len = { 0, },
4909 		.data = { NULL, },
4910 		.flags = 0,
4911 	};
4912 	struct iwx_scan_req_umac *req;
4913 	void *cmd_data, *tail_data;
4914 	struct iwx_scan_req_umac_tail_v2 *tail;
4915 	struct iwx_scan_req_umac_tail_v1 *tailv1;
4916 	struct iwx_scan_umac_chan_param *chanparam;
4917 	size_t req_len;
4918 	int err, async = bgscan;
4919 
4920 	req_len = iwx_umac_scan_size(sc);
4921 	if ((req_len < IWX_SCAN_REQ_UMAC_SIZE_V1 +
4922 	    sizeof(struct iwx_scan_req_umac_tail_v1)) ||
4923 	    req_len > IWX_MAX_CMD_PAYLOAD_SIZE)
4924 		return ERANGE;
4925 	req = malloc(req_len, M_DEVBUF,
4926 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
4927 	if (req == NULL)
4928 		return ENOMEM;
4929 
4930 	hcmd.len[0] = (uint16_t)req_len;
4931 	hcmd.data[0] = (void *)req;
4932 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
4933 
4934 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4935 		req->v7.adwell_default_n_aps_social =
4936 			IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
4937 		req->v7.adwell_default_n_aps =
4938 			IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
4939 
4940 		if (ic->ic_des_esslen != 0)
4941 			req->v7.adwell_max_budget =
4942 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
4943 		else
4944 			req->v7.adwell_max_budget =
4945 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
4946 
4947 		req->v7.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4948 		req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = 0;
4949 		req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = 0;
4950 
4951 		if (isset(sc->sc_ucode_api,
4952 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4953 			req->v8.active_dwell[IWX_SCAN_LB_LMAC_IDX] = 10;
4954 			req->v8.passive_dwell[IWX_SCAN_LB_LMAC_IDX] = 110;
4955 		} else {
4956 			req->v7.active_dwell = 10;
4957 			req->v7.passive_dwell = 110;
4958 			req->v7.fragmented_dwell = 44;
4959 		}
4960 	} else {
4961 		/* These timings correspond to iwlwifi's UNASSOC scan. */
4962 		req->v1.active_dwell = 10;
4963 		req->v1.passive_dwell = 110;
4964 		req->v1.fragmented_dwell = 44;
4965 		req->v1.extended_dwell = 90;
4966 	}
4967 
4968 	if (bgscan) {
4969 		const uint32_t timeout = htole32(120);
4970 		if (isset(sc->sc_ucode_api,
4971 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4972 			req->v8.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4973 			req->v8.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4974 		} else if (isset(sc->sc_ucode_api,
4975 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4976 			req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4977 			req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4978 		} else {
4979 			req->v1.max_out_time = timeout;
4980 			req->v1.suspend_time = timeout;
4981 		}
4982 	}
4983 
4984 	req->v1.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4985 	req->ooc_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4986 
4987 	cmd_data = iwx_get_scan_req_umac_data(sc, req);
4988 	chanparam = iwx_get_scan_req_umac_chan_param(sc, req);
4989 	chanparam->count = iwx_umac_scan_fill_channels(sc,
4990 	    (struct iwx_scan_channel_cfg_umac *)cmd_data,
4991 	    ic->ic_des_esslen != 0, bgscan);
4992 	chanparam->flags = 0;
4993 
4994 	tail_data = cmd_data + sizeof(struct iwx_scan_channel_cfg_umac) *
4995 	    sc->sc_capa_n_scan_channels;
4996 	tail = tail_data;
4997 	/* The tail v1 layout differs in the preq and direct_scan member fields. */
4998 	tailv1 = tail_data;
4999 
5000 	req->general_flags = htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5001 	    IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
5002 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
5003 		req->v8.general_flags2 =
5004 			IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
5005 	}
5006 
5007 #if 0 /* XXX Active scan causes firmware errors after association. */
5008 	/* Check if we're doing an active directed scan. */
5009 	if (ic->ic_des_esslen != 0) {
5010 		if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
5011 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5012 			tail->direct_scan[0].len = ic->ic_des_esslen;
5013 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5014 			    ic->ic_des_esslen);
5015 		} else {
5016 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5017 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
5018 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
5019 			    ic->ic_des_esslen);
5020 		}
5021 		req->general_flags |=
5022 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5023 	} else
5024 #endif
5025 		req->general_flags |= htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5026 
5027 	if (isset(sc->sc_enabled_capa,
5028 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5029 		req->general_flags |=
5030 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5031 
5032 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5033 		req->general_flags |=
5034 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
5035 	} else {
5036 		req->general_flags |=
5037 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5038 	}
5039 
5040 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
5041 		err = iwx_fill_probe_req(sc, &tail->preq);
5042 	else
5043 		err = iwx_fill_probe_req_v1(sc, &tailv1->preq);
5044 	if (err) {
5045 		free(req, M_DEVBUF, req_len);
5046 		return err;
5047 	}
5048 
5049 	/* Specify the scan plan: We'll do one iteration. */
5050 	tail->schedule[0].interval = 0;
5051 	tail->schedule[0].iter_count = 1;
5052 
5053 	err = iwx_send_cmd(sc, &hcmd);
5054 	free(req, M_DEVBUF, req_len);
5055 	return err;
5056 }
5057 
5058 uint8_t
5059 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5060 {
5061 	int i;
5062 	uint8_t rval;
5063 
5064 	for (i = 0; i < rs->rs_nrates; i++) {
5065 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5066 		if (rval == iwx_rates[ridx].rate)
5067 			return rs->rs_rates[i];
5068 	}
5069 
5070 	return 0;
5071 }
5072 
5073 int
5074 iwx_rval2ridx(int rval)
5075 {
5076 	int ridx;
5077 
5078 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
5079 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
5080 			continue;
5081 		if (rval == iwx_rates[ridx].rate)
5082 			break;
5083 	}
5084 
5085 	return ridx;
5086 }
5087 
5088 void
5089 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
5090     int *ofdm_rates)
5091 {
5092 	struct ieee80211_node *ni = &in->in_ni;
5093 	struct ieee80211_rateset *rs = &ni->ni_rates;
5094 	int lowest_present_ofdm = -1;
5095 	int lowest_present_cck = -1;
5096 	uint8_t cck = 0;
5097 	uint8_t ofdm = 0;
5098 	int i;
5099 
5100 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5101 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5102 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
5103 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5104 				continue;
5105 			cck |= (1 << i);
5106 			if (lowest_present_cck == -1 || lowest_present_cck > i)
5107 				lowest_present_cck = i;
5108 		}
5109 	}
5110 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
5111 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5112 			continue;
5113 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
5114 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5115 			lowest_present_ofdm = i;
5116 	}
5117 
5118 	/*
5119 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
5120 	 * variables. This isn't sufficient though, as there might not
5121 	 * be all the right rates in the bitmap. E.g. if the only basic
5122 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5123 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5124 	 *
5125 	 *    [...] a STA responding to a received frame shall transmit
5126 	 *    its Control Response frame [...] at the highest rate in the
5127 	 *    BSSBasicRateSet parameter that is less than or equal to the
5128 	 *    rate of the immediately previous frame in the frame exchange
5129 	 *    sequence ([...]) and that is of the same modulation class
5130 	 *    ([...]) as the received frame. If no rate contained in the
5131 	 *    BSSBasicRateSet parameter meets these conditions, then the
5132 	 *    control frame sent in response to a received frame shall be
5133 	 *    transmitted at the highest mandatory rate of the PHY that is
5134 	 *    less than or equal to the rate of the received frame, and
5135 	 *    that is of the same modulation class as the received frame.
5136 	 *
5137 	 * As a consequence, we need to add all mandatory rates that are
5138 	 * lower than all of the basic rates to these bitmaps.
5139 	 */
5140 
5141 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
5142 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
5143 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
5144 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
5145 	/* 6M already there or needed so always add */
5146 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
5147 
5148 	/*
5149 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5150 	 * Note, however:
5151 	 *  - if no CCK rates are basic, it must be ERP since there must
5152 	 *    be some basic rates at all, so they're OFDM => ERP PHY
5153 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
5154 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5155 	 *  - if 5.5M is basic, 1M and 2M are mandatory
5156 	 *  - if 2M is basic, 1M is mandatory
5157 	 *  - if 1M is basic, that's the only valid ACK rate.
5158 	 * As a consequence, it's not as complicated as it sounds, just add
5159 	 * any lower rates to the ACK rate bitmap.
5160 	 */
5161 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
5162 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
5163 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
5164 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
5165 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
5166 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
5167 	/* 1M already there or needed so always add */
5168 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
5169 
5170 	*cck_rates = cck;
5171 	*ofdm_rates = ofdm;
5172 }
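
/*
 * Worked example for the rules above: if the AP's only basic CCK rate
 * is 11 Mbps, the loop sets just the 11M bit, and the mandatory-rate
 * fixup then adds 5.5, 2 and 1 Mbps since each lies below the lowest
 * present basic rate; on the OFDM side 6 Mbps is always added.
 */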
5173 
5174 void
5175 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
5176     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
5177 {
5178 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5179 	struct ieee80211com *ic = &sc->sc_ic;
5180 	struct ieee80211_node *ni = ic->ic_bss;
5181 	int cck_ack_rates, ofdm_ack_rates;
5182 	int i;
5183 
5184 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5185 	    in->in_color));
5186 	cmd->action = htole32(action);
5187 
5188 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5189 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
5190 	else if (ic->ic_opmode == IEEE80211_M_STA)
5191 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
5192 	else
5193 		panic("unsupported operating mode %d\n", ic->ic_opmode);
5194 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
5195 
5196 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5197 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5198 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
5199 		return;
5200 	}
5201 
5202 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5203 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5204 	cmd->cck_rates = htole32(cck_ack_rates);
5205 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
5206 
5207 	cmd->cck_short_preamble
5208 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5209 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
5210 	cmd->short_slot
5211 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5212 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
5213 
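	/*
	 * Program one EDCA parameter record per access category.
	 * The 802.11 TXOP limit field is expressed in units of 32
	 * microseconds, so ac_txoplimit * 32 presumably converts the
	 * limit to microseconds for the firmware.
	 */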
5214 	for (i = 0; i < EDCA_NUM_AC; i++) {
5215 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
5216 		int txf = iwx_ac_to_tx_fifo[i];
5217 
5218 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
5219 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
5220 		cmd->ac[txf].aifsn = ac->ac_aifsn;
5221 		cmd->ac[txf].fifos_mask = (1 << txf);
5222 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
5223 	}
5224 	if (ni->ni_flags & IEEE80211_NODE_QOS)
5225 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
5226 
5227 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5228 		enum ieee80211_htprot htprot =
5229 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5230 		switch (htprot) {
5231 		case IEEE80211_HTPROT_NONE:
5232 			break;
5233 		case IEEE80211_HTPROT_NONMEMBER:
5234 		case IEEE80211_HTPROT_NONHT_MIXED:
5235 			cmd->protection_flags |=
5236 			    htole32(IWX_MAC_PROT_FLG_HT_PROT);
5237 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
5238 				cmd->protection_flags |=
5239 				    htole32(IWX_MAC_PROT_FLG_SELF_CTS_EN);
5240 			break;
5241 		case IEEE80211_HTPROT_20MHZ:
5242 			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
5243 				/* XXX ... and if our channel is 40 MHz ... */
5244 				cmd->protection_flags |=
5245 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
5246 				    IWX_MAC_PROT_FLG_FAT_PROT);
5247 				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
5248 					cmd->protection_flags |= htole32(
5249 					    IWX_MAC_PROT_FLG_SELF_CTS_EN);
5250 			}
5251 			break;
5252 		default:
5253 			break;
5254 		}
5255 
5256 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
5257 	}
5258 	if (ic->ic_flags & IEEE80211_F_USEPROT)
5259 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
5260 
5261 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
5262 #undef IWX_EXP2
5263 }
5264 
5265 void
5266 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
5267     struct iwx_mac_data_sta *sta, int assoc)
5268 {
5269 	struct ieee80211_node *ni = &in->in_ni;
5270 	uint32_t dtim_off;
5271 	uint64_t tsf;
5272 
5273 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
5274 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
5275 	tsf = letoh64(tsf);
5276 
5277 	sta->is_assoc = htole32(assoc);
5278 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5279 	sta->dtim_tsf = htole64(tsf + dtim_off);
5280 	sta->bi = htole32(ni->ni_intval);
5281 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
5282 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
5283 	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
5284 	sta->listen_interval = htole32(10);
5285 	sta->assoc_id = htole32(ni->ni_associd);
5286 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5287 }
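/*
 * Worked example for the DTIM arithmetic above (illustrative): with a
 * beacon interval of 100 TU and a DTIM count of 2, dtim_off becomes
 * 2 * 100 * IEEE80211_DUR_TU = 204800 usec, placing the next DTIM two
 * beacon intervals past the last beacon's timestamp. The *_reciprocal
 * fields appear to carry 0xffffffff / value (see iwx_reciprocal()) so
 * the firmware can divide by multiplying.
 */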
5288 
5289 int
5290 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
5291     int assoc)
5292 {
5293 	struct ieee80211com *ic = &sc->sc_ic;
5294 	struct ieee80211_node *ni = &in->in_ni;
5295 	struct iwx_mac_ctx_cmd cmd;
5296 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
5297 
5298 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5299 		panic("MAC already added");
5300 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5301 		panic("MAC already removed");
5302 
5303 	memset(&cmd, 0, sizeof(cmd));
5304 
5305 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
5306 
5307 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5308 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
5309 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
5310 		    IWX_MAC_FILTER_ACCEPT_GRP |
5311 		    IWX_MAC_FILTER_IN_BEACON |
5312 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
5313 		    IWX_MAC_FILTER_IN_CRC32);
5314 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
5315 		/*
5316 		 * Allow beacons to pass through as long as we are not
5317 		 * associated or we do not have dtim period information.
5318 		 */
5319 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
5320 	else
5321 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5322 
5323 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5324 }
5325 
5326 int
5327 iwx_clear_statistics(struct iwx_softc *sc)
5328 {
5329 	struct iwx_statistics_cmd scmd = {
5330 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
5331 	};
5332 	struct iwx_host_cmd cmd = {
5333 		.id = IWX_STATISTICS_CMD,
5334 		.len[0] = sizeof(scmd),
5335 		.data[0] = &scmd,
5336 		.flags = IWX_CMD_WANT_RESP,
5337 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
5338 	};
5339 	int err;
5340 
5341 	err = iwx_send_cmd(sc, &cmd);
5342 	if (err)
5343 		return err;
5344 
5345 	iwx_free_resp(sc, &cmd);
5346 	return 0;
5347 }
5348 
5349 int
5350 iwx_update_quotas(struct iwx_softc *sc, struct iwx_node *in, int running)
5351 {
5352 	struct iwx_time_quota_cmd cmd;
5353 	int i, idx, num_active_macs, quota, quota_rem;
5354 	int colors[IWX_MAX_BINDINGS] = { -1, -1, -1, -1, };
5355 	int n_ifs[IWX_MAX_BINDINGS] = {0, };
5356 	uint16_t id;
5357 
5358 	memset(&cmd, 0, sizeof(cmd));
5359 
5360 	/* currently, PHY ID == binding ID */
5361 	if (in && in->in_phyctxt) {
5362 		id = in->in_phyctxt->id;
5363 		KASSERT(id < IWX_MAX_BINDINGS);
5364 		colors[id] = in->in_phyctxt->color;
5365 		if (running)
5366 			n_ifs[id] = 1;
5367 	}
5368 
5369 	/*
5370 	 * The FW's scheduling session consists of
5371 	 * IWX_MAX_QUOTA fragments. Divide these fragments
5372 	 * equally between all the bindings that require quota
5373 	 */
5374 	num_active_macs = 0;
5375 	for (i = 0; i < IWX_MAX_BINDINGS; i++) {
5376 		cmd.quotas[i].id_and_color = htole32(IWX_FW_CTXT_INVALID);
5377 		num_active_macs += n_ifs[i];
5378 	}
5379 
5380 	quota = 0;
5381 	quota_rem = 0;
5382 	if (num_active_macs) {
5383 		quota = IWX_MAX_QUOTA / num_active_macs;
5384 		quota_rem = IWX_MAX_QUOTA % num_active_macs;
5385 	}
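	/*
	 * Illustrative example, assuming IWX_MAX_QUOTA is 128: with three
	 * active MACs each binding gets 128 / 3 = 42 fragments, and the
	 * remainder of 128 % 3 = 2 is handed to the first binding below.
	 */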
5386 
5387 	for (idx = 0, i = 0; i < IWX_MAX_BINDINGS; i++) {
5388 		if (colors[i] < 0)
5389 			continue;
5390 
5391 		cmd.quotas[idx].id_and_color =
5392 			htole32(IWX_FW_CMD_ID_AND_COLOR(i, colors[i]));
5393 
5394 		if (n_ifs[i] <= 0) {
5395 			cmd.quotas[idx].quota = htole32(0);
5396 			cmd.quotas[idx].max_duration = htole32(0);
5397 		} else {
5398 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5399 			cmd.quotas[idx].max_duration = htole32(0);
5400 		}
5401 		idx++;
5402 	}
5403 
5404 	/* Give the remainder of the session to the first binding */
5405 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5406 
5407 	return iwx_send_cmd_pdu(sc, IWX_TIME_QUOTA_CMD, 0,
5408 	    sizeof(cmd), &cmd);
5409 }
5410 
5411 void
5412 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
5413 {
5414 	int s = splnet();
5415 
5416 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
5417 		splx(s);
5418 		return;
5419 	}
5420 
5421 	refcnt_take(&sc->task_refs);
5422 	if (!task_add(taskq, task))
5423 		refcnt_rele_wake(&sc->task_refs);
5424 	splx(s);
5425 }
5426 
5427 void
5428 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
5429 {
5430 	if (task_del(taskq, task))
5431 		refcnt_rele(&sc->task_refs);
5432 }
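/*
 * Task reference counting: every task queued via iwx_add_task() holds
 * a reference on sc->task_refs, released when the task is cancelled or
 * when the task itself calls refcnt_rele_wake(). iwx_stop() sets
 * IWX_FLAG_SHUTDOWN to refuse new tasks and then sleeps in
 * refcnt_finalize() until all outstanding tasks have drained.
 */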
5433 
5434 int
5435 iwx_scan(struct iwx_softc *sc)
5436 {
5437 	struct ieee80211com *ic = &sc->sc_ic;
5438 	struct ifnet *ifp = IC2IFP(ic);
5439 	int err;
5440 
5441 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
5442 		err = iwx_scan_abort(sc);
5443 		if (err) {
			printf("%s: could not abort background scan "
			    "(error %d)\n", DEVNAME(sc), err);
5446 			return err;
5447 		}
5448 	}
5449 
5450 	err = iwx_umac_scan(sc, 0);
5451 	if (err) {
		printf("%s: could not initiate scan (error %d)\n",
		    DEVNAME(sc), err);
5453 		return err;
5454 	}
5455 
5456 	/*
5457 	 * The current mode might have been fixed during association.
5458 	 * Ensure all channels get scanned.
5459 	 */
5460 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
5461 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
5462 
5463 	sc->sc_flags |= IWX_FLAG_SCANNING;
5464 	if (ifp->if_flags & IFF_DEBUG)
5465 		printf("%s: %s -> %s\n", ifp->if_xname,
5466 		    ieee80211_state_name[ic->ic_state],
5467 		    ieee80211_state_name[IEEE80211_S_SCAN]);
5468 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
5469 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
5470 		ieee80211_node_cleanup(ic, ic->ic_bss);
5471 	}
5472 	ic->ic_state = IEEE80211_S_SCAN;
5473 	wakeup(&ic->ic_state); /* wake iwx_init() */
5474 
5475 	return 0;
5476 }
5477 
5478 int
5479 iwx_bgscan(struct ieee80211com *ic)
5480 {
5481 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
5482 	int err;
5483 
5484 	if (sc->sc_flags & IWX_FLAG_SCANNING)
5485 		return 0;
5486 
5487 	err = iwx_umac_scan(sc, 1);
5488 	if (err) {
		printf("%s: could not initiate scan (error %d)\n",
		    DEVNAME(sc), err);
5490 		return err;
5491 	}
5492 
5493 	sc->sc_flags |= IWX_FLAG_BGSCAN;
5494 	return 0;
5495 }
5496 
5497 int
5498 iwx_umac_scan_abort(struct iwx_softc *sc)
5499 {
5500 	struct iwx_umac_scan_abort cmd = { 0 };
5501 
5502 	return iwx_send_cmd_pdu(sc,
5503 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
5504 	    0, sizeof(cmd), &cmd);
5505 }
5506 
5507 int
5508 iwx_scan_abort(struct iwx_softc *sc)
5509 {
5510 	int err;
5511 
5512 	err = iwx_umac_scan_abort(sc);
5513 	if (err == 0)
5514 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
5515 	return err;
5516 }
5517 
5518 int
5519 iwx_enable_data_tx_queues(struct iwx_softc *sc)
5520 {
5521 	int err, ac;
5522 
5523 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
5524 		int qid = ac + IWX_DQA_AUX_QUEUE + 1;
5525 		/*
5526 		 * Regular data frames use the "MGMT" TID and queue.
5527 		 * Other TIDs and queues are reserved for frame aggregation.
5528 		 */
5529 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, IWX_MGMT_TID,
5530 		    IWX_TX_RING_COUNT);
5531 		if (err) {
			printf("%s: could not enable Tx queue %d (error %d)\n",
			    DEVNAME(sc), qid, err);
5534 			return err;
5535 		}
5536 	}
5537 
5538 	return 0;
5539 }
5540 
5541 int
5542 iwx_auth(struct iwx_softc *sc)
5543 {
5544 	struct ieee80211com *ic = &sc->sc_ic;
5545 	struct iwx_node *in = (void *)ic->ic_bss;
5546 	uint32_t duration;
5547 	int generation = sc->sc_generation, err;
5548 
5549 	splassert(IPL_NET);
5550 
5551 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5552 		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
5553 	else
5554 		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5555 	err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5556 	    IWX_FW_CTXT_ACTION_MODIFY, 0);
5557 	if (err) {
5558 		printf("%s: could not update PHY context (error %d)\n",
5559 		    DEVNAME(sc), err);
5560 		return err;
5561 	}
5562 	in->in_phyctxt = &sc->sc_phyctxt[0];
5563 
5564 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
5565 	if (err) {
5566 		printf("%s: could not add MAC context (error %d)\n",
5567 		    DEVNAME(sc), err);
5568 		return err;
	}
5570 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
5571 
5572 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
5573 	if (err) {
5574 		printf("%s: could not add binding (error %d)\n",
5575 		    DEVNAME(sc), err);
5576 		goto rm_mac_ctxt;
5577 	}
5578 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
5579 
5580 	err = iwx_add_sta_cmd(sc, in, 0);
5581 	if (err) {
5582 		printf("%s: could not add sta (error %d)\n",
5583 		    DEVNAME(sc), err);
5584 		goto rm_binding;
5585 	}
5586 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
5587 
5588 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5589 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
5590 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
5591 		    IWX_TX_RING_COUNT);
5592 		if (err)
5593 			goto rm_sta;
5594 		return 0;
5595 	}
5596 
5597 	err = iwx_enable_data_tx_queues(sc);
5598 	if (err)
5599 		goto rm_sta;
5600 
5601 	err = iwx_clear_statistics(sc);
5602 	if (err)
5603 		goto rm_sta;
5604 
5605 	/*
5606 	 * Prevent the FW from wandering off channel during association
5607 	 * by "protecting" the session with a time event.
5608 	 */
5609 	if (in->in_ni.ni_intval)
5610 		duration = in->in_ni.ni_intval * 2;
5611 	else
5612 		duration = IEEE80211_DUR_TU;
5613 	iwx_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
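	/*
	 * e.g. a typical beacon interval of 100 TU requests a 200 TU time
	 * event, presumably with up to 100 / 2 = 50 TU of scheduling delay.
	 */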
5614 
5615 	return 0;
5616 
5617 rm_sta:
5618 	if (generation == sc->sc_generation) {
5619 		iwx_rm_sta_cmd(sc, in);
5620 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
5621 	}
5622 rm_binding:
5623 	if (generation == sc->sc_generation) {
5624 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
5625 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
5626 	}
5627 rm_mac_ctxt:
5628 	if (generation == sc->sc_generation) {
5629 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
5630 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
5631 	}
5632 	return err;
5633 }
5634 
5635 int
5636 iwx_deauth(struct iwx_softc *sc)
5637 {
5638 	struct ieee80211com *ic = &sc->sc_ic;
5639 	struct iwx_node *in = (void *)ic->ic_bss;
5640 	int err;
5641 
5642 	splassert(IPL_NET);
5643 
5644 	iwx_unprotect_session(sc, in);
5645 
5646 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
5647 		err = iwx_rm_sta_cmd(sc, in);
5648 		if (err) {
5649 			printf("%s: could not remove STA (error %d)\n",
5650 			    DEVNAME(sc), err);
5651 			return err;
5652 		}
5653 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
5654 	}
5655 
5656 	err = iwx_flush_tx_path(sc);
5657 	if (err) {
5658 		printf("%s: could not flush Tx path (error %d)\n",
5659 		    DEVNAME(sc), err);
5660 		return err;
5661 	}
5662 
5663 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
5664 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
5665 		if (err) {
5666 			printf("%s: could not remove binding (error %d)\n",
5667 			    DEVNAME(sc), err);
5668 			return err;
5669 		}
5670 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
5671 	}
5672 
5673 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
5674 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
5675 		if (err) {
5676 			printf("%s: could not remove MAC context (error %d)\n",
5677 			    DEVNAME(sc), err);
5678 			return err;
5679 		}
5680 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
5681 	}
5682 
5683 	return 0;
5684 }
5685 
5686 int
5687 iwx_assoc(struct iwx_softc *sc)
5688 {
5689 	struct ieee80211com *ic = &sc->sc_ic;
5690 	struct iwx_node *in = (void *)ic->ic_bss;
5691 	int update_sta = (sc->sc_flags & IWX_FLAG_STA_ACTIVE);
5692 	int err;
5693 
5694 	splassert(IPL_NET);
5695 
5696 	err = iwx_add_sta_cmd(sc, in, update_sta);
5697 	if (err) {
5698 		printf("%s: could not %s STA (error %d)\n",
5699 		    DEVNAME(sc), update_sta ? "update" : "add", err);
5700 		return err;
5701 	}
5702 
5703 	if (!update_sta)
5704 		err = iwx_enable_data_tx_queues(sc);
5705 
5706 	return err;
5707 }
5708 
5709 int
5710 iwx_disassoc(struct iwx_softc *sc)
5711 {
5712 	struct ieee80211com *ic = &sc->sc_ic;
5713 	struct iwx_node *in = (void *)ic->ic_bss;
5714 	int err;
5715 
5716 	splassert(IPL_NET);
5717 
5718 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
5719 		err = iwx_rm_sta_cmd(sc, in);
5720 		if (err) {
5721 			printf("%s: could not remove STA (error %d)\n",
5722 			    DEVNAME(sc), err);
5723 			return err;
5724 		}
5725 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
5726 	}
5727 
5728 	return 0;
5729 }
5730 
5731 int
5732 iwx_run(struct iwx_softc *sc)
5733 {
5734 	struct ieee80211com *ic = &sc->sc_ic;
5735 	struct iwx_node *in = (void *)ic->ic_bss;
5736 	int err;
5737 
5738 	splassert(IPL_NET);
5739 
5740 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5741 		/* Add a MAC context and a sniffing STA. */
5742 		err = iwx_auth(sc);
5743 		if (err)
5744 			return err;
5745 	}
5746 
5747 	/* Configure Rx chains for MIMO. */
5748 	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
5749 	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
5750 	    !sc->sc_nvm.sku_cap_mimo_disable) {
5751 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
5752 		    2, 2, IWX_FW_CTXT_ACTION_MODIFY, 0);
5753 		if (err) {
5754 			printf("%s: failed to update PHY\n",
5755 			    DEVNAME(sc));
5756 			return err;
5757 		}
5758 	}
5759 
5760 	/* We have now been assigned an associd by the AP. */
5761 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
5762 	if (err) {
5763 		printf("%s: failed to update MAC\n", DEVNAME(sc));
5764 		return err;
5765 	}
5766 
5767 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
5768 	if (err) {
5769 		printf("%s: could not set sf full on (error %d)\n",
5770 		    DEVNAME(sc), err);
5771 		return err;
5772 	}
5773 
5774 	err = iwx_allow_mcast(sc);
5775 	if (err) {
5776 		printf("%s: could not allow mcast (error %d)\n",
5777 		    DEVNAME(sc), err);
5778 		return err;
5779 	}
5780 
5781 	err = iwx_power_update_device(sc);
5782 	if (err) {
5783 		printf("%s: could not send power command (error %d)\n",
5784 		    DEVNAME(sc), err);
5785 		return err;
5786 	}
5787 #ifdef notyet
5788 	/*
5789 	 * Disabled for now. Default beacon filter settings
5790 	 * prevent net80211 from getting ERP and HT protection
5791 	 * updates from beacons.
5792 	 */
5793 	err = iwx_enable_beacon_filter(sc, in);
5794 	if (err) {
5795 		printf("%s: could not enable beacon filter\n",
5796 		    DEVNAME(sc));
5797 		return err;
5798 	}
5799 #endif
5800 	err = iwx_power_mac_update_mode(sc, in);
5801 	if (err) {
5802 		printf("%s: could not update MAC power (error %d)\n",
5803 		    DEVNAME(sc), err);
5804 		return err;
5805 	}
5806 
5807 	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
5808 		err = iwx_update_quotas(sc, in, 1);
5809 		if (err) {
5810 			printf("%s: could not update quotas (error %d)\n",
5811 			    DEVNAME(sc), err);
5812 			return err;
5813 		}
5814 	}
5815 
5816 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
5817 	ieee80211_mira_node_init(&in->in_mn);
5818 
5819 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5820 		return 0;
5821 
5822 	/* Start at lowest available bit-rate, AMRR will raise. */
5823 	in->in_ni.ni_txrate = 0;
5824 	in->in_ni.ni_txmcs = 0;
5825 
5826 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_TLC_OFFLOAD))
5827 		DPRINTF(("%s: TODO: Enable firmware rate scaling?\n",
5828 		    DEVNAME(sc)));
5829 
5830 	timeout_add_msec(&sc->sc_calib_to, 500);
5831 	return 0;
5832 }
5833 
5834 int
5835 iwx_run_stop(struct iwx_softc *sc)
5836 {
5837 	struct ieee80211com *ic = &sc->sc_ic;
5838 	struct iwx_node *in = (void *)ic->ic_bss;
5839 	int err;
5840 
5841 	splassert(IPL_NET);
5842 
5843 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
5844 	if (err)
5845 		return err;
5846 
5847 	err = iwx_disable_beacon_filter(sc);
5848 	if (err) {
5849 		printf("%s: could not disable beacon filter (error %d)\n",
5850 		    DEVNAME(sc), err);
5851 		return err;
5852 	}
5853 
5854 	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
5855 		err = iwx_update_quotas(sc, in, 0);
5856 		if (err) {
5857 			printf("%s: could not update quotas (error %d)\n",
5858 			    DEVNAME(sc), err);
5859 			return err;
5860 		}
5861 	}
5862 
5863 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
5864 	if (err) {
5865 		printf("%s: failed to update MAC\n", DEVNAME(sc));
5866 		return err;
5867 	}
5868 
5869 	/* Reset Tx chains in case MIMO was enabled. */
5870 	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
5871 	    !sc->sc_nvm.sku_cap_mimo_disable) {
5872 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5873 		    IWX_FW_CTXT_ACTION_MODIFY, 0);
5874 		if (err) {
5875 			printf("%s: failed to update PHY\n", DEVNAME(sc));
5876 			return err;
5877 		}
5878 	}
5879 
5880 	return 0;
5881 }
5882 
5883 struct ieee80211_node *
5884 iwx_node_alloc(struct ieee80211com *ic)
5885 {
5886 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
5887 }
5888 
5889 void
5890 iwx_calib_timeout(void *arg)
5891 {
5892 	struct iwx_softc *sc = arg;
5893 	struct ieee80211com *ic = &sc->sc_ic;
5894 	struct iwx_node *in = (void *)ic->ic_bss;
5895 	struct ieee80211_node *ni = &in->in_ni;
5896 	int s;
5897 
5898 	s = splnet();
	/* Check ic_bss first; ni and in are derived from it. */
	if (ic->ic_bss && ic->ic_opmode == IEEE80211_M_STA &&
	    (ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
	    ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck)) {
5902 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5903 		if (in->ht_force_cck) {
5904 			struct ieee80211_rateset *rs = &ni->ni_rates;
5905 			uint8_t rv;
5906 			rv = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
5907 			if (IWX_RVAL_IS_OFDM(rv))
5908 				in->ht_force_cck = 0;
5909 		}
5910 	}
5911 
5912 	splx(s);
5913 
5914 	timeout_add_msec(&sc->sc_calib_to, 500);
5915 }
5916 
5917 int
5918 iwx_media_change(struct ifnet *ifp)
5919 {
5920 	struct iwx_softc *sc = ifp->if_softc;
5921 	struct ieee80211com *ic = &sc->sc_ic;
5922 	uint8_t rate, ridx;
5923 	int err;
5924 
5925 	err = ieee80211_media_change(ifp);
5926 	if (err != ENETRESET)
5927 		return err;
5928 
5929 	if (ic->ic_fixed_mcs != -1)
5930 		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
5931 	else if (ic->ic_fixed_rate != -1) {
5932 		rate = ic->ic_sup_rates[ic->ic_curmode].
5933 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5934 		/* Map 802.11 rate to HW rate index. */
5935 		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
5936 			if (iwx_rates[ridx].rate == rate)
5937 				break;
5938 		sc->sc_fixed_ridx = ridx;
5939 	}
5940 
5941 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5942 	    (IFF_UP | IFF_RUNNING)) {
5943 		iwx_stop(ifp);
5944 		err = iwx_init(ifp);
5945 	}
5946 	return err;
5947 }
5948 
5949 void
5950 iwx_newstate_task(void *psc)
5951 {
5952 	struct iwx_softc *sc = (struct iwx_softc *)psc;
5953 	struct ieee80211com *ic = &sc->sc_ic;
5954 	enum ieee80211_state nstate = sc->ns_nstate;
5955 	enum ieee80211_state ostate = ic->ic_state;
5956 	int arg = sc->ns_arg;
5957 	int err = 0, s = splnet();
5958 
5959 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
5960 		/* iwx_stop() is waiting for us. */
5961 		refcnt_rele_wake(&sc->task_refs);
5962 		splx(s);
5963 		return;
5964 	}
5965 
5966 	if (ostate == IEEE80211_S_SCAN) {
5967 		if (nstate == ostate) {
5968 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
5969 				refcnt_rele_wake(&sc->task_refs);
5970 				splx(s);
5971 				return;
5972 			}
5973 			/* Firmware is no longer scanning. Do another scan. */
5974 			goto next_scan;
5975 		}
5976 	}
5977 
5978 	if (nstate <= ostate) {
5979 		switch (ostate) {
5980 		case IEEE80211_S_RUN:
5981 			err = iwx_run_stop(sc);
5982 			if (err)
5983 				goto out;
5984 			/* FALLTHROUGH */
5985 		case IEEE80211_S_ASSOC:
5986 			if (nstate <= IEEE80211_S_ASSOC) {
5987 				err = iwx_disassoc(sc);
5988 				if (err)
5989 					goto out;
5990 			}
5991 			/* FALLTHROUGH */
5992 		case IEEE80211_S_AUTH:
5993 			if (nstate <= IEEE80211_S_AUTH) {
5994 				err = iwx_deauth(sc);
5995 				if (err)
5996 					goto out;
5997 			}
5998 			/* FALLTHROUGH */
5999 		case IEEE80211_S_SCAN:
6000 		case IEEE80211_S_INIT:
6001 			break;
6002 		}
6003 
6004 		/* Die now if iwx_stop() was called while we were sleeping. */
6005 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6006 			refcnt_rele_wake(&sc->task_refs);
6007 			splx(s);
6008 			return;
6009 		}
6010 	}
6011 
6012 	switch (nstate) {
6013 	case IEEE80211_S_INIT:
6014 		break;
6015 
6016 	case IEEE80211_S_SCAN:
6017 next_scan:
6018 		err = iwx_scan(sc);
6019 		if (err)
6020 			break;
6021 		refcnt_rele_wake(&sc->task_refs);
6022 		splx(s);
6023 		return;
6024 
6025 	case IEEE80211_S_AUTH:
6026 		err = iwx_auth(sc);
6027 		break;
6028 
6029 	case IEEE80211_S_ASSOC:
6030 		err = iwx_assoc(sc);
6031 		break;
6032 
6033 	case IEEE80211_S_RUN:
6034 		err = iwx_run(sc);
6035 		break;
6036 	}
6037 
6038 out:
6039 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
6040 		if (err)
6041 			task_add(systq, &sc->init_task);
6042 		else
6043 			sc->sc_newstate(ic, nstate, arg);
6044 	}
6045 	refcnt_rele_wake(&sc->task_refs);
6046 	splx(s);
6047 }
6048 
6049 int
6050 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6051 {
6052 	struct ifnet *ifp = IC2IFP(ic);
6053 	struct iwx_softc *sc = ifp->if_softc;
6054 	struct iwx_node *in = (void *)ic->ic_bss;
6055 
6056 	if (ic->ic_state == IEEE80211_S_RUN) {
6057 		timeout_del(&sc->sc_calib_to);
6058 		ieee80211_mira_cancel_timeouts(&in->in_mn);
6059 		iwx_del_task(sc, systq, &sc->ba_task);
6060 		iwx_del_task(sc, systq, &sc->htprot_task);
6061 	}
6062 
6063 	sc->ns_nstate = nstate;
6064 	sc->ns_arg = arg;
6065 
6066 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
6067 
6068 	return 0;
6069 }
6070 
6071 void
6072 iwx_endscan(struct iwx_softc *sc)
6073 {
6074 	struct ieee80211com *ic = &sc->sc_ic;
6075 
6076 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
6077 		return;
6078 
6079 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6080 	ieee80211_end_scan(&ic->ic_if);
6081 }
6082 
6083 /*
6084  * Aging and idle timeouts for the different possible scenarios
6085  * in default configuration
6086  */
6087 static const uint32_t
6088 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
6089 	{
6090 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6091 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6092 	},
6093 	{
6094 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
6095 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6096 	},
6097 	{
6098 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
6099 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
6100 	},
6101 	{
6102 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
6103 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
6104 	},
6105 	{
6106 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
6107 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
6108 	},
6109 };
6110 
6111 /*
6112  * Aging and idle timeouts for the different possible scenarios
6113  * in single BSS MAC configuration.
6114  */
6115 static const uint32_t
6116 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
6117 	{
6118 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
6119 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
6120 	},
6121 	{
6122 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
6123 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
6124 	},
6125 	{
6126 		htole32(IWX_SF_MCAST_AGING_TIMER),
6127 		htole32(IWX_SF_MCAST_IDLE_TIMER)
6128 	},
6129 	{
6130 		htole32(IWX_SF_BA_AGING_TIMER),
6131 		htole32(IWX_SF_BA_IDLE_TIMER)
6132 	},
6133 	{
6134 		htole32(IWX_SF_TX_RE_AGING_TIMER),
6135 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
6136 	},
6137 };
6138 
6139 void
6140 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
6141     struct ieee80211_node *ni)
6142 {
6143 	int i, j, watermark;
6144 
6145 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
6146 
6147 	/*
	 * If we are in the association flow, check the antenna configuration
	 * capabilities of the AP station and choose the watermark accordingly.
6150 	 */
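	/*
	 * ni_rxmcs[1] covers HT MCS 8-15; a nonzero byte means the peer
	 * can receive two spatial streams, hence the MIMO2 watermark.
	 */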
6151 	if (ni) {
6152 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6153 			if (ni->ni_rxmcs[1] != 0)
6154 				watermark = IWX_SF_W_MARK_MIMO2;
6155 			else
6156 				watermark = IWX_SF_W_MARK_SISO;
6157 		} else {
6158 			watermark = IWX_SF_W_MARK_LEGACY;
6159 		}
6160 	/* default watermark value for unassociated mode. */
6161 	} else {
6162 		watermark = IWX_SF_W_MARK_MIMO2;
6163 	}
6164 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
6165 
6166 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
6167 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
6168 			sf_cmd->long_delay_timeouts[i][j] =
6169 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
6170 		}
6171 	}
6172 
6173 	if (ni) {
6174 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
6175 		       sizeof(iwx_sf_full_timeout));
6176 	} else {
6177 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
6178 		       sizeof(iwx_sf_full_timeout_def));
6179 	}
}
6182 
6183 int
6184 iwx_sf_config(struct iwx_softc *sc, int new_state)
6185 {
6186 	struct ieee80211com *ic = &sc->sc_ic;
6187 	struct iwx_sf_cfg_cmd sf_cmd = {
6188 		.state = htole32(new_state),
6189 	};
6190 	int err = 0;
6191 
6192 	switch (new_state) {
6193 	case IWX_SF_UNINIT:
6194 	case IWX_SF_INIT_OFF:
6195 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
6196 		break;
6197 	case IWX_SF_FULL_ON:
6198 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6199 		break;
6200 	default:
6201 		return EINVAL;
6202 	}
6203 
6204 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
6205 				   sizeof(sf_cmd), &sf_cmd);
6206 	return err;
6207 }
6208 
6209 int
6210 iwx_send_bt_init_conf(struct iwx_softc *sc)
6211 {
6212 	struct iwx_bt_coex_cmd bt_cmd;
6213 
6214 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
6215 	bt_cmd.enabled_modules = htole32(IWX_BT_COEX_HIGH_BAND_RET);
6216 
6217 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
6218 	    &bt_cmd);
6219 }
6220 
6221 int
6222 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
6223 {
6224 	struct iwx_mcc_update_cmd mcc_cmd;
6225 	struct iwx_host_cmd hcmd = {
6226 		.id = IWX_MCC_UPDATE_CMD,
6227 		.flags = IWX_CMD_WANT_RESP,
6228 		.data = { &mcc_cmd },
6229 	};
6230 	int err;
6231 
6232 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6233 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
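	/* e.g. "ZZ", the world-wide domain used below, encodes as 0x5a5a. */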
6234 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6235 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6236 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
6237 	else
6238 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
6239 
6240 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
6241 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) +
6242 	    sizeof(struct iwx_mcc_update_resp);
6243 
6244 	err = iwx_send_cmd(sc, &hcmd);
6245 	if (err)
6246 		return err;
6247 
6248 	iwx_free_resp(sc, &hcmd);
6249 
6250 	return 0;
6251 }
6252 
6253 int
6254 iwx_init_hw(struct iwx_softc *sc)
6255 {
6256 	struct ieee80211com *ic = &sc->sc_ic;
6257 	int err, i;
6258 
6259 	err = iwx_preinit(sc);
6260 	if (err)
6261 		return err;
6262 
6263 	err = iwx_start_hw(sc);
6264 	if (err) {
6265 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
6266 		return err;
6267 	}
6268 
6269 	err = iwx_run_init_mvm_ucode(sc, 0);
6270 	if (err)
6271 		return err;
6272 
6273 	/* Should stop and start HW since INIT image just loaded. */
6274 	iwx_stop_device(sc);
6275 	err = iwx_start_hw(sc);
6276 	if (err) {
6277 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
6278 		return err;
6279 	}
6280 
6281 	err = iwx_load_ucode_wait_alive(sc);
6282 	if (err) {
6283 		printf("%s: could not load firmware\n", DEVNAME(sc));
		return err;
6285 	}
6286 
6287 	if (!iwx_nic_lock(sc))
6288 		return EBUSY;
6289 
6290 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
6291 	if (err) {
6292 		printf("%s: could not init tx ant config (error %d)\n",
6293 		    DEVNAME(sc), err);
6294 		goto err;
6295 	}
6296 
6297 	if (sc->sc_tx_with_siso_diversity) {
6298 		err = iwx_send_phy_cfg_cmd(sc);
6299 		if (err) {
6300 			printf("%s: could not send phy config (error %d)\n",
6301 			    DEVNAME(sc), err);
6302 			goto err;
6303 		}
6304 	}
6305 
	err = iwx_send_bt_init_conf(sc);
	if (err) {
		printf("%s: could not init bt coex (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	err = iwx_send_dqa_cmd(sc);
	if (err)
		goto err;
6316 
6317 	/* Add auxiliary station for scanning */
6318 	err = iwx_add_aux_sta(sc);
6319 	if (err) {
6320 		printf("%s: could not add aux station (error %d)\n",
6321 		    DEVNAME(sc), err);
6322 		goto err;
6323 	}
6324 
6325 	for (i = 0; i < 1; i++) {
6326 		/*
6327 		 * The channel used here isn't relevant as it's
6328 		 * going to be overwritten in the other flows.
6329 		 * For now use the first channel we have.
6330 		 */
6331 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6332 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6333 		    IWX_FW_CTXT_ACTION_ADD, 0);
6334 		if (err) {
6335 			printf("%s: could not add phy context %d (error %d)\n",
6336 			    DEVNAME(sc), i, err);
6337 			goto err;
6338 		}
6339 	}
6340 
6341 	err = iwx_config_ltr(sc);
6342 	if (err) {
6343 		printf("%s: PCIe LTR configuration failed (error %d)\n",
6344 		    DEVNAME(sc), err);
6345 	}
6346 
6347 	err = iwx_power_update_device(sc);
6348 	if (err) {
6349 		printf("%s: could not send power command (error %d)\n",
6350 		    DEVNAME(sc), err);
6351 		goto err;
6352 	}
6353 
6354 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
6355 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
6356 		if (err) {
6357 			printf("%s: could not init LAR (error %d)\n",
6358 			    DEVNAME(sc), err);
6359 			goto err;
6360 		}
6361 	}
6362 
6363 	err = iwx_config_umac_scan(sc);
6364 	if (err) {
6365 		printf("%s: could not configure scan (error %d)\n",
6366 		    DEVNAME(sc), err);
6367 		goto err;
6368 	}
6369 
6370 	err = iwx_disable_beacon_filter(sc);
6371 	if (err) {
6372 		printf("%s: could not disable beacon filter (error %d)\n",
6373 		    DEVNAME(sc), err);
6374 		goto err;
6375 	}
6376 
6377 err:
6378 	iwx_nic_unlock(sc);
6379 	return err;
6380 }
6381 
6382 /* Allow multicast from our BSSID. */
6383 int
6384 iwx_allow_mcast(struct iwx_softc *sc)
6385 {
6386 	struct ieee80211com *ic = &sc->sc_ic;
6387 	struct ieee80211_node *ni = ic->ic_bss;
6388 	struct iwx_mcast_filter_cmd *cmd;
6389 	size_t size;
6390 	int err;
6391 
6392 	size = roundup(sizeof(*cmd), 4);
6393 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
6394 	if (cmd == NULL)
6395 		return ENOMEM;
6396 	cmd->filter_own = 1;
6397 	cmd->port_id = 0;
6398 	cmd->count = 0;
6399 	cmd->pass_all = 1;
6400 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6401 
6402 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
6403 	    0, size, cmd);
6404 	free(cmd, M_DEVBUF, size);
6405 	return err;
6406 }
6407 
6408 int
6409 iwx_init(struct ifnet *ifp)
6410 {
6411 	struct iwx_softc *sc = ifp->if_softc;
6412 	struct ieee80211com *ic = &sc->sc_ic;
6413 	int err, generation;
6414 
6415 	rw_assert_wrlock(&sc->ioctl_rwl);
6416 
6417 	generation = ++sc->sc_generation;
6418 
6419 	KASSERT(sc->task_refs.refs == 0);
6420 	refcnt_init(&sc->task_refs);
6421 
6422 	err = iwx_init_hw(sc);
6423 	if (err) {
6424 		if (generation == sc->sc_generation)
6425 			iwx_stop(ifp);
6426 		return err;
6427 	}
6428 
6429 	ifq_clr_oactive(&ifp->if_snd);
6430 	ifp->if_flags |= IFF_RUNNING;
6431 
6432 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6433 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
6434 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
6435 		return 0;
6436 	}
6437 
6438 	ieee80211_begin_scan(ifp);
6439 
6440 	/*
6441 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
6442 	 * Wait until the transition to SCAN state has completed.
6443 	 */
6444 	do {
6445 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
6446 		    SEC_TO_NSEC(1));
6447 		if (generation != sc->sc_generation)
6448 			return ENXIO;
6449 		if (err)
6450 			return err;
6451 	} while (ic->ic_state != IEEE80211_S_SCAN);
6452 
6453 	return 0;
6454 }
6455 
6456 void
6457 iwx_start(struct ifnet *ifp)
6458 {
6459 	struct iwx_softc *sc = ifp->if_softc;
6460 	struct ieee80211com *ic = &sc->sc_ic;
6461 	struct ieee80211_node *ni;
6462 	struct ether_header *eh;
6463 	struct mbuf *m;
6464 	int ac = EDCA_AC_BE; /* XXX */
6465 
6466 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
6467 		return;
6468 
6469 	for (;;) {
6470 		/* why isn't this done per-queue? */
6471 		if (sc->qfullmsk != 0) {
6472 			ifq_set_oactive(&ifp->if_snd);
6473 			break;
6474 		}
6475 
6476 		/* need to send management frames even if we're not RUNning */
6477 		m = mq_dequeue(&ic->ic_mgtq);
6478 		if (m) {
6479 			ni = m->m_pkthdr.ph_cookie;
6480 			goto sendit;
6481 		}
6482 
6483 		if (ic->ic_state != IEEE80211_S_RUN ||
6484 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
6485 			break;
6486 
6487 		IFQ_DEQUEUE(&ifp->if_snd, m);
6488 		if (!m)
6489 			break;
6490 		if (m->m_len < sizeof (*eh) &&
6491 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
6492 			ifp->if_oerrors++;
6493 			continue;
6494 		}
6495 #if NBPFILTER > 0
6496 		if (ifp->if_bpf != NULL)
6497 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
6498 #endif
6499 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
6500 			ifp->if_oerrors++;
6501 			continue;
6502 		}
6503 
6504  sendit:
6505 #if NBPFILTER > 0
6506 		if (ic->ic_rawbpf != NULL)
6507 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
6508 #endif
6509 		if (iwx_tx(sc, m, ni, ac) != 0) {
6510 			ieee80211_release_node(ic, ni);
6511 			ifp->if_oerrors++;
6512 			continue;
6513 		}
6514 
6515 		if (ifp->if_flags & IFF_UP) {
6516 			sc->sc_tx_timer = 15;
6517 			ifp->if_timer = 1;
6518 		}
6519 	}
6520 
6521 	return;
6522 }
6523 
6524 void
6525 iwx_stop(struct ifnet *ifp)
6526 {
6527 	struct iwx_softc *sc = ifp->if_softc;
6528 	struct ieee80211com *ic = &sc->sc_ic;
6529 	struct iwx_node *in = (void *)ic->ic_bss;
6530 	int i, s = splnet();
6531 
6532 	rw_assert_wrlock(&sc->ioctl_rwl);
6533 
6534 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
6535 
6536 	/* Cancel scheduled tasks and let any stale tasks finish up. */
6537 	task_del(systq, &sc->init_task);
6538 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
6539 	iwx_del_task(sc, systq, &sc->ba_task);
6540 	iwx_del_task(sc, systq, &sc->htprot_task);
6541 	KASSERT(sc->task_refs.refs >= 1);
6542 	refcnt_finalize(&sc->task_refs, "iwxstop");
6543 
6544 	iwx_stop_device(sc);
6545 
6546 	/* Reset soft state. */
6547 
6548 	sc->sc_generation++;
6549 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
6550 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
6551 		sc->sc_cmd_resp_pkt[i] = NULL;
6552 		sc->sc_cmd_resp_len[i] = 0;
6553 	}
6554 	ifp->if_flags &= ~IFF_RUNNING;
6555 	ifq_clr_oactive(&ifp->if_snd);
6556 
6557 	in->in_phyctxt = NULL;
6558 	if (ic->ic_state == IEEE80211_S_RUN)
6559 		ieee80211_mira_cancel_timeouts(&in->in_mn); /* XXX refcount? */
6560 
6561 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6562 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
6563 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
6564 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
6565 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
6566 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
6567 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
6568 
6569 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
6570 
6571 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
6572 	ifp->if_timer = sc->sc_tx_timer = 0;
6573 
6574 	splx(s);
6575 }
6576 
6577 void
6578 iwx_watchdog(struct ifnet *ifp)
6579 {
6580 	struct iwx_softc *sc = ifp->if_softc;
6581 
6582 	ifp->if_timer = 0;
6583 	if (sc->sc_tx_timer > 0) {
6584 		if (--sc->sc_tx_timer == 0) {
6585 			printf("%s: device timeout\n", DEVNAME(sc));
6586 #ifdef IWX_DEBUG
6587 			iwx_nic_error(sc);
6588 #endif
6589 			if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
6590 				task_add(systq, &sc->init_task);
6591 			ifp->if_oerrors++;
6592 			return;
6593 		}
6594 		ifp->if_timer = 1;
6595 	}
6596 
6597 	ieee80211_watchdog(ifp);
6598 }
6599 
6600 int
6601 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
6602 {
6603 	struct iwx_softc *sc = ifp->if_softc;
6604 	int s, err = 0, generation = sc->sc_generation;
6605 
6606 	/*
6607 	 * Prevent processes from entering this function while another
6608 	 * process is tsleep'ing in it.
6609 	 */
6610 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
6611 	if (err == 0 && generation != sc->sc_generation) {
6612 		rw_exit(&sc->ioctl_rwl);
6613 		return ENXIO;
6614 	}
6615 	if (err)
6616 		return err;
6617 	s = splnet();
6618 
6619 	switch (cmd) {
6620 	case SIOCSIFADDR:
6621 		ifp->if_flags |= IFF_UP;
6622 		/* FALLTHROUGH */
6623 	case SIOCSIFFLAGS:
6624 		if (ifp->if_flags & IFF_UP) {
6625 			if (!(ifp->if_flags & IFF_RUNNING)) {
6626 				err = iwx_init(ifp);
6627 			}
6628 		} else {
6629 			if (ifp->if_flags & IFF_RUNNING)
6630 				iwx_stop(ifp);
6631 		}
6632 		break;
6633 
6634 	default:
6635 		err = ieee80211_ioctl(ifp, cmd, data);
6636 	}
6637 
6638 	if (err == ENETRESET) {
6639 		err = 0;
6640 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6641 		    (IFF_UP | IFF_RUNNING)) {
6642 			iwx_stop(ifp);
6643 			err = iwx_init(ifp);
6644 		}
6645 	}
6646 
6647 	splx(s);
6648 	rw_exit(&sc->ioctl_rwl);
6649 
6650 	return err;
6651 }
6652 
6653 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
6654 /*
6655  * Note: This structure is read from the device with IO accesses,
6656  * and the reading already does the endian conversion. As it is
6657  * read with uint32_t-sized accesses, any members with a different size
6658  * need to be ordered correctly though!
6659  */
6660 struct iwx_error_event_table {
6661 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6662 	uint32_t error_id;		/* type of error */
6663 	uint32_t trm_hw_status0;	/* TRM HW status */
6664 	uint32_t trm_hw_status1;	/* TRM HW status */
6665 	uint32_t blink2;		/* branch link */
6666 	uint32_t ilink1;		/* interrupt link */
6667 	uint32_t ilink2;		/* interrupt link */
6668 	uint32_t data1;		/* error-specific data */
6669 	uint32_t data2;		/* error-specific data */
6670 	uint32_t data3;		/* error-specific data */
6671 	uint32_t bcon_time;		/* beacon timer */
6672 	uint32_t tsf_low;		/* network timestamp function timer */
6673 	uint32_t tsf_hi;		/* network timestamp function timer */
6674 	uint32_t gp1;		/* GP1 timer register */
6675 	uint32_t gp2;		/* GP2 timer register */
6676 	uint32_t fw_rev_type;	/* firmware revision type */
6677 	uint32_t major;		/* uCode version major */
6678 	uint32_t minor;		/* uCode version minor */
6679 	uint32_t hw_ver;		/* HW Silicon version */
6680 	uint32_t brd_ver;		/* HW board version */
6681 	uint32_t log_pc;		/* log program counter */
6682 	uint32_t frame_ptr;		/* frame pointer */
6683 	uint32_t stack_ptr;		/* stack pointer */
6684 	uint32_t hcmd;		/* last host command header */
6685 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
6686 				 * rxtx_flag */
6687 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
6688 				 * host_flag */
6689 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
6690 				 * enc_flag */
6691 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
6692 				 * time_flag */
6693 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
6694 				 * wico interrupt */
6695 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
6696 	uint32_t wait_event;		/* wait event() caller address */
6697 	uint32_t l2p_control;	/* L2pControlField */
6698 	uint32_t l2p_duration;	/* L2pDurationField */
6699 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
6700 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
6705 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
6706 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6707 
6708 /*
6709  * UMAC error struct - relevant starting from family 8000 chip.
6710  * Note: This structure is read from the device with IO accesses,
6711  * and the reading already does the endian conversion. As it is
6712  * read with u32-sized accesses, any members with a different size
6713  * need to be ordered correctly though!
6714  */
6715 struct iwx_umac_error_event_table {
6716 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6717 	uint32_t error_id;	/* type of error */
6718 	uint32_t blink1;	/* branch link */
6719 	uint32_t blink2;	/* branch link */
6720 	uint32_t ilink1;	/* interrupt link */
6721 	uint32_t ilink2;	/* interrupt link */
6722 	uint32_t data1;		/* error-specific data */
6723 	uint32_t data2;		/* error-specific data */
6724 	uint32_t data3;		/* error-specific data */
6725 	uint32_t umac_major;
6726 	uint32_t umac_minor;
6727 	uint32_t frame_pointer;	/* core register 27*/
6728 	uint32_t stack_pointer;	/* core register 28 */
6729 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
6730 	uint32_t nic_isr_pref;	/* ISR status register */
6731 } __packed;
6732 
6733 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
6734 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
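/*
 * ERROR_START_OFFSET and ERROR_ELEM_SIZE describe the firmware's
 * on-device log layout; the dump code below also prints the "valid"
 * word as an entry count.
 */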
6735 
6736 void
6737 iwx_nic_umac_error(struct iwx_softc *sc)
6738 {
6739 	struct iwx_umac_error_event_table table;
6740 	uint32_t base;
6741 
6742 	base = sc->sc_uc.uc_umac_error_event_table;
6743 
6744 	if (base < 0x800000) {
6745 		printf("%s: Invalid error log pointer 0x%08x\n",
6746 		    DEVNAME(sc), base);
6747 		return;
6748 	}
6749 
6750 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
6751 		printf("%s: reading errlog failed\n", DEVNAME(sc));
6752 		return;
6753 	}
6754 
6755 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
6756 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
6757 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
6758 			sc->sc_flags, table.valid);
6759 	}
6760 
6761 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
6762 		iwx_desc_lookup(table.error_id));
6763 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
6764 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
6765 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
6766 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
6767 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
6768 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
6769 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
6770 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
6771 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
6772 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
6773 	    table.frame_pointer);
6774 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
6775 	    table.stack_pointer);
6776 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
6777 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
6778 	    table.nic_isr_pref);
6779 }
6780 
6781 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
6782 static struct {
6783 	const char *name;
6784 	uint8_t num;
6785 } advanced_lookup[] = {
6786 	{ "NMI_INTERRUPT_WDG", 0x34 },
6787 	{ "SYSASSERT", 0x35 },
6788 	{ "UCODE_VERSION_MISMATCH", 0x37 },
6789 	{ "BAD_COMMAND", 0x38 },
6790 	{ "BAD_COMMAND", 0x39 },
6791 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
6792 	{ "FATAL_ERROR", 0x3D },
6793 	{ "NMI_TRM_HW_ERR", 0x46 },
6794 	{ "NMI_INTERRUPT_TRM", 0x4C },
6795 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
6796 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
6797 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
6798 	{ "NMI_INTERRUPT_HOST", 0x66 },
6799 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
6800 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
6801 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
6802 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
6803 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
6804 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
6805 	{ "ADVANCED_SYSASSERT", 0 },
6806 };
6807 
6808 const char *
6809 iwx_desc_lookup(uint32_t num)
6810 {
6811 	int i;
6812 
6813 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
6814 		if (advanced_lookup[i].num ==
6815 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
6816 			return advanced_lookup[i].name;
6817 
6818 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6819 	return advanced_lookup[i].name;
6820 }
6821 
6822 /*
6823  * Support for dumping the error log seemed like a good idea ...
6824  * but it's mostly hex junk and the only sensible thing is the
6825  * hw/ucode revision (which we know anyway).  Since it's here,
6826  * I'll just leave it in, just in case e.g. the Intel guys want to
6827  * help us decipher some "ADVANCED_SYSASSERT" later.
6828  */
6829 void
6830 iwx_nic_error(struct iwx_softc *sc)
6831 {
6832 	struct iwx_error_event_table table;
6833 	uint32_t base;
6834 
6835 	printf("%s: dumping device error log\n", DEVNAME(sc));
6836 	base = sc->sc_uc.uc_lmac_error_event_table[0];
6837 	if (base < 0x800000) {
6838 		printf("%s: Invalid error log pointer 0x%08x\n",
6839 		    DEVNAME(sc), base);
6840 		return;
6841 	}
6842 
6843 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
6844 		printf("%s: reading errlog failed\n", DEVNAME(sc));
6845 		return;
6846 	}
6847 
6848 	if (!table.valid) {
6849 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
6850 		return;
6851 	}
6852 
6853 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
6854 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
6855 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
6856 		    sc->sc_flags, table.valid);
6857 	}
6858 
6859 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
6860 	    iwx_desc_lookup(table.error_id));
6861 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
6862 	    table.trm_hw_status0);
6863 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
6864 	    table.trm_hw_status1);
6865 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
6866 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
6867 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
6868 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
6869 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
6870 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
6871 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
6872 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
6873 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
6874 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
6875 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
6876 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
6877 	    table.fw_rev_type);
6878 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
6879 	    table.major);
6880 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
6881 	    table.minor);
6882 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
6883 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
6884 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
6885 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
6886 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
6887 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
6888 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
6889 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
6890 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
6891 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
6892 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
6893 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
6894 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
6895 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
6896 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
6897 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
6898 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
6899 
6900 	if (sc->sc_uc.uc_umac_error_event_table)
6901 		iwx_nic_umac_error(sc);
6902 }
6903 #endif
6904 
6905 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
6906 do {									\
6907 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
6908 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
6909 	_var_ = (void *)((_pkt_)+1);					\
6910 } while (/*CONSTCOND*/0)
6911 
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
6918 
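/*
 * Both macros assume that the response payload immediately follows the
 * iwx_rx_packet header in the RX DMA buffer: they sync that region for
 * CPU reads and point the result just past the header, as in e.g.
 * SYNC_RESP_STRUCT(resp4, pkt) in iwx_rx_pkt() below.
 */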
6919 int
6920 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
6921 {
6922 	int qid, idx, code;
6923 
6924 	qid = pkt->hdr.qid & ~0x80;
6925 	idx = pkt->hdr.idx;
6926 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
6927 
6928 	return (!(qid == 0 && idx == 0 && code == 0) &&
6929 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
6930 }
6931 
6932 void
6933 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
6934 {
6935 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
6936 	struct iwx_rx_packet *pkt, *nextpkt;
6937 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
6938 	struct mbuf *m0, *m;
6939 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
6940 	size_t remain = IWX_RBUF_SIZE;
6941 	int qid, idx, code, handled = 1;
6942 
6943 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
6944 	    BUS_DMASYNC_POSTREAD);
6945 
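	/*
	 * A single RX buffer may carry several firmware packets back to
	 * back. Walk them in place, each aligned to
	 * IWX_FH_RSCSR_FRAME_ALIGN, until an invalid header or the end of
	 * the buffer terminates the chain.
	 */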
6946 	m0 = data->m;
6947 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
6948 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
6949 		qid = pkt->hdr.qid;
6950 		idx = pkt->hdr.idx;
6951 
6952 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
6953 
6954 		if (!iwx_rx_pkt_valid(pkt))
6955 			break;
6956 
6957 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
6958 		if (len < sizeof(pkt->hdr) ||
6959 		    len > (IWX_RBUF_SIZE - offset - minsz))
6960 			break;
6961 
6962 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
6963 			/* Take mbuf m0 off the RX ring. */
6964 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
6965 				ifp->if_ierrors++;
6966 				break;
6967 			}
6968 			KASSERT(data->m != m0);
6969 		}
6970 
6971 		switch (code) {
6972 		case IWX_REPLY_RX_PHY_CMD:
6973 			iwx_rx_rx_phy_cmd(sc, pkt, data);
6974 			break;
6975 
6976 		case IWX_REPLY_RX_MPDU_CMD: {
6977 			size_t maxlen = remain - minsz;
6978 			nextoff = offset +
6979 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
6980 			nextpkt = (struct iwx_rx_packet *)
6981 			    (m0->m_data + nextoff);
6982 			if (nextoff + minsz >= IWX_RBUF_SIZE ||
6983 			    !iwx_rx_pkt_valid(nextpkt)) {
6984 				/* No need to copy last frame in buffer. */
6985 				if (offset > 0)
6986 					m_adj(m0, offset);
6987 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
6988 				m0 = NULL; /* stack owns m0 now; abort loop */
6989 			} else {
6990 				/*
6991 				 * Create an mbuf which points to the current
6992 				 * packet. Always copy from offset zero to
6993 				 * preserve m_pkthdr.
6994 				 */
6995 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
6996 				if (m == NULL) {
6997 					ifp->if_ierrors++;
6998 					m_freem(m0);
6999 					m0 = NULL;
7000 					break;
7001 				}
7002 				m_adj(m, offset);
7003 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
7004 			}
7005 
7006 			if (offset + minsz < remain)
7007 				remain -= offset;
7008 			else
7009 				remain = minsz;
			break;
7011 		}
7012 
7013 		case IWX_TX_CMD:
7014 			iwx_rx_tx_cmd(sc, pkt, data);
7015 			break;
7016 
7017 		case IWX_MISSED_BEACONS_NOTIFICATION:
7018 			iwx_rx_bmiss(sc, pkt, data);
7019 			break;
7020 
7021 		case IWX_MFUART_LOAD_NOTIFICATION:
7022 			break;
7023 
7024 		case IWX_ALIVE: {
7025 			struct iwx_alive_resp_v4 *resp4;
7026 
7027 			DPRINTF(("%s: firmware alive\n", __func__));
7028 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
7029 				SYNC_RESP_STRUCT(resp4, pkt);
7030 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
7031 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
7032 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
7033 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
7034 				sc->sc_uc.uc_log_event_table = le32toh(
7035 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
7036 				sc->sched_base = le32toh(
7037 				    resp4->lmac_data[0].dbg_ptrs.scd_base_ptr);
7038 				sc->sc_uc.uc_umac_error_event_table = le32toh(
7039 				    resp4->umac_data.dbg_ptrs.error_info_addr);
7040 				if (resp4->status == IWX_ALIVE_STATUS_OK)
7041 					sc->sc_uc.uc_ok = 1;
7042 				else
7043 					sc->sc_uc.uc_ok = 0;
7044 			}
7045 
7046 			sc->sc_uc.uc_intr = 1;
7047 			wakeup(&sc->sc_uc);
7048 			break;
7049 		}
7050 
7051 		case IWX_STATISTICS_NOTIFICATION: {
7052 			struct iwx_notif_statistics *stats;
7053 			SYNC_RESP_STRUCT(stats, pkt);
7054 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7055 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
7056 			break;
7057 		}
7058 
7059 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
7060 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
7061 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
7062 			break;
7063 
7064 		case IWX_PHY_CONFIGURATION_CMD:
7065 		case IWX_TX_ANT_CONFIGURATION_CMD:
7066 		case IWX_ADD_STA:
7067 		case IWX_MAC_CONTEXT_CMD:
7068 		case IWX_REPLY_SF_CFG_CMD:
7069 		case IWX_POWER_TABLE_CMD:
7070 		case IWX_LTR_CONFIG:
7071 		case IWX_PHY_CONTEXT_CMD:
7072 		case IWX_BINDING_CONTEXT_CMD:
7073 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
7074 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
7075 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
7076 		case IWX_REPLY_BEACON_FILTERING_CMD:
7077 		case IWX_MAC_PM_POWER_TABLE:
7078 		case IWX_TIME_QUOTA_CMD:
7079 		case IWX_REMOVE_STA:
7080 		case IWX_TXPATH_FLUSH:
7081 		case IWX_BT_CONFIG:
7082 		case IWX_NVM_ACCESS_CMD:
7083 		case IWX_MCC_UPDATE_CMD:
7084 		case IWX_TIME_EVENT_CMD:
7085 		case IWX_STATISTICS_CMD:
7086 		case IWX_SCD_QUEUE_CFG: {
7087 			size_t pkt_len;
7088 
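			/*
			 * A thread waiting for a synchronous command
			 * response has pre-allocated sc_cmd_resp_pkt[idx];
			 * if nobody is waiting, there is nothing to copy.
			 */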
7089 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
7090 				break;
7091 
7092 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7093 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
7094 
7095 			pkt_len = sizeof(pkt->len_n_flags) +
7096 			    iwx_rx_packet_len(pkt);
7097 
7098 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
7099 			    pkt_len < sizeof(*pkt) ||
7100 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
7101 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
7102 				    sc->sc_cmd_resp_len[idx]);
7103 				sc->sc_cmd_resp_pkt[idx] = NULL;
7104 				break;
7105 			}
7106 
7107 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
7108 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
7109 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
7110 			break;
7111 		}
7112 
7113 		case IWX_INIT_COMPLETE_NOTIF:
7114 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
7115 			wakeup(&sc->sc_init_complete);
7116 			break;
7117 
7118 		case IWX_SCAN_COMPLETE_UMAC: {
7119 			struct iwx_umac_scan_complete *notif;
7120 			SYNC_RESP_STRUCT(notif, pkt);
7121 			iwx_endscan(sc);
7122 			break;
7123 		}
7124 
7125 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
7126 			struct iwx_umac_scan_iter_complete_notif *notif;
7127 			SYNC_RESP_STRUCT(notif, pkt);
7128 			iwx_endscan(sc);
7129 			break;
7130 		}
7131 
7132 		case IWX_REPLY_ERROR: {
7133 			struct iwx_error_resp *resp;
7134 			SYNC_RESP_STRUCT(resp, pkt);
7135 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
7136 			    DEVNAME(sc), le32toh(resp->error_type),
7137 			    resp->cmd_id);
7138 			break;
7139 		}
7140 
7141 		case IWX_TIME_EVENT_NOTIFICATION: {
7142 			struct iwx_time_event_notif *notif;
7143 			uint32_t action;
7144 			SYNC_RESP_STRUCT(notif, pkt);
7145 
7146 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
7147 				break;
7148 			action = le32toh(notif->action);
7149 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
7150 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
7151 			break;
7152 		}
7153 
7154 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
7155 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
7156 			break;
7157 
7158 		/*
7159 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
7160 		 * messages. Just ignore them for now.
7161 		 */
7162 		case IWX_DEBUG_LOG_MSG:
7163 			break;
7164 
7165 		case IWX_MCAST_FILTER_CMD:
7166 			break;
7167 
7168 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
7169 			break;
7170 
7171 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
7172 			break;
7173 
7174 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
7175 		    IWX_NVM_ACCESS_COMPLETE):
7176 			break;
7177 
7178 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
7179 			break; /* happens in monitor mode; ignore for now */
7180 
7181 		default:
7182 			handled = 0;
7183 			printf("%s: unhandled firmware response 0x%x/0x%x "
7184 			    "rx ring %d[%d]\n",
7185 			    DEVNAME(sc), code, pkt->len_n_flags,
7186 			    (qid & ~0x80), idx);
7187 			break;
7188 		}
7189 
7190 		/*
7191 		 * uCode sets bit 0x80 when it originates the notification,
7192 		 * i.e. when the notification is not a direct response to a
7193 		 * command sent by the driver.
7194 		 * For example, uCode issues IWX_REPLY_RX when it sends a
7195 		 * received frame to the driver.
7196 		 */
7197 		if (handled && !(qid & (1 << 7))) {
7198 			iwx_cmd_done(sc, qid, idx, code);
7199 		}
7200 
7201 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
7202 	}
7203 
7204 	if (m0 && m0 != data->m)
7205 		m_freem(m0);
7206 }
7207 
7208 void
7209 iwx_notif_intr(struct iwx_softc *sc)
7210 {
7211 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
7212 	uint16_t hw;
7213 
7214 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7215 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7216 
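	/*
	 * closed_rb_num counts receive buffer descriptors the firmware
	 * has handed back; reduce it modulo the ring size to obtain the
	 * index at which we stop processing.
	 */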
7217 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7218 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
7219 	while (sc->rxq.cur != hw) {
7220 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7221 		iwx_rx_pkt(sc, data, &ml);
7222 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
7223 	}
7224 	if_input(&sc->sc_ic.ic_if, &ml);
7225 
7226 	/*
7227 	 * Tell the firmware what we have processed. The hardware seems
7228 	 * to get upset unless this write is aligned to a multiple of 8.
7229 	 */
7230 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
7231 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
7232 }
7233 
7234 int
7235 iwx_intr(void *arg)
7236 {
7237 	struct iwx_softc *sc = arg;
7238 	int handled = 0;
7239 	int r1, r2, rv = 0;
7240 
7241 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
7242 
7243 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
7244 		uint32_t *ict = sc->ict_dma.vaddr;
7245 		int tmp;
7246 
7247 		tmp = htole32(ict[sc->ict_cur]);
7248 		if (!tmp)
7249 			goto out_ena;
7250 
7251 		/*
7252 		 * OK, there was something; drain ICT entries until we have all.
7253 		 */
7254 		r1 = r2 = 0;
7255 		while (tmp) {
7256 			r1 |= tmp;
7257 			ict[sc->ict_cur] = 0;
7258 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
7259 			tmp = htole32(ict[sc->ict_cur]);
7260 		}
7261 
7262 		/* This is where the fun begins.  Don't ask. */
7263 		if (r1 == 0xffffffff)
7264 			r1 = 0;
7265 
7266 		/* I am not expected to understand this. */
7267 		if (r1 & 0xc0000)
7268 			r1 |= 0x8000;
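		/*
		 * The ICT holds a byte-compressed image of CSR_INT;
		 * expand those bytes back into the bit positions the
		 * handlers below expect.
		 */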
7269 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7270 	} else {
7271 		r1 = IWX_READ(sc, IWX_CSR_INT);
7272 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7273 			goto out;
7274 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
7275 	}
7276 	if (r1 == 0 && r2 == 0) {
7277 		goto out_ena;
7278 	}
7279 
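	/*
	 * Ack the interrupt causes read above; ORing in the complement
	 * of sc_intmask also clears stale causes we never enabled.
	 */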
7280 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
7281 
7282 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
7283 		int i;
7284 
7285 		/* Firmware has now configured the RFH. */
7286 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
7287 			iwx_update_rx_desc(sc, &sc->rxq, i);
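		/*
		 * Prime the RX write pointer; as in iwx_notif_intr(), the
		 * value written is kept aligned to a multiple of 8.
		 */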
7288 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
7289 	}
7290 
7291 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
7292 
7293 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
7294 		handled |= IWX_CSR_INT_BIT_RF_KILL;
7295 		iwx_check_rfkill(sc);
7296 		task_add(systq, &sc->init_task);
7297 		rv = 1;
7298 		goto out_ena;
7299 	}
7300 
7301 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
7302 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
7303 		int i;
7304 
7305 		iwx_nic_error(sc);
7306 
7307 		/* Dump driver status (TX and RX rings) while we're here. */
7308 		printf("driver status:\n");
7309 		for (i = 0; i < IWX_MAX_QUEUES; i++) {
7310 			struct iwx_tx_ring *ring = &sc->txq[i];
7311 			printf("  tx ring %2d: qid=%-2d cur=%-3d "
7312 			    "queued=%-3d\n",
7313 			    i, ring->qid, ring->cur, ring->queued);
7314 		}
7315 		printf("  rx ring: cur=%d\n", sc->rxq.cur);
7316 		printf("  802.11 state %s\n",
7317 		    ieee80211_state_name[sc->sc_ic.ic_state]);
7318 #endif
7319 
7320 		printf("%s: fatal firmware error\n", DEVNAME(sc));
7321 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7322 			task_add(systq, &sc->init_task);
7323 		rv = 1;
7324 		goto out;
7325 
7326 	}
7327 
7328 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
7329 		handled |= IWX_CSR_INT_BIT_HW_ERR;
7330 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
7331 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
7332 			sc->sc_flags |= IWX_FLAG_HW_ERR;
7333 			task_add(systq, &sc->init_task);
7334 		}
7335 		rv = 1;
7336 		goto out;
7337 	}
7338 
7339 	/* firmware chunk loaded */
7340 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
7341 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
7342 		handled |= IWX_CSR_INT_BIT_FH_TX;
7343 
7344 		sc->sc_fw_chunk_done = 1;
7345 		wakeup(&sc->sc_fw);
7346 	}
7347 
7348 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
7349 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
7350 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
7351 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
7352 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
7353 		}
7354 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
7355 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
7356 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
7357 		}
7358 
7359 		/* Disable periodic interrupt; we use it as just a one-shot. */
7360 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
7361 
7362 		/*
7363 		 * Enable periodic interrupt in 8 msec only if we received
7364 		 * real RX interrupt (instead of just periodic int), to catch
7365 		 * any dangling Rx interrupt.  If it was just the periodic
7366 		 * interrupt, there was no dangling Rx activity, and no need
7367 		 * to extend the periodic interrupt; one-shot is enough.
7368 		 */
7369 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
7370 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
7371 			    IWX_CSR_INT_PERIODIC_ENA);
7372 
7373 		iwx_notif_intr(sc);
7374 	}
7375 
7376 	rv = 1;
7377 
7378  out_ena:
7379 	iwx_restore_interrupts(sc);
7380  out:
7381 	return rv;
7382 }
7383 
7384 int
7385 iwx_intr_msix(void *arg)
7386 {
7387 	struct iwx_softc *sc = arg;
7388 	uint32_t inta_fh, inta_hw;
7389 	int vector = 0;
7390 
7391 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
7392 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
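	/*
	 * The MSIX cause registers are write-1-clear: writing back the
	 * values just read acks exactly those causes.
	 */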
7393 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
7394 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
7395 	inta_fh &= sc->sc_fh_mask;
7396 	inta_hw &= sc->sc_hw_mask;
7397 
7398 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
7399 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
7400 		iwx_notif_intr(sc);
7401 	}
7402 
7403 	/* firmware chunk loaded */
7404 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
7405 		sc->sc_fw_chunk_done = 1;
7406 		wakeup(&sc->sc_fw);
7407 	}
7408 
7409 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
7410 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
7411 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
7412 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
7413 		int i;
7414 
7415 		iwx_nic_error(sc);
7416 
7417 		/* Dump driver status (TX and RX rings) while we're here. */
7418 		printf("driver status:\n");
7419 		for (i = 0; i < IWX_MAX_QUEUES; i++) {
7420 			struct iwx_tx_ring *ring = &sc->txq[i];
7421 			printf("  tx ring %2d: qid=%-2d cur=%-3d "
7422 			    "queued=%-3d\n",
7423 			    i, ring->qid, ring->cur, ring->queued);
7424 		}
7425 		printf("  rx ring: cur=%d\n", sc->rxq.cur);
7426 		printf("  802.11 state %s\n",
7427 		    ieee80211_state_name[sc->sc_ic.ic_state]);
7428 #endif
7429 
7430 		printf("%s: fatal firmware error\n", DEVNAME(sc));
7431 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7432 			task_add(systq, &sc->init_task);
7433 		return 1;
7434 	}
7435 
7436 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
7437 		iwx_check_rfkill(sc);
7438 		task_add(systq, &sc->init_task);
7439 	}
7440 
7441 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
7442 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
7443 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
7444 			sc->sc_flags |= IWX_FLAG_HW_ERR;
7445 			task_add(systq, &sc->init_task);
7446 		}
7447 		return 1;
7448 	}
7449 
7450 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
7451 		int i;
7452 
7453 		/* Firmware has now configured the RFH. */
7454 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
7455 			iwx_update_rx_desc(sc, &sc->rxq, i);
7456 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
7457 	}
7458 
7459 	/*
7460 	 * Before raising an interrupt the HW masks it to prevent nested
7461 	 * interrupts. This is done by setting the corresponding bit in
7462 	 * the automask register. After handling the interrupt, it must
7463 	 * be re-enabled by clearing this bit. The register is defined
7464 	 * as write-1-clear (W1C): a bit is cleared by writing 1 to it,
7465 	 * which is what the write below does.
7466 	 */
7467 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
7468 	return 1;
7469 }
7470 
7471 typedef void *iwx_match_t;
7472 
7473 static const struct pci_matchid iwx_devices[] = {
7474 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
7475 };
7476 
7477 int
7478 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
7479 {
7480 	return pci_matchbyid((struct pci_attach_args *)aux, iwx_devices,
7481 	    nitems(iwx_devices));
7482 }
7483 
7484 int
7485 iwx_preinit(struct iwx_softc *sc)
7486 {
7487 	struct ieee80211com *ic = &sc->sc_ic;
7488 	struct ifnet *ifp = IC2IFP(ic);
7489 	int err;
7490 	static int attached;
7491 
7492 	err = iwx_prepare_card_hw(sc);
7493 	if (err) {
7494 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7495 		return err;
7496 	}
7497 
7498 	if (attached) {
7499 		/* Update MAC in case the upper layers changed it. */
7500 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
7501 		    ((struct arpcom *)ifp)->ac_enaddr);
7502 		return 0;
7503 	}
7504 
7505 	err = iwx_start_hw(sc);
7506 	if (err) {
7507 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7508 		return err;
7509 	}
7510 
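	/*
	 * Run the firmware's init image once so the NVM can be read,
	 * then park the hardware again; the runtime image is loaded
	 * when the interface is brought up.
	 */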
7511 	err = iwx_run_init_mvm_ucode(sc, 1);
7512 	iwx_stop_device(sc);
7513 	if (err)
7514 		return err;
7515 
7516 	/* Print version info and MAC address on first successful fw load. */
7517 	attached = 1;
7518 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
7519 	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
7520 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
7521 
7522 	if (sc->sc_nvm.sku_cap_11n_enable)
7523 		iwx_setup_ht_rates(sc);
7524 
7525 	/* not all hardware can do 5GHz band */
7526 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
7527 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
7528 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
7529 
7530 	/* Configure channel information obtained from firmware. */
7531 	ieee80211_channel_init(ifp);
7532 
7533 	/* Configure MAC address. */
7534 	err = if_setlladdr(ifp, ic->ic_myaddr);
7535 	if (err)
7536 		printf("%s: could not set MAC address (error %d)\n",
7537 		    DEVNAME(sc), err);
7538 
7539 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
7540 
7541 	return 0;
7542 }
7543 
7544 void
7545 iwx_attach_hook(struct device *self)
7546 {
7547 	struct iwx_softc *sc = (void *)self;
7548 
7549 	KASSERT(!cold);
7550 
7551 	iwx_preinit(sc);
7552 }
7553 
7554 void
7555 iwx_attach(struct device *parent, struct device *self, void *aux)
7556 {
7557 	struct iwx_softc *sc = (void *)self;
7558 	struct pci_attach_args *pa = aux;
7559 	pci_intr_handle_t ih;
7560 	pcireg_t reg, memtype;
7561 	struct ieee80211com *ic = &sc->sc_ic;
7562 	struct ifnet *ifp = &ic->ic_if;
7563 	const char *intrstr;
7564 	int err;
7565 	int txq_i, i;
7566 
7567 	sc->sc_pct = pa->pa_pc;
7568 	sc->sc_pcitag = pa->pa_tag;
7569 	sc->sc_dmat = pa->pa_dmat;
7570 
7571 	rw_init(&sc->ioctl_rwl, "iwxioctl");
7572 
7573 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7574 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7575 	if (err == 0) {
7576 		printf("%s: PCIe capability structure not found!\n",
7577 		    DEVNAME(sc));
7578 		return;
7579 	}
7580 
7581 	/* Clear device-specific "PCI retry timeout" register (41h). */
7582 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7583 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7584 
7585 	/* Enable bus-mastering and hardware bug workaround. */
7586 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7587 	reg |= PCI_COMMAND_MASTER_ENABLE;
7588 	/* If not using MSI, ensure legacy INTx is not disabled. */
7589 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
7590 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
7591 	}
7592 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7593 
7594 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7595 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7596 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
7597 	if (err) {
7598 		printf("%s: can't map mem space\n", DEVNAME(sc));
7599 		return;
7600 	}
7601 
7602 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
7603 		sc->sc_msix = 1;
7604 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
7605 		printf("%s: can't map interrupt\n", DEVNAME(sc));
7606 		return;
7607 	}
7608 
7609 	intrstr = pci_intr_string(sc->sc_pct, ih);
7610 	if (sc->sc_msix)
7611 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
7612 		    iwx_intr_msix, sc, DEVNAME(sc));
7613 	else
7614 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
7615 		    iwx_intr, sc, DEVNAME(sc));
7616 
7617 	if (sc->sc_ih == NULL) {
7618 		printf("\n");
7619 		printf("%s: can't establish interrupt", DEVNAME(sc));
7620 		if (intrstr != NULL)
7621 			printf(" at %s", intrstr);
7622 		printf("\n");
7623 		return;
7624 	}
7625 	printf(", %s\n", intrstr);
7626 
7627 	iwx_disable_interrupts(sc);
7628 
7629 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
7630 	switch (PCI_PRODUCT(pa->pa_id)) {
7631 	case PCI_PRODUCT_INTEL_WL_22500_1:
7632 		sc->sc_fwname = "iwx-cc-a0-46";
7633 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
7634 		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
7635 		sc->sc_nvm_max_section_size = 32768;
7636 		sc->sc_integrated = 1;
7637 		sc->sc_tx_with_siso_diversity = 0;
7638 		break;
7639 	default:
7640 		printf("%s: unknown adapter type\n", DEVNAME(sc));
7641 		return;
7642 	}
7643 
7644 	/*
7645 	 * Since the 8000 HW family the format of the 4 bytes of CSR_HW_REV
7646 	 * has changed: the revision step now also includes bits 0-1 (there
7647 	 * is no more "dash" value). To keep hw_rev backwards compatible,
7648 	 * store it in the old format.
7649 	 */
7650 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7651 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
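	/*
	 * For example, assuming IWX_CSR_HW_REV_STEP() extracts bits 2-3,
	 * a new-format step value of 3 ends up as 0xc in the old "dash"
	 * position.
	 */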
7652 
7653 	if (iwx_prepare_card_hw(sc) != 0) {
7654 		printf("%s: could not initialize hardware\n",
7655 		    DEVNAME(sc));
7656 		return;
7657 	}
7658 
7659 	/*
7660 	 * In order to recognize C step the driver should read the
7661 	 * chip version id located at the AUX bus MISC address.
7662 	 */
7663 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
7664 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7665 	DELAY(2);
7666 
7667 	err = iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
7668 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7669 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7670 			   25000);
7671 	if (!err) {
7672 		printf("%s: failed to wake up the NIC\n", DEVNAME(sc));
7673 		return;
7674 	}
7675 
7676 	if (iwx_nic_lock(sc)) {
7677 		uint32_t hw_step = iwx_read_prph(sc, IWX_WFPM_CTRL_REG);
7678 		hw_step |= IWX_ENABLE_WFPM;
7679 		iwx_write_prph(sc, IWX_WFPM_CTRL_REG, hw_step);
7680 		hw_step = iwx_read_prph(sc, IWX_AUX_MISC_REG);
7681 		hw_step = (hw_step >> IWX_HW_STEP_LOCATION_BITS) & 0xF;
7682 		if (hw_step == 0x3)
7683 			sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7684 					(IWX_SILICON_C_STEP << 2);
7685 		iwx_nic_unlock(sc);
7686 	} else {
7687 		printf("%s: failed to lock the NIC\n", DEVNAME(sc));
7688 		return;
7689 	}
7690 
7691 	/*
7692 	 * Allocate DMA memory for firmware transfers.
7693 	 * Must be aligned on a 16-byte boundary.
7694 	 */
7695 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
7696 	    sc->sc_fwdmasegsz, 16);
7697 	if (err) {
7698 		printf("%s: could not allocate memory for firmware\n",
7699 		    DEVNAME(sc));
7700 		return;
7701 	}
7702 
7703 	/* Allocate interrupt cause table (ICT). */
7704 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
7705 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
7706 	if (err) {
7707 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
7708 		goto fail1;
7709 	}
7710 
7711 	/* TX scheduler rings must be aligned on a 1KB boundary. */
7712 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7713 	    nitems(sc->txq) * sizeof(struct iwx_agn_scd_bc_tbl), 1024);
7714 	if (err) {
7715 		printf("%s: could not allocate TX scheduler rings\n",
7716 		    DEVNAME(sc));
7717 		goto fail3;
7718 	}
7719 
7720 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
7721 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
7722 		if (err) {
7723 			printf("%s: could not allocate TX ring %d\n",
7724 			    DEVNAME(sc), txq_i);
7725 			goto fail4;
7726 		}
7727 	}
7728 
7729 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
7730 	if (err) {
7731 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
7732 		goto fail4;
7733 	}
7734 
7735 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
7736 	if (sc->sc_nswq == NULL)
7737 		goto fail4;
7738 
7739 	/* Clear pending interrupts. */
7740 	IWX_WRITE(sc, IWX_CSR_INT, 0xffffffff);
7741 
7742 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
7743 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
7744 	ic->ic_state = IEEE80211_S_INIT;
7745 
7746 	/* Set device capabilities. */
7747 	ic->ic_caps =
7748 	    IEEE80211_C_WEP |		/* WEP */
7749 	    IEEE80211_C_RSN |		/* WPA/RSN */
7750 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
7751 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
7752 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
7753 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
7754 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
7755 
7756 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
7757 	ic->ic_htcaps |=
7758 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
7759 	ic->ic_htxcaps = 0;
7760 	ic->ic_txbfcaps = 0;
7761 	ic->ic_aselcaps = 0;
7762 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
7763 
7764 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7765 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
7766 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
7767 
7768 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
7769 		sc->sc_phyctxt[i].id = i;
7770 	}
7771 
7772 	sc->sc_amrr.amrr_min_success_threshold =  1;
7773 	sc->sc_amrr.amrr_max_success_threshold = 15;
7774 
7775 	/* IBSS channel undefined for now. */
7776 	ic->ic_ibss_chan = &ic->ic_channels[1];
7777 
7778 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
7779 
7780 	ifp->if_softc = sc;
7781 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
7782 	ifp->if_ioctl = iwx_ioctl;
7783 	ifp->if_start = iwx_start;
7784 	ifp->if_watchdog = iwx_watchdog;
7785 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
7786 
7787 	if_attach(ifp);
7788 	ieee80211_ifattach(ifp);
7789 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
7790 
7791 #if NBPFILTER > 0
7792 	iwx_radiotap_attach(sc);
7793 #endif
7794 	timeout_set(&sc->sc_calib_to, iwx_calib_timeout, sc);
7795 	task_set(&sc->init_task, iwx_init_task, sc);
7796 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
7797 	task_set(&sc->ba_task, iwx_ba_task, sc);
7798 	task_set(&sc->htprot_task, iwx_htprot_task, sc);
7799 
7800 	ic->ic_node_alloc = iwx_node_alloc;
7801 #ifdef notyet
7802 	/* TODO: background scans trigger firmware errors */
7803 	ic->ic_bgscan_start = iwx_bgscan;
7804 #endif
7805 
7806 	/* Override 802.11 state transition machine. */
7807 	sc->sc_newstate = ic->ic_newstate;
7808 	ic->ic_newstate = iwx_newstate;
7809 	ic->ic_update_htprot = iwx_update_htprot;
7810 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
7811 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
7812 #ifdef notyet
7813 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
7814 	ic->ic_ampdu_tx_stop = iwx_ampdu_tx_stop;
7815 #endif
7816 	/*
7817 	 * We cannot read the MAC address without loading the
7818 	 * firmware from disk. Postpone until mountroot is done.
7819 	 */
7820 	config_mountroot(self, iwx_attach_hook);
7821 
7822 	return;
7823 
7824 fail4:	while (--txq_i >= 0)
7825 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
7826 	iwx_free_rx_ring(sc, &sc->rxq);
7827 	iwx_dma_contig_free(&sc->sched_dma);
7828 fail3:	if (sc->ict_dma.vaddr != NULL)
7829 		iwx_dma_contig_free(&sc->ict_dma);
7830 
7831 fail1:	iwx_dma_contig_free(&sc->fw_dma);
7832 	return;
7833 }
7834 
7835 #if NBPFILTER > 0
7836 void
7837 iwx_radiotap_attach(struct iwx_softc *sc)
7838 {
7839 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
7840 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
7841 
7842 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
7843 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
7844 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
7845 
7846 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
7847 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
7848 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
7849 }
7850 #endif
7851 
7852 void
7853 iwx_init_task(void *arg1)
7854 {
7855 	struct iwx_softc *sc = arg1;
7856 	struct ifnet *ifp = &sc->sc_ic.ic_if;
7857 	int s = splnet();
7858 	int generation = sc->sc_generation;
7859 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
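	/*
	 * Sample fatal conditions now: after a HW error or rfkill we
	 * still stop the interface below but must not re-init it.
	 */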
7860 
7861 	rw_enter_write(&sc->ioctl_rwl);
7862 	if (generation != sc->sc_generation) {
7863 		rw_exit(&sc->ioctl_rwl);
7864 		splx(s);
7865 		return;
7866 	}
7867 
7868 	if (ifp->if_flags & IFF_RUNNING)
7869 		iwx_stop(ifp);
7870 	else
7871 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
7872 
7873 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
7874 		iwx_init(ifp);
7875 
7876 	rw_exit(&sc->ioctl_rwl);
7877 	splx(s);
7878 }
7879 
7880 int
7881 iwx_resume(struct iwx_softc *sc)
7882 {
7883 	pcireg_t reg;
7884 
7885 	/* Clear device-specific "PCI retry timeout" register (41h). */
7886 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7887 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7888 
7889 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
7890 	iwx_conf_msix_hw(sc, 0);
7891 
7892 	iwx_enable_rfkill_int(sc);
7893 	iwx_check_rfkill(sc);
7894 
7895 	return iwx_prepare_card_hw(sc);
7896 }
7897 
7898 int
7899 iwx_activate(struct device *self, int act)
7900 {
7901 	struct iwx_softc *sc = (struct iwx_softc *)self;
7902 	struct ifnet *ifp = &sc->sc_ic.ic_if;
7903 	int err = 0;
7904 
7905 	switch (act) {
7906 	case DVACT_QUIESCE:
7907 		if (ifp->if_flags & IFF_RUNNING) {
7908 			rw_enter_write(&sc->ioctl_rwl);
7909 			iwx_stop(ifp);
7910 			rw_exit(&sc->ioctl_rwl);
7911 		}
7912 		break;
7913 	case DVACT_RESUME:
7914 		err = iwx_resume(sc);
7915 		if (err)
7916 			printf("%s: could not initialize hardware\n",
7917 			    DEVNAME(sc));
7918 		break;
7919 	case DVACT_WAKEUP:
7920 		/* Hardware should be up at this point. */
7921 		if (iwx_set_hw_ready(sc))
7922 			task_add(systq, &sc->init_task);
7923 		break;
7924 	}
7925 
7926 	return 0;
7927 }
7928 
7929 struct cfdriver iwx_cd = {
7930 	NULL, "iwx", DV_IFNET
7931 };
7932 
7933 struct cfattach iwx_ca = {
7934 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
7935 	NULL, iwx_activate
7936 };
7937