xref: /netbsd-src/sys/dev/pci/if_iwm.c (revision e89934bbf778a6d6d6894877c4da59d0c7835b0f)
1 /*	$NetBSD: if_iwm.c,v 1.70 2017/02/02 10:05:35 nonaka Exp $	*/
2 /*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
3 #define IEEE80211_NO_HT
4 /*
5  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
6  *   Author: Stefan Sperling <stsp@openbsd.org>
7  * Copyright (c) 2014 Fixup Software Ltd.
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include <sys/cdefs.h>
110 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.70 2017/02/02 10:05:35 nonaka Exp $");
111 
112 #include <sys/param.h>
113 #include <sys/conf.h>
114 #include <sys/kernel.h>
115 #include <sys/kmem.h>
116 #include <sys/mbuf.h>
117 #include <sys/mutex.h>
118 #include <sys/proc.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/sysctl.h>
122 #include <sys/systm.h>
123 
124 #include <sys/cpu.h>
125 #include <sys/bus.h>
126 #include <sys/workqueue.h>
127 #include <machine/endian.h>
128 #include <sys/intr.h>
129 
130 #include <dev/pci/pcireg.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcidevs.h>
133 #include <dev/firmload.h>
134 
135 #include <net/bpf.h>
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 #include <net/if_ether.h>
140 
141 #include <netinet/in.h>
142 #include <netinet/ip.h>
143 
144 #include <net80211/ieee80211_var.h>
145 #include <net80211/ieee80211_amrr.h>
146 #include <net80211/ieee80211_radiotap.h>
147 
148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
150 
151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
153 
154 #ifdef IWM_DEBUG
155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
157 int iwm_debug = 0;
158 #else
159 #define DPRINTF(x)	do { ; } while (0)
160 #define DPRINTFN(n, x)	do { ; } while (0)
161 #endif
162 
163 #include <dev/pci/if_iwmreg.h>
164 #include <dev/pci/if_iwmvar.h>
165 
/*
 * Channel numbers corresponding to the NVM channel-flags section on
 * pre-8000-series devices; consumed by iwm_init_channel_map().
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
174 
/*
 * Channel numbers for 8000-series devices, which expose a larger
 * 5 GHz channel set than the table above.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
183 
/* Number of 2.4 GHz entries at the head of the NVM channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14

/*
 * Rate table mapping net80211 rates (in units of 500 kbit/s, so 2 is
 * 1 Mbit/s) to the PLCP values handed to the firmware, for both legacy
 * and HT SISO MCS modulation.  Entries whose ht_plcp is
 * IWM_RATE_HT_SISO_MCS_INV_PLCP have no HT equivalent; the final entry
 * (rate 128) is MCS 7 only and carries no valid legacy PLCP.
 */
static const struct iwm_rate {
	uint8_t rate;		/* net80211 rate, 500 kbit/s units */
	uint8_t plcp;		/* legacy PLCP value for firmware */
	uint8_t ht_plcp;	/* HT SISO MCS PLCP value, or INV */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
/* Index of the first CCK rate (1 Mbit/s) in iwm_rates[]. */
#define IWM_RIDX_CCK	0
/* Index of the first OFDM rate (6 Mbit/s) in iwm_rates[]. */
#define IWM_RIDX_OFDM	4
/* Highest valid index into iwm_rates[]. */
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
211 
/*
 * NB: IEEE80211_NO_HT is defined near the top of this file, so this
 * table is currently compiled out along with the rest of the HT code.
 */
#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif
225 
/*
 * One section of device NVM content, filled by iwm_nvm_read_section()
 * and consumed by iwm_parse_nvm_sections().
 */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* section contents */
};
230 
/*
 * Deferred 802.11 state-transition request, queued to a workqueue and
 * processed by iwm_newstate_cb().
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target net80211 state */
	int ns_arg;			/* argument for the transition */
	int ns_generation;		/* NOTE(review): presumably detects stale
					 * requests superseded by a newer one --
					 * confirm against iwm_newstate_cb() */
};
237 
238 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
239 static int	iwm_firmware_store_section(struct iwm_softc *,
240 		    enum iwm_ucode_type, uint8_t *, size_t);
241 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
242 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
243 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
244 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
245 #ifdef IWM_DEBUG
246 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
247 #endif
248 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
249 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
250 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
251 static int	iwm_nic_lock(struct iwm_softc *);
252 static void	iwm_nic_unlock(struct iwm_softc *);
253 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
254 		    uint32_t);
255 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
256 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
257 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
258 		    bus_size_t, bus_size_t);
259 static void	iwm_dma_contig_free(struct iwm_dma_info *);
260 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
261 static void	iwm_disable_rx_dma(struct iwm_softc *);
262 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
263 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
264 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
265 		    int);
266 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
267 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
268 static void	iwm_enable_rfkill_int(struct iwm_softc *);
269 static int	iwm_check_rfkill(struct iwm_softc *);
270 static void	iwm_enable_interrupts(struct iwm_softc *);
271 static void	iwm_restore_interrupts(struct iwm_softc *);
272 static void	iwm_disable_interrupts(struct iwm_softc *);
273 static void	iwm_ict_reset(struct iwm_softc *);
274 static int	iwm_set_hw_ready(struct iwm_softc *);
275 static int	iwm_prepare_card_hw(struct iwm_softc *);
276 static void	iwm_apm_config(struct iwm_softc *);
277 static int	iwm_apm_init(struct iwm_softc *);
278 static void	iwm_apm_stop(struct iwm_softc *);
279 static int	iwm_allow_mcast(struct iwm_softc *);
280 static int	iwm_start_hw(struct iwm_softc *);
281 static void	iwm_stop_device(struct iwm_softc *);
282 static void	iwm_nic_config(struct iwm_softc *);
283 static int	iwm_nic_rx_init(struct iwm_softc *);
284 static int	iwm_nic_tx_init(struct iwm_softc *);
285 static int	iwm_nic_init(struct iwm_softc *);
286 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
287 static int	iwm_post_alive(struct iwm_softc *);
288 static struct iwm_phy_db_entry *
289 		iwm_phy_db_get_section(struct iwm_softc *,
290 		    enum iwm_phy_db_section_type, uint16_t);
291 static int	iwm_phy_db_set_section(struct iwm_softc *,
292 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
293 static int	iwm_is_valid_channel(uint16_t);
294 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
295 static uint16_t iwm_channel_id_to_papd(uint16_t);
296 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
297 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
298 		    uint8_t **, uint16_t *, uint16_t);
299 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
300 		    void *);
301 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
302 		    enum iwm_phy_db_section_type, uint8_t);
303 static int	iwm_send_phy_db_data(struct iwm_softc *);
304 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
305 		    struct iwm_time_event_cmd_v1 *);
306 static int	iwm_send_time_event_cmd(struct iwm_softc *,
307 		    const struct iwm_time_event_cmd_v2 *);
308 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
309 		    uint32_t, uint32_t);
310 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
311 		    uint16_t, uint8_t *, uint16_t *);
312 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
313 		    uint16_t *, size_t);
314 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
315 		    const uint8_t *, size_t);
316 #ifndef IEEE80211_NO_HT
317 static void	iwm_setup_ht_rates(struct iwm_softc *);
318 static void	iwm_htprot_task(void *);
319 static void	iwm_update_htprot(struct ieee80211com *,
320 		    struct ieee80211_node *);
321 static int	iwm_ampdu_rx_start(struct ieee80211com *,
322 		    struct ieee80211_node *, uint8_t);
323 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
324 		    struct ieee80211_node *, uint8_t);
325 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
326 		    uint8_t, uint16_t, int);
327 #ifdef notyet
328 static int	iwm_ampdu_tx_start(struct ieee80211com *,
329 		    struct ieee80211_node *, uint8_t);
330 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
331 		    struct ieee80211_node *, uint8_t);
332 #endif
333 static void	iwm_ba_task(void *);
334 #endif
335 
336 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
337 		    const uint16_t *, const uint16_t *, const uint16_t *,
338 		    const uint16_t *, const uint16_t *);
339 static void	iwm_set_hw_address_8000(struct iwm_softc *,
340 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
341 static int	iwm_parse_nvm_sections(struct iwm_softc *,
342 		    struct iwm_nvm_section *);
343 static int	iwm_nvm_init(struct iwm_softc *);
344 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
345 		    const uint8_t *, uint32_t);
346 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
347 		    const uint8_t *, uint32_t);
348 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
349 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
350 		    struct iwm_fw_sects *, int , int *);
351 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
352 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
353 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
354 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
355 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
356 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
357 		    enum iwm_ucode_type);
358 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
359 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
360 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
361 static int	iwm_get_signal_strength(struct iwm_softc *,
362 		    struct iwm_rx_phy_info *);
363 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
364 		    struct iwm_rx_packet *, struct iwm_rx_data *);
365 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
366 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
367 		    struct iwm_rx_data *);
368 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,		    struct iwm_node *);
369 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
370 		    struct iwm_rx_data *);
371 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
372 		    uint32_t);
373 #if 0
374 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
375 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
376 #endif
377 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
378 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
379 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
380 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
381 		    uint8_t, uint8_t);
382 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
383 		    uint8_t, uint8_t, uint32_t, uint32_t);
384 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
385 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
386 		    uint16_t, const void *);
387 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
388 		    uint32_t *);
389 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
390 		    const void *, uint32_t *);
391 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
392 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
393 #if 0
394 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
395 		    uint16_t);
396 #endif
397 static const struct iwm_rate *
398 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
399 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
400 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
401 		    struct ieee80211_node *, int);
402 static void	iwm_led_enable(struct iwm_softc *);
403 static void	iwm_led_disable(struct iwm_softc *);
404 static int	iwm_led_is_enabled(struct iwm_softc *);
405 static void	iwm_led_blink_timeout(void *);
406 static void	iwm_led_blink_start(struct iwm_softc *);
407 static void	iwm_led_blink_stop(struct iwm_softc *);
408 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
409 		    struct iwm_beacon_filter_cmd *);
410 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
411 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
412 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
413 		    int);
414 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
415 		    struct iwm_mac_power_cmd *);
416 static int	iwm_power_mac_update_mode(struct iwm_softc *,
417 		    struct iwm_node *);
418 static int	iwm_power_update_device(struct iwm_softc *);
419 #ifdef notyet
420 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
421 #endif
422 static int	iwm_disable_beacon_filter(struct iwm_softc *);
423 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
424 static int	iwm_add_aux_sta(struct iwm_softc *);
425 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
426 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
427 #ifdef notyet
428 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
429 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
430 #endif
431 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
432 		    struct iwm_scan_channel_cfg_lmac *, int);
433 static int	iwm_fill_probe_req(struct iwm_softc *,
434 		    struct iwm_scan_probe_req *);
435 static int	iwm_lmac_scan(struct iwm_softc *);
436 static int	iwm_config_umac_scan(struct iwm_softc *);
437 static int	iwm_umac_scan(struct iwm_softc *);
438 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
439 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
440 		    int *);
441 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
442 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
443 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
444 		    struct iwm_mac_data_sta *, int);
445 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
446 		    uint32_t, int);
447 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
448 static int	iwm_auth(struct iwm_softc *);
449 static int	iwm_assoc(struct iwm_softc *);
450 static void	iwm_calib_timeout(void *);
451 #ifndef IEEE80211_NO_HT
452 static void	iwm_setrates_task(void *);
453 static int	iwm_setrates(struct iwm_node *);
454 #endif
455 static int	iwm_media_change(struct ifnet *);
456 static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
457 		    int);
458 static void	iwm_newstate_cb(struct work *, void *);
459 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
460 static void	iwm_endscan(struct iwm_softc *);
461 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
462 		    struct ieee80211_node *);
463 static int	iwm_sf_config(struct iwm_softc *, int);
464 static int	iwm_send_bt_init_conf(struct iwm_softc *);
465 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
466 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
467 static int	iwm_init_hw(struct iwm_softc *);
468 static int	iwm_init(struct ifnet *);
469 static void	iwm_start(struct ifnet *);
470 static void	iwm_stop(struct ifnet *, int);
471 static void	iwm_watchdog(struct ifnet *);
472 static int	iwm_ioctl(struct ifnet *, u_long, void *);
473 #ifdef IWM_DEBUG
474 static const char *iwm_desc_lookup(uint32_t);
475 static void	iwm_nic_error(struct iwm_softc *);
476 static void	iwm_nic_umac_error(struct iwm_softc *);
477 #endif
478 static void	iwm_notif_intr(struct iwm_softc *);
479 static int	iwm_intr(void *);
480 static void	iwm_softintr(void *);
481 static int	iwm_preinit(struct iwm_softc *);
482 static void	iwm_attach_hook(device_t);
483 static void	iwm_attach(device_t, device_t, void *);
484 #if 0
485 static void	iwm_init_task(void *);
486 static int	iwm_activate(device_t, enum devact);
487 static void	iwm_wakeup(struct iwm_softc *);
488 #endif
489 static void	iwm_radiotap_attach(struct iwm_softc *);
490 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
491 
492 static int iwm_sysctl_root_num;
493 static int iwm_lar_disable;
494 
495 #ifndef	IWM_DEFAULT_MCC
496 #define	IWM_DEFAULT_MCC	"ZZ"
497 #endif
498 static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;
499 
500 static int
501 iwm_firmload(struct iwm_softc *sc)
502 {
503 	struct iwm_fw_info *fw = &sc->sc_fw;
504 	firmware_handle_t fwh;
505 	int err;
506 
507 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
508 		return 0;
509 
510 	/* Open firmware image. */
511 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
512 	if (err) {
513 		aprint_error_dev(sc->sc_dev,
514 		    "could not get firmware handle %s\n", sc->sc_fwname);
515 		return err;
516 	}
517 
518 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
519 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
520 		fw->fw_rawdata = NULL;
521 	}
522 
523 	fw->fw_rawsize = firmware_get_size(fwh);
524 	/*
525 	 * Well, this is how the Linux driver checks it ....
526 	 */
527 	if (fw->fw_rawsize < sizeof(uint32_t)) {
528 		aprint_error_dev(sc->sc_dev,
529 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
530 		err = EINVAL;
531 		goto out;
532 	}
533 
534 	/* Read the firmware. */
535 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
536 	if (fw->fw_rawdata == NULL) {
537 		aprint_error_dev(sc->sc_dev,
538 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
539 		err = ENOMEM;
540 		goto out;
541 	}
542 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
543 	if (err) {
544 		aprint_error_dev(sc->sc_dev,
545 		    "could not read firmware %s\n", sc->sc_fwname);
546 		goto out;
547 	}
548 
549 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
550  out:
551 	/* caller will release memory, if necessary */
552 
553 	firmware_close(fwh);
554 	return err;
555 }
556 
557 /*
558  * just maintaining status quo.
559  */
560 static void
561 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
562 {
563 	struct ieee80211com *ic = &sc->sc_ic;
564 	struct ieee80211_frame *wh;
565 	uint8_t subtype;
566 
567 	wh = mtod(m, struct ieee80211_frame *);
568 
569 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
570 		return;
571 
572 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
573 
574 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
575 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
576 		return;
577 
578 	int chan = le32toh(sc->sc_last_phy_info.channel);
579 	if (chan < __arraycount(ic->ic_channels))
580 		ic->ic_curchan = &ic->ic_channels[chan];
581 }
582 
583 static int
584 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
585 {
586 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
587 
588 	if (dlen < sizeof(*l) ||
589 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
590 		return EINVAL;
591 
592 	/* we don't actually store anything for now, always use s/w crypto */
593 
594 	return 0;
595 }
596 
597 static int
598 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
599     uint8_t *data, size_t dlen)
600 {
601 	struct iwm_fw_sects *fws;
602 	struct iwm_fw_onesect *fwone;
603 
604 	if (type >= IWM_UCODE_TYPE_MAX)
605 		return EINVAL;
606 	if (dlen < sizeof(uint32_t))
607 		return EINVAL;
608 
609 	fws = &sc->sc_fw.fw_sects[type];
610 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
611 		return EINVAL;
612 
613 	fwone = &fws->fw_sect[fws->fw_count];
614 
615 	/* first 32bit are device load offset */
616 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
617 
618 	/* rest is data */
619 	fwone->fws_data = data + sizeof(uint32_t);
620 	fwone->fws_len = dlen - sizeof(uint32_t);
621 
622 	/* for freeing the buffer during driver unload */
623 	fwone->fws_alloc = data;
624 	fwone->fws_allocsize = dlen;
625 
626 	fws->fw_count++;
627 	fws->fw_totlen += fwone->fws_len;
628 
629 	return 0;
630 }
631 
/*
 * Payload of an IWM_UCODE_TLV_DEF_CALIB firmware TLV: default
 * calibration control for one ucode image type.  Parsed by
 * iwm_set_default_calib(); fields are little-endian on the wire.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* enum iwm_ucode_type, LE32 */
	struct iwm_tlv_calib_ctrl calib;	/* flow/event trigger settings */
} __packed;
636 
637 static int
638 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
639 {
640 	const struct iwm_tlv_calib_data *def_calib = data;
641 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
642 
643 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
644 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
645 		    DEVNAME(sc), ucode_type));
646 		return EINVAL;
647 	}
648 
649 	sc->sc_default_calib[ucode_type].flow_trigger =
650 	    def_calib->calib.flow_trigger;
651 	sc->sc_default_calib[ucode_type].event_trigger =
652 	    def_calib->calib.event_trigger;
653 
654 	return 0;
655 }
656 
657 static int
658 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
659 {
660 	struct iwm_fw_info *fw = &sc->sc_fw;
661 	struct iwm_tlv_ucode_header *uhdr;
662 	struct iwm_ucode_tlv tlv;
663 	enum iwm_ucode_tlv_type tlv_type;
664 	uint8_t *data;
665 	int err, status;
666 	size_t len;
667 
668 	if (ucode_type != IWM_UCODE_TYPE_INIT &&
669 	    fw->fw_status == IWM_FW_STATUS_DONE)
670 		return 0;
671 
672 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
673 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
674 	} else {
675 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
676 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
677 	}
678 	status = fw->fw_status;
679 
680 	if (status == IWM_FW_STATUS_DONE)
681 		return 0;
682 
683 	err = iwm_firmload(sc);
684 	if (err) {
685 		aprint_error_dev(sc->sc_dev,
686 		    "could not read firmware %s (error %d)\n",
687 		    sc->sc_fwname, err);
688 		goto out;
689 	}
690 
691 	sc->sc_capaflags = 0;
692 	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
693 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
694 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
695 
696 	uhdr = (void *)fw->fw_rawdata;
697 	if (*(uint32_t *)fw->fw_rawdata != 0
698 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
699 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
700 		    sc->sc_fwname);
701 		err = EINVAL;
702 		goto out;
703 	}
704 
705 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
706 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
707 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
708 	    IWM_UCODE_API(le32toh(uhdr->ver)));
709 	data = uhdr->data;
710 	len = fw->fw_rawsize - sizeof(*uhdr);
711 
712 	while (len >= sizeof(tlv)) {
713 		size_t tlv_len;
714 		void *tlv_data;
715 
716 		memcpy(&tlv, data, sizeof(tlv));
717 		tlv_len = le32toh(tlv.length);
718 		tlv_type = le32toh(tlv.type);
719 
720 		len -= sizeof(tlv);
721 		data += sizeof(tlv);
722 		tlv_data = data;
723 
724 		if (len < tlv_len) {
725 			aprint_error_dev(sc->sc_dev,
726 			    "firmware too short: %zu bytes\n", len);
727 			err = EINVAL;
728 			goto parse_out;
729 		}
730 
731 		switch (tlv_type) {
732 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
733 			if (tlv_len < sizeof(uint32_t)) {
734 				err = EINVAL;
735 				goto parse_out;
736 			}
737 			sc->sc_capa_max_probe_len
738 			    = le32toh(*(uint32_t *)tlv_data);
739 			/* limit it to something sensible */
740 			if (sc->sc_capa_max_probe_len >
741 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
742 				err = EINVAL;
743 				goto parse_out;
744 			}
745 			break;
746 		case IWM_UCODE_TLV_PAN:
747 			if (tlv_len) {
748 				err = EINVAL;
749 				goto parse_out;
750 			}
751 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
752 			break;
753 		case IWM_UCODE_TLV_FLAGS:
754 			if (tlv_len < sizeof(uint32_t)) {
755 				err = EINVAL;
756 				goto parse_out;
757 			}
758 			/*
759 			 * Apparently there can be many flags, but Linux driver
760 			 * parses only the first one, and so do we.
761 			 *
762 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
763 			 * Intentional or a bug?  Observations from
764 			 * current firmware file:
765 			 *  1) TLV_PAN is parsed first
766 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
767 			 * ==> this resets TLV_PAN to itself... hnnnk
768 			 */
769 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
770 			break;
771 		case IWM_UCODE_TLV_CSCHEME:
772 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
773 			if (err)
774 				goto parse_out;
775 			break;
776 		case IWM_UCODE_TLV_NUM_OF_CPU: {
777 			uint32_t num_cpu;
778 			if (tlv_len != sizeof(uint32_t)) {
779 				err = EINVAL;
780 				goto parse_out;
781 			}
782 			num_cpu = le32toh(*(uint32_t *)tlv_data);
783 			if (num_cpu < 1 || num_cpu > 2) {
784 				err = EINVAL;
785 				goto parse_out;
786 			}
787 			break;
788 		}
789 		case IWM_UCODE_TLV_SEC_RT:
790 			err = iwm_firmware_store_section(sc,
791 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
792 			if (err)
793 				goto parse_out;
794 			break;
795 		case IWM_UCODE_TLV_SEC_INIT:
796 			err = iwm_firmware_store_section(sc,
797 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
798 			if (err)
799 				goto parse_out;
800 			break;
801 		case IWM_UCODE_TLV_SEC_WOWLAN:
802 			err = iwm_firmware_store_section(sc,
803 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
804 			if (err)
805 				goto parse_out;
806 			break;
807 		case IWM_UCODE_TLV_DEF_CALIB:
808 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
809 				err = EINVAL;
810 				goto parse_out;
811 			}
812 			err = iwm_set_default_calib(sc, tlv_data);
813 			if (err)
814 				goto parse_out;
815 			break;
816 		case IWM_UCODE_TLV_PHY_SKU:
817 			if (tlv_len != sizeof(uint32_t)) {
818 				err = EINVAL;
819 				goto parse_out;
820 			}
821 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
822 			break;
823 
824 		case IWM_UCODE_TLV_API_CHANGES_SET: {
825 			struct iwm_ucode_api *api;
826 			if (tlv_len != sizeof(*api)) {
827 				err = EINVAL;
828 				goto parse_out;
829 			}
830 			api = (struct iwm_ucode_api *)tlv_data;
831 			/* Flags may exceed 32 bits in future firmware. */
832 			if (le32toh(api->api_index) > 0) {
833 				err = EINVAL;
834 				goto parse_out;
835 			}
836 			sc->sc_ucode_api = le32toh(api->api_flags);
837 			break;
838 		}
839 
840 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
841 			struct iwm_ucode_capa *capa;
842 			int idx, i;
843 			if (tlv_len != sizeof(*capa)) {
844 				err = EINVAL;
845 				goto parse_out;
846 			}
847 			capa = (struct iwm_ucode_capa *)tlv_data;
848 			idx = le32toh(capa->api_index);
849 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
850 				err = EINVAL;
851 				goto parse_out;
852 			}
853 			for (i = 0; i < 32; i++) {
854 				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
855 					continue;
856 				setbit(sc->sc_enabled_capa, i + (32 * idx));
857 			}
858 			break;
859 		}
860 
861 		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
862 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
863 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
864 			/* ignore, not used by current driver */
865 			break;
866 
867 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
868 			err = iwm_firmware_store_section(sc,
869 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
870 			    tlv_len);
871 			if (err)
872 				goto parse_out;
873 			break;
874 
875 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
876 			if (tlv_len != sizeof(uint32_t)) {
877 				err = EINVAL;
878 				goto parse_out;
879 			}
880 			sc->sc_capa_n_scan_channels =
881 			  le32toh(*(uint32_t *)tlv_data);
882 			break;
883 
884 		case IWM_UCODE_TLV_FW_VERSION:
885 			if (tlv_len != sizeof(uint32_t) * 3) {
886 				err = EINVAL;
887 				goto parse_out;
888 			}
889 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
890 			    "%d.%d.%d",
891 			    le32toh(((uint32_t *)tlv_data)[0]),
892 			    le32toh(((uint32_t *)tlv_data)[1]),
893 			    le32toh(((uint32_t *)tlv_data)[2]));
894 			break;
895 
896 		default:
897 			DPRINTF(("%s: unknown firmware section %d, abort\n",
898 			    DEVNAME(sc), tlv_type));
899 			err = EINVAL;
900 			goto parse_out;
901 		}
902 
903 		len -= roundup(tlv_len, 4);
904 		data += roundup(tlv_len, 4);
905 	}
906 
907 	KASSERT(err == 0);
908 
909  parse_out:
910 	if (err) {
911 		aprint_error_dev(sc->sc_dev,
912 		    "firmware parse error, section type %d\n", tlv_type);
913 	}
914 
915 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
916 		aprint_error_dev(sc->sc_dev,
917 		    "device uses unsupported power ops\n");
918 		err = ENOTSUP;
919 	}
920 
921  out:
922 	if (err)
923 		fw->fw_status = IWM_FW_STATUS_NONE;
924 	else
925 		fw->fw_status = IWM_FW_STATUS_DONE;
926 	wakeup(&sc->sc_fw);
927 
928 	if (err && fw->fw_rawdata != NULL) {
929 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
930 		fw->fw_rawdata = NULL;
931 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
932 		/* don't touch fw->fw_status */
933 		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
934 	}
935 	return err;
936 }
937 
/*
 * Read a periphery (PRPH) register.  The low 20 bits of the address are
 * posted to the indirect-read address register with control bits
 * (3 << 24) OR'd in, a barrier orders the access, then the value is
 * fetched from the data register.  Caller must hold the NIC lock.
 */
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
946 
/*
 * Write a periphery (PRPH) register.  Mirror of iwm_read_prph(): post
 * the masked address plus control bits, barrier, then write the value
 * through the indirect data register.  Caller must hold the NIC lock.
 */
static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
955 
956 #ifdef IWM_DEBUG
/*
 * Read 'dwords' 32-bit words of device memory starting at 'addr' into
 * 'buf'.  The address register is written once; repeated reads of the
 * data register return consecutive words (address presumably
 * auto-increments, mirroring the WADDR note in iwm_write_mem()).
 * Returns 0 on success, EBUSY if the NIC could not be locked.
 */
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
972 #endif
973 
974 static int
975 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
976 {
977 	int offs;
978 	const uint32_t *vals = buf;
979 
980 	if (iwm_nic_lock(sc)) {
981 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
982 		/* WADDR auto-increments */
983 		for (offs = 0; offs < dwords; offs++) {
984 			uint32_t val = vals ? vals[offs] : 0;
985 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
986 		}
987 		iwm_nic_unlock(sc);
988 		return 0;
989 	}
990 	return EBUSY;
991 }
992 
/* Convenience wrapper: write a single 32-bit word of device memory. */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
998 
999 static int
1000 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1001     int timo)
1002 {
1003 	for (;;) {
1004 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1005 			return 1;
1006 		}
1007 		if (timo < 10) {
1008 			return 0;
1009 		}
1010 		timo -= 10;
1011 		DELAY(10);
1012 	}
1013 }
1014 
/*
 * Request MAC access (wake the NIC out of power save) before touching
 * device registers.  Returns 1 on success, 0 on failure; on failure a
 * forced NMI is issued to reset the device.  Pair with iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	/* An in-flight host command already keeps the NIC awake. */
	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* 8000 family needs a short delay after raising the request. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Wait up to ~15ms for the MAC clock to become ready. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
1041 
/*
 * Release the MAC access request taken by iwm_nic_lock(), allowing the
 * NIC to sleep again.  A no-op while a host command holds the NIC awake.
 */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{

	if (sc->sc_cmd_hold_nic_awake)
		return;

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
1052 
1053 static void
1054 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1055     uint32_t mask)
1056 {
1057 	uint32_t val;
1058 
1059 	/* XXX: no error path? */
1060 	if (iwm_nic_lock(sc)) {
1061 		val = iwm_read_prph(sc, reg) & mask;
1062 		val |= bits;
1063 		iwm_write_prph(sc, reg, val);
1064 		iwm_nic_unlock(sc);
1065 	}
1066 }
1067 
/* Set 'bits' in a PRPH register, preserving all other bits. */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
1073 
/* Clear 'bits' in a PRPH register, preserving all other bits. */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1079 
/*
 * Allocate a physically contiguous, zeroed DMA area of 'size' bytes
 * with the given alignment and record it in 'dma'.  On any failure the
 * partially constructed state is torn down via iwm_dma_contig_free()
 * and the bus_dma error code is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* One map with a single segment covering the whole area. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Zero the area and push the zeros out to the device's view. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
1119 
1120 static void
1121 iwm_dma_contig_free(struct iwm_dma_info *dma)
1122 {
1123 	if (dma->map != NULL) {
1124 		if (dma->vaddr != NULL) {
1125 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1126 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1127 			bus_dmamap_unload(dma->tag, dma->map);
1128 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1129 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1130 			dma->vaddr = NULL;
1131 		}
1132 		bus_dmamap_destroy(dma->tag, dma->map);
1133 		dma->map = NULL;
1134 	}
1135 }
1136 
/*
 * Allocate the RX ring: the descriptor array, the status area, and one
 * DMA map plus receive buffer per ring slot.  On failure everything is
 * released via iwm_free_rx_ring() and the error is returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* One map and one pre-loaded buffer per ring entry. */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1187 
/*
 * Disable the RX DMA channel and wait (up to ~10ms) for the channel to
 * report idle.  Silently does nothing if the NIC cannot be locked.
 */
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}
1204 
/*
 * Reset the RX ring to its initial state: rewind the cursor and clear
 * the status area (synced so the device sees the cleared state).
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1213 
/*
 * Free everything attached to the RX ring: descriptor and status DMA
 * areas, then each slot's mbuf and DMA map.  Tolerates partially
 * constructed rings (NULL checks on each resource).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}
1238 
/*
 * Allocate TX ring 'qid': the TFD descriptor array for every ring, and
 * additionally (for rings up to and including the command queue) the
 * device-command array plus per-slot DMA maps.  On failure everything
 * is released via iwm_free_tx_ring() and the error is returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute per-slot command/scratch physical addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* paddr must have walked exactly over the whole command area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1308 
/*
 * Drop the "host command in flight" wakelock taken by
 * iwm_set_cmd_in_flight(), letting the NIC sleep again.  Only relevant
 * on NICs with the apmg wake-up workaround.
 */
static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	/* Should only be called while the flag is set. */
	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
1326 
/*
 * Keep the NIC awake while a host command is in flight (on NICs with
 * the apmg wake-up workaround).  Returns 0 on success or EIO if the
 * NIC failed to wake within ~15ms.  Balanced by
 * iwm_clear_cmd_in_flight().
 */
static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			/* Undo the access request before failing. */
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}
/*
 * Reset a TX ring: free any queued mbufs, clear the descriptors, and
 * rewind the ring state.  If this is the command queue and a command
 * wakelock is held, release it.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
		iwm_clear_cmd_in_flight(sc);
}
1387 
/*
 * Free everything attached to a TX ring: descriptor and command DMA
 * areas, then each slot's mbuf and DMA map.  Tolerates partially
 * constructed rings (NULL checks on each resource).
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}
1412 
/* Mask all interrupts except the RF-kill switch interrupt. */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1419 
1420 static int
1421 iwm_check_rfkill(struct iwm_softc *sc)
1422 {
1423 	uint32_t v;
1424 	int s;
1425 	int rv;
1426 
1427 	s = splnet();
1428 
1429 	/*
1430 	 * "documentation" is not really helpful here:
1431 	 *  27:	HW_RF_KILL_SW
1432 	 *	Indicates state of (platform's) hardware RF-Kill switch
1433 	 *
1434 	 * But apparently when it's off, it's on ...
1435 	 */
1436 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1437 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1438 	if (rv) {
1439 		sc->sc_flags |= IWM_FLAG_RFKILL;
1440 	} else {
1441 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1442 	}
1443 
1444 	splx(s);
1445 	return rv;
1446 }
1447 
/* Enable the default set of interrupts and remember the mask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1454 
/* Re-program the interrupt mask last saved in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1460 
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1474 
/*
 * Reset the interrupt cause table (ICT): zero the table, program its
 * physical address into the device, switch the driver to ICT interrupt
 * mode, and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Clear the table and make the zeros visible to the device. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending, then unmask. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1498 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Ask the device to become ready: set NIC_READY, poll it back (up to
 * IWM_HW_READY_TIMEOUT microseconds) and, once ready, tell the device
 * the OS is alive.  Returns nonzero when the device reported ready.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT
1519 
1520 static int
1521 iwm_prepare_card_hw(struct iwm_softc *sc)
1522 {
1523 	int t = 0;
1524 
1525 	if (iwm_set_hw_ready(sc))
1526 		return 0;
1527 
1528 	DELAY(100);
1529 
1530 	/* If HW is not ready, prepare the conditions to check again */
1531 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1532 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1533 
1534 	do {
1535 		if (iwm_set_hw_ready(sc))
1536 			return 0;
1537 		DELAY(200);
1538 		t += 200;
1539 	} while (t < 150000);
1540 
1541 	return ETIMEDOUT;
1542 }
1543 
/*
 * Configure L0S power state handling based on whether PCIe ASPM L1 is
 * enabled in the link control register.  Note the inverted-looking
 * logic, which follows the Linux driver (see comments below).
 */
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1561 
1562 /*
1563  * Start up NIC's basic functionality after it has been reset
1564  * e.g. after platform boot or shutdown.
1565  * NOTE:  This does not load uCode nor start the embedded processor
1566  */
1567 static int
1568 iwm_apm_init(struct iwm_softc *sc)
1569 {
1570 	int err = 0;
1571 
1572 	/* Disable L0S exit timer (platform NMI workaround) */
1573 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1574 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1575 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1576 	}
1577 
1578 	/*
1579 	 * Disable L0s without affecting L1;
1580 	 *  don't wait for ICH L0s (ICH bug W/A)
1581 	 */
1582 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1583 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1584 
1585 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1586 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1587 
1588 	/*
1589 	 * Enable HAP INTA (interrupt from management bus) to
1590 	 * wake device's PCI Express link L1a -> L0s
1591 	 */
1592 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1593 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1594 
1595 	iwm_apm_config(sc);
1596 
1597 #if 0 /* not for 7k/8k */
1598 	/* Configure analog phase-lock-loop before activating to D0A */
1599 	if (trans->cfg->base_params->pll_cfg_val)
1600 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1601 		    trans->cfg->base_params->pll_cfg_val);
1602 #endif
1603 
1604 	/*
1605 	 * Set "initialization complete" bit to move adapter from
1606 	 * D0U* --> D0A* (powered-up active) state.
1607 	 */
1608 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1609 
1610 	/*
1611 	 * Wait for clock stabilization; once stabilized, access to
1612 	 * device-internal resources is supported, e.g. iwm_write_prph()
1613 	 * and accesses to uCode SRAM.
1614 	 */
1615 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1616 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1617 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1618 		aprint_error_dev(sc->sc_dev,
1619 		    "timeout waiting for clock stabilization\n");
1620 		err = ETIMEDOUT;
1621 		goto out;
1622 	}
1623 
1624 	if (sc->host_interrupt_operation_mode) {
1625 		/*
1626 		 * This is a bit of an abuse - This is needed for 7260 / 3160
1627 		 * only check host_interrupt_operation_mode even if this is
1628 		 * not related to host_interrupt_operation_mode.
1629 		 *
1630 		 * Enable the oscillator to count wake up time for L1 exit. This
1631 		 * consumes slightly more power (100uA) - but allows to be sure
1632 		 * that we wake up from L1 on time.
1633 		 *
1634 		 * This looks weird: read twice the same register, discard the
1635 		 * value, set a bit, and yet again, read that same register
1636 		 * just to discard the value. But that's the way the hardware
1637 		 * seems to like it.
1638 		 */
1639 		iwm_read_prph(sc, IWM_OSC_CLK);
1640 		iwm_read_prph(sc, IWM_OSC_CLK);
1641 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1642 		iwm_read_prph(sc, IWM_OSC_CLK);
1643 		iwm_read_prph(sc, IWM_OSC_CLK);
1644 	}
1645 
1646 	/*
1647 	 * Enable DMA clock and wait for it to stabilize.
1648 	 *
1649 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1650 	 * do not disable clocks.  This preserves any hardware bits already
1651 	 * set by default in "CLK_CTRL_REG" after reset.
1652 	 */
1653 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1654 		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1655 		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1656 		DELAY(20);
1657 
1658 		/* Disable L1-Active */
1659 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1660 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1661 
1662 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1663 		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1664 		    IWM_APMG_RTC_INT_STT_RFKILL);
1665 	}
1666  out:
1667 	if (err)
1668 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1669 	return err;
1670 }
1671 
/*
 * Stop the device's bus-master DMA activity and wait (up to 100us) for
 * the master-disabled bit to be reported.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1684 
/*
 * Bring the hardware up from cold: prepare the card, software-reset the
 * whole device, run APM init, then arm the RF-kill interrupt and sample
 * the current RF-kill state.  Returns 0 or an errno from a failed step.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1707 
/*
 * Stop the device completely: mask interrupts, halt the TX scheduler
 * and all DMA channels, reset all rings, power down the busmaster DMA
 * clocks (7000 family), put the APM in low power, and software-reset
 * the on-board processor.  The RF-kill interrupt is left armed so
 * switch changes are still seen while the device is down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to ~4ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			DELAY(5);
			iwm_nic_unlock(sc);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1774 
/*
 * Program the HW interface configuration register from the hardware
 * revision (MAC step/dash) and the radio configuration fields extracted
 * from the firmware PHY config (sc_fw_phy_config).
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Extract the radio type/step/dash fields from the FW PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1814 
/*
 * Initialize the device's RX path: clear the status area, program the
 * descriptor and status physical addresses, enable the RX channel with
 * its configuration, and prime the write pointer.  Returns 0 on
 * success, EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Clear the RX status area and push the zeros to the device. */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1866 
/*
 * Initialize the device's TX path: deactivate the scheduler, program
 * the "keep warm" page and each TX ring's descriptor base address, and
 * enable the scheduler's auto-active mode.  Returns 0 on success,
 * EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1897 
/*
 * Bring the NIC up: APM init, power-source selection (7000 family),
 * device-specific configuration, then RX and TX DMA setup, and finally
 * shadow-register enablement.  Returns 0 or an errno from the ring
 * init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int err;

	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* Run the device from the main power rail (VMAIN). */
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
	}

	iwm_nic_config(sc);

	err = iwm_nic_rx_init(sc);
	if (err)
		return err;

	err = iwm_nic_tx_init(sc);
	if (err)
		return err;

	DPRINTF(("shadow registers enabled\n"));
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1925 
/*
 * TX FIFO used for each EDCA access category.
 * NOTE(review): ordering here is VO, VI, BE, BK -- confirm it matches
 * the AC index values this table is looked up with.
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
1932 
1933 static int
1934 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1935 {
1936 	if (!iwm_nic_lock(sc)) {
1937 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1938 		return EBUSY;
1939 	}
1940 
1941 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1942 
1943 	if (qid == IWM_CMD_QUEUE) {
1944 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1945 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1946 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1947 
1948 		iwm_nic_unlock(sc);
1949 
1950 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1951 
1952 		if (!iwm_nic_lock(sc))
1953 			return EBUSY;
1954 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1955 		iwm_nic_unlock(sc);
1956 
1957 		iwm_write_mem32(sc,
1958 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1959 
1960 		/* Set scheduler window size and frame limit. */
1961 		iwm_write_mem32(sc,
1962 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1963 		    sizeof(uint32_t),
1964 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1965 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1966 		    ((IWM_FRAME_LIMIT
1967 		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1968 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1969 
1970 		if (!iwm_nic_lock(sc))
1971 			return EBUSY;
1972 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1973 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1974 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1975 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1976 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1977 	} else {
1978 		struct iwm_scd_txq_cfg_cmd cmd;
1979 		int err;
1980 
1981 		iwm_nic_unlock(sc);
1982 
1983 		memset(&cmd, 0, sizeof(cmd));
1984 		cmd.scd_queue = qid;
1985 		cmd.enable = 1;
1986 		cmd.sta_id = sta_id;
1987 		cmd.tx_fifo = fifo;
1988 		cmd.aggregate = 0;
1989 		cmd.window = IWM_FRAME_LIMIT;
1990 
1991 		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
1992 		    &cmd);
1993 		if (err)
1994 			return err;
1995 
1996 		if (!iwm_nic_lock(sc))
1997 			return EBUSY;
1998 	}
1999 
2000 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2001 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
2002 
2003 	iwm_nic_unlock(sc);
2004 
2005 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2006 
2007 	return 0;
2008 }
2009 
/*
 * Finish firmware bring-up after the "alive" notification: reconcile
 * the scheduler SRAM base address reported by the firmware, reset the
 * ICT interrupt table, clear scheduler context state in SRAM, program
 * the scheduler DRAM base, enable the command queue and all FH TX DMA
 * channels.  Returns 0 or an errno (EBUSY when the NIC lock cannot be
 * taken).
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* The firmware reports where scheduler SRAM actually lives. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		sc->sched_base = base;
	}

	iwm_nic_unlock(sc);

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	iwm_nic_unlock(sc);

	return 0;
}
2079 
2080 static struct iwm_phy_db_entry *
2081 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2082     uint16_t chg_id)
2083 {
2084 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2085 
2086 	if (type >= IWM_PHY_DB_MAX)
2087 		return NULL;
2088 
2089 	switch (type) {
2090 	case IWM_PHY_DB_CFG:
2091 		return &phy_db->cfg;
2092 	case IWM_PHY_DB_CALIB_NCH:
2093 		return &phy_db->calib_nch;
2094 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2095 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2096 			return NULL;
2097 		return &phy_db->calib_ch_group_papd[chg_id];
2098 	case IWM_PHY_DB_CALIB_CHG_TXP:
2099 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2100 			return NULL;
2101 		return &phy_db->calib_ch_group_txp[chg_id];
2102 	default:
2103 		return NULL;
2104 	}
2105 	return NULL;
2106 }
2107 
/*
 * Store a calibration result received from the firmware into the
 * in-memory PHY DB.  For the per-channel-group sections the first
 * 16-bit word of the payload selects the group index.  Uses the
 * interrupt-safe kmem variants since this runs from the RX path.
 * Returns 0, EINVAL for an unknown section, or ENOMEM.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	struct iwm_phy_db_entry *entry;
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	uint16_t chg_id = 0;

	/* NOTE(review): reading chg_id assumes size >= 2 -- not checked. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previously stored copy of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
2139 
/*
 * Return nonzero when ch_id is a channel number this driver's tables
 * know about: 2GHz channels up to 14, 5GHz channels 36-64 and 100-140
 * on multiples of 4, and 145-165 where the number is 1 mod 4
 * (149, 153, ..., 165).
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64)
		return (ch_id % 4) == 0;
	if (ch_id >= 100 && ch_id <= 140)
		return (ch_id % 4) == 0;
	if (ch_id >= 145 && ch_id <= 165)
		return (ch_id % 4) == 1;
	return 0;
}
2150 
/*
 * Translate a channel number to its index in the driver's flat channel
 * table: 1-14 map to 0-13, the 36-64 group to 14-21, the 100-140 group
 * to 22-32, and the 145-165 group to 33-38.  Returns 0xff when the
 * channel is not valid.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint16_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		idx = ch_id - 1;
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;
	else
		idx = (ch_id - 13) / 4;

	return idx;
}
2165 
2166 
/*
 * Map a channel number to its PAPD calibration channel group:
 * group 0 for 2GHz channels 1-14, 1 for 36-64, 2 for 100-140 and 3
 * for everything else that is valid.  Returns 0xff for invalid
 * channels.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		group = 0;
	else if (ch_id >= 36 && ch_id <= 64)
		group = 1;
	else if (ch_id >= 100 && ch_id <= 140)
		group = 2;
	else
		group = 3;

	return group;
}
2181 
2182 static uint16_t
2183 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2184 {
2185 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2186 	struct iwm_phy_db_chg_txp *txp_chg;
2187 	int i;
2188 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2189 
2190 	if (ch_index == 0xff)
2191 		return 0xff;
2192 
2193 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2194 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2195 		if (!txp_chg)
2196 			return 0xff;
2197 		/*
2198 		 * Looking for the first channel group the max channel
2199 		 * of which is higher than the requested channel.
2200 		 */
2201 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2202 			return i;
2203 	}
2204 	return 0xff;
2205 }
2206 
2207 static int
2208 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2209     uint16_t *size, uint16_t ch_id)
2210 {
2211 	struct iwm_phy_db_entry *entry;
2212 	uint16_t ch_group_id = 0;
2213 
2214 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2215 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2216 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2217 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2218 
2219 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2220 	if (!entry)
2221 		return EINVAL;
2222 
2223 	*data = entry->data;
2224 	*size = entry->size;
2225 
2226 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2227 		       __func__, __LINE__, type, *size));
2228 
2229 	return 0;
2230 }
2231 
2232 static int
2233 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2234     void *data)
2235 {
2236 	struct iwm_phy_db_cmd phy_db_cmd;
2237 	struct iwm_host_cmd cmd = {
2238 		.id = IWM_PHY_DB_CMD,
2239 		.flags = IWM_CMD_ASYNC,
2240 	};
2241 
2242 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2243 	    type, length));
2244 
2245 	phy_db_cmd.type = le16toh(type);
2246 	phy_db_cmd.length = le16toh(length);
2247 
2248 	cmd.data[0] = &phy_db_cmd;
2249 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2250 	cmd.data[1] = data;
2251 	cmd.len[1] = length;
2252 
2253 	return iwm_send_cmd(sc, &cmd);
2254 }
2255 
2256 static int
2257 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2258     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2259 {
2260 	uint16_t i;
2261 	int err;
2262 	struct iwm_phy_db_entry *entry;
2263 
2264 	/* Send all the channel-specific groups to operational fw */
2265 	for (i = 0; i < max_ch_groups; i++) {
2266 		entry = iwm_phy_db_get_section(sc, type, i);
2267 		if (!entry)
2268 			return EINVAL;
2269 
2270 		if (!entry->size)
2271 			continue;
2272 
2273 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2274 		if (err) {
2275 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2276 			    "err %d\n", DEVNAME(sc), type, i, err));
2277 			return err;
2278 		}
2279 
2280 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2281 		    DEVNAME(sc), type, i));
2282 
2283 		DELAY(1000);
2284 	}
2285 
2286 	return 0;
2287 }
2288 
2289 static int
2290 iwm_send_phy_db_data(struct iwm_softc *sc)
2291 {
2292 	uint8_t *data = NULL;
2293 	uint16_t size = 0;
2294 	int err;
2295 
2296 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2297 	if (err)
2298 		return err;
2299 
2300 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2301 	if (err)
2302 		return err;
2303 
2304 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2305 	    &data, &size, 0);
2306 	if (err)
2307 		return err;
2308 
2309 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2310 	if (err)
2311 		return err;
2312 
2313 	err = iwm_phy_db_send_all_channel_groups(sc,
2314 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2315 	if (err)
2316 		return err;
2317 
2318 	err = iwm_phy_db_send_all_channel_groups(sc,
2319 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2320 	if (err)
2321 		return err;
2322 
2323 	return 0;
2324 }
2325 
2326 /*
2327  * For the high priority TE use a time event type that has similar priority to
2328  * the FW's action scan priority.
2329  */
2330 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2331 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2332 
2333 /* used to convert from time event API v2 to v1 */
2334 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2335 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the notification bits from a little-endian TE v2 policy word. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
2341 
/*
 * Extract the dependency-policy bits from a little-endian TE v2 policy
 * word, shifted down for the v1 dep_policy field.
 */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
2348 
/* Extract the absence bit from a little-endian TE v2 policy word. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
2354 
/*
 * Convert a version-2 time event command into the version-1 layout for
 * firmware without the v2 API.  Most fields map 1:1; the packed v2
 * policy word is split back into the separate v1
 * dep_policy/is_present/notify fields, and the endless-repeat marker
 * is translated to its v1 value.
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2378 
2379 static int
2380 iwm_send_time_event_cmd(struct iwm_softc *sc,
2381     const struct iwm_time_event_cmd_v2 *cmd)
2382 {
2383 	struct iwm_time_event_cmd_v1 cmd_v1;
2384 
2385 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2386 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2387 		    cmd);
2388 
2389 	iwm_te_v2_to_v1(cmd, &cmd_v1);
2390 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2391 	    &cmd_v1);
2392 }
2393 
/*
 * Ask the firmware to protect a session (e.g. during association) by
 * scheduling an aggressive-association time event of the given
 * duration, starting within max_delay.  NOTE(review): duration and
 * max_delay are presumed to be in TU per the firmware's time event
 * API -- confirm against the iwlwifi documentation.
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	/* memset also clears struct padding, which goes on the wire. */
	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	iwm_send_time_event_cmd(sc, &time_cmd);
}
2422 
2423 /*
2424  * NVM read access and content parsing.  We do not support
2425  * external NVM or writing NVM.
2426  */
2427 
/*
 * list of NVM sections we are allowed/need to read
 * NOTE(review): includes both 7000- and 8000-family section types;
 * presumably sections a given device lacks are tolerated by the
 * reader -- verify against the caller.
 */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
2439 
2440 /* Default NVM size to read */
2441 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2442 #define IWM_MAX_NVM_SECTION_SIZE_7000	(16 * 512 * sizeof(uint16_t)) /*16 KB*/
2443 #define IWM_MAX_NVM_SECTION_SIZE_8000	(32 * 512 * sizeof(uint16_t)) /*32 KB*/
2444 
2445 #define IWM_NVM_WRITE_OPCODE 1
2446 #define IWM_NVM_READ_OPCODE 0
2447 
2448 static int
2449 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2450     uint16_t length, uint8_t *data, uint16_t *len)
2451 {
2452 	offset = 0;
2453 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2454 		.offset = htole16(offset),
2455 		.length = htole16(length),
2456 		.type = htole16(section),
2457 		.op_code = IWM_NVM_READ_OPCODE,
2458 	};
2459 	struct iwm_nvm_access_resp *nvm_resp;
2460 	struct iwm_rx_packet *pkt;
2461 	struct iwm_host_cmd cmd = {
2462 		.id = IWM_NVM_ACCESS_CMD,
2463 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2464 		.data = { &nvm_access_cmd, },
2465 	};
2466 	int err, offset_read;
2467 	size_t bytes_read;
2468 	uint8_t *resp_data;
2469 
2470 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2471 
2472 	err = iwm_send_cmd(sc, &cmd);
2473 	if (err) {
2474 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2475 		    DEVNAME(sc), err));
2476 		return err;
2477 	}
2478 
2479 	pkt = cmd.resp_pkt;
2480 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2481 		err = EIO;
2482 		goto exit;
2483 	}
2484 
2485 	/* Extract NVM response */
2486 	nvm_resp = (void *)pkt->data;
2487 
2488 	err = le16toh(nvm_resp->status);
2489 	bytes_read = le16toh(nvm_resp->length);
2490 	offset_read = le16toh(nvm_resp->offset);
2491 	resp_data = nvm_resp->data;
2492 	if (err) {
2493 		err = EINVAL;
2494 		goto exit;
2495 	}
2496 
2497 	if (offset_read != offset) {
2498 		err = EINVAL;
2499 		goto exit;
2500 	}
2501 	if (bytes_read > length) {
2502 		err = EINVAL;
2503 		goto exit;
2504 	}
2505 
2506 	memcpy(data + offset, resp_data, bytes_read);
2507 	*len = bytes_read;
2508 
2509  exit:
2510 	iwm_free_resp(sc, &cmd);
2511 	return err;
2512 }
2513 
2514 /*
2515  * Reads an NVM section completely.
2516  * NICs prior to 7000 family doesn't have a real NVM, but just read
2517  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2518  * by uCode, we need to manually check in this case that we don't
2519  * overflow and try to read more than the EEPROM size.
2520  */
2521 static int
2522 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2523     uint16_t *len, size_t max_len)
2524 {
2525 	uint16_t chunklen, seglen;
2526 	int err;
2527 
2528 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2529 	*len = 0;
2530 
2531 	/* Read NVM chunks until exhausted (reading less than requested) */
2532 	while (seglen == chunklen && *len < max_len) {
2533 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2534 		    &seglen);
2535 		if (err) {
2536 			DPRINTF(("%s: Cannot read NVM from section %d "
2537 			    "offset %d, length %d\n",
2538 			    DEVNAME(sc), section, *len, chunklen));
2539 			return err;
2540 		}
2541 		*len += seglen;
2542 	}
2543 
2544 	DPRINTFN(4, ("NVM section %d read completed\n", section));
2545 	return 0;
2546 }
2547 
2548 static uint8_t
2549 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2550 {
2551 	uint8_t tx_ant;
2552 
2553 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2554 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2555 
2556 	if (sc->sc_nvm.valid_tx_ant)
2557 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2558 
2559 	return tx_ant;
2560 }
2561 
2562 static uint8_t
2563 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2564 {
2565 	uint8_t rx_ant;
2566 
2567 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2568 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2569 
2570 	if (sc->sc_nvm.valid_rx_ant)
2571 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2572 
2573 	return rx_ant;
2574 }
2575 
/*
 * Populate ic->ic_channels from the channel numbers and flag words
 * found in the NVM.  Entries below index IWM_NUM_2GHZ_CHANNELS are
 * 2GHz; the rest are 5GHz and are invalidated when the SKU does not
 * enable the 5GHz band.  Channels not marked ACTIVE become
 * passive-scan-only.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, size_t nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		aprint_debug_dev(sc->sc_dev,
		    "Ch. %d: %svalid %cibss %s %cradar %cdfs"
		    " %cwide %c40MHz %c80MHz %c160MHz\n",
		    nvm_channels[ch_idx],
		    ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
		    ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
		    ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');

		/* Drop 5GHz channels if the SKU has no 5GHz support. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    nvm_channels[ch_idx], ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
			continue;
		}

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* NVM says no active (probe-request) scanning here. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

#ifndef IEEE80211_NO_HT
		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
#endif
	}
}
2642 
2643 #ifndef IEEE80211_NO_HT
/*
 * Advertise the supported HT MCS set (TX mirrors RX).
 * NOTE: this function is compiled out while IEEE80211_NO_HT is
 * defined at the top of this file.
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
2664 
2665 #define IWM_MAX_RX_BA_SESSIONS 16
2666 
/*
 * Start (start != 0) or tear down an RX block-ack session for
 * (ni, tid) by sending an IWM_ADD_STA modify command, then report the
 * outcome to the 802.11 layer.  At most IWM_MAX_RX_BA_SESSIONS
 * sessions may be active at once.
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	/* Refuse new sessions beyond the driver's limit. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update the session count and answer the ADDBA request. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);
	splx(s);
}
2713 
/*
 * Task-context worker: push updated HT protection settings for the
 * current BSS to the firmware via a MAC context modify command.
 */
static void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		aprint_error_dev(sc->sc_dev,
		    "could not change HT protection: error %d\n", err);
}
2728 
2729 /*
2730  * This function is called by upper layer when HT protection settings in
2731  * beacons have changed.
2732  */
/*
 * net80211 hook: defer the actual firmware update to task context,
 * since it sends a (sleeping) host command.
 */
static void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	task_add(systq, &sc->htprot_task);
}
2741 
/*
 * Task-context worker: apply the pending ADDBA/DELBA request recorded
 * in sc->ba_start/ba_tid/ba_ssn by the ampdu_rx_start/stop hooks.
 */
static void
iwm_ba_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;

	if (sc->ba_start)
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
	else
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
}
2754 
2755 /*
2756  * This function is called by upper layer when an ADDBA request is received
2757  * from another STA and before the ADDBA response is sent.
2758  */
static int
iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return ENOSPC;

	/* Record the request and hand it to task context. */
	sc->ba_start = 1;
	sc->ba_tid = tid;
	sc->ba_ssn = htole16(ba->ba_winstart);
	task_add(systq, &sc->ba_task);

	/*
	 * EBUSY defers the ADDBA response; iwm_sta_rx_agg() will
	 * accept or refuse the request once the firmware has answered.
	 */
	return EBUSY;
}
2776 
2777 /*
2778  * This function is called by upper layer on teardown of an HT-immediate
2779  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2780  */
static void
iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	/* Record the teardown and hand it to task context. */
	sc->ba_start = 0;
	sc->ba_tid = tid;
	task_add(systq, &sc->ba_task);
}
2791 #endif
2792 
/*
 * Determine the MAC address on 8000-family devices.  Preference order:
 * 1) the MAC-override NVM section, unless it contains the reserved
 *    address, a broadcast/zero address, or a multicast address;
 * 2) the OTP address read from the WFMP PRPH registers, whose bytes
 *    are stored in reverse order within each 32-bit word;
 * otherwise the address is zeroed and an error is logged.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Bytes within each register word are in reverse order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2851 
/*
 * Parse the raw NVM section contents into sc->sc_nvm: radio
 * configuration, NVM version, SKU capabilities, number of reserved HW
 * addresses, the station MAC address (16-bit-word byte-swapped on the
 * 7000 family), LAR enablement (8000 family only) and the channel map.
 * The 7000 and 8000 families keep these fields in different sections
 * and widths, hence the two code paths.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000 family: 32-bit fields in the PHY_SKU section. */
		uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
		sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* MAC address is stored with each 16-bit word byte-swapped. */
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		uint16_t lar_offset, lar_config;
		/* The LAR config word moved between NVM versions. */
		lar_offset = data->nvm_version < 0xE39 ?
		    IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
		lar_config = le16_to_cpup(regulatory + lar_offset);
                data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2925 
2926 static int
2927 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2928 {
2929 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
2930 	const uint16_t *regulatory = NULL;
2931 
2932 	/* Checking for required sections */
2933 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2934 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2935 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2936 			return ENOENT;
2937 		}
2938 
2939 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2940 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2941 		/* SW and REGULATORY sections are mandatory */
2942 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2943 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2944 			return ENOENT;
2945 		}
2946 		/* MAC_OVERRIDE or at least HW section must exist */
2947 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2948 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2949 			return ENOENT;
2950 		}
2951 
2952 		/* PHY_SKU section is mandatory in B0 */
2953 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2954 			return ENOENT;
2955 		}
2956 
2957 		regulatory = (const uint16_t *)
2958 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2959 		hw = (const uint16_t *)
2960 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2961 		mac_override =
2962 			(const uint16_t *)
2963 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2964 		phy_sku = (const uint16_t *)
2965 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2966 	} else {
2967 		panic("unknown device family %d\n", sc->sc_device_family);
2968 	}
2969 
2970 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2971 	calib = (const uint16_t *)
2972 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2973 
2974 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2975 	    phy_sku, regulatory);
2976 }
2977 
2978 static int
2979 iwm_nvm_init(struct iwm_softc *sc)
2980 {
2981 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2982 	int i, section, err;
2983 	uint16_t len;
2984 	uint8_t *buf;
2985 	const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
2986 	    IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
2987 
2988 	/* Read From FW NVM */
2989 	DPRINTF(("Read NVM\n"));
2990 
2991 	memset(nvm_sections, 0, sizeof(nvm_sections));
2992 
2993 	buf = kmem_alloc(bufsz, KM_SLEEP);
2994 	if (buf == NULL)
2995 		return ENOMEM;
2996 
2997 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
2998 		section = iwm_nvm_to_read[i];
2999 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
3000 
3001 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3002 		if (err) {
3003 			err = 0;
3004 			continue;
3005 		}
3006 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3007 		if (nvm_sections[section].data == NULL) {
3008 			err = ENOMEM;
3009 			break;
3010 		}
3011 		memcpy(nvm_sections[section].data, buf, len);
3012 		nvm_sections[section].length = len;
3013 	}
3014 	kmem_free(buf, bufsz);
3015 	if (err == 0)
3016 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3017 
3018 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3019 		if (nvm_sections[i].data != NULL)
3020 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3021 	}
3022 
3023 	return err;
3024 }
3025 
3026 static int
3027 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3028     const uint8_t *section, uint32_t byte_cnt)
3029 {
3030 	int err = EINVAL;
3031 	uint32_t chunk_sz, offset;
3032 
3033 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3034 
3035 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3036 		uint32_t addr, len;
3037 		const uint8_t *data;
3038 		bool is_extended = false;
3039 
3040 		addr = dst_addr + offset;
3041 		len = MIN(chunk_sz, byte_cnt - offset);
3042 		data = section + offset;
3043 
3044 		if (addr >= IWM_FW_MEM_EXTENDED_START &&
3045 		    addr <= IWM_FW_MEM_EXTENDED_END)
3046 			is_extended = true;
3047 
3048 		if (is_extended)
3049 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3050 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3051 
3052 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3053 
3054 		if (is_extended)
3055 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3056 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3057 
3058 		if (err)
3059 			break;
3060 	}
3061 
3062 	return err;
3063 }
3064 
/*
 * DMA one firmware chunk into device SRAM at 'dst_addr' via the FH
 * service channel and wait (up to 5s) for the completion interrupt.
 * Returns 0, EBUSY if the NIC could not be locked, or a tsleep errno.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
	    BUS_DMASYNC_PREWRITE);

	/* Cleared here; set elsewhere when the transfer completes. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Program the service channel: pause the DMA engine, set the
	 * destination SRAM address and the chunk's bus address/length,
	 * mark the buffer valid, then re-enable the channel.
	 * NOTE(review): this write sequence follows the iwlwifi
	 * reference driver (see file header); do not reorder.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		/* 5 second timeout per wait. */
		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
		if (err)
			break;
	}
	if (!sc->sc_fw_chunk_done) {
		DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
		    DEVNAME(sc), dst_addr, byte_cnt));
	}

	return err;
}
3116 
3117 static int
3118 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3119 {
3120 	struct iwm_fw_sects *fws;
3121 	int err, i;
3122 	void *data;
3123 	uint32_t dlen;
3124 	uint32_t offset;
3125 
3126 	fws = &sc->sc_fw.fw_sects[ucode_type];
3127 	for (i = 0; i < fws->fw_count; i++) {
3128 		data = fws->fw_sect[i].fws_data;
3129 		dlen = fws->fw_sect[i].fws_len;
3130 		offset = fws->fw_sect[i].fws_devoff;
3131 		if (dlen > sc->sc_fwdmasegsz) {
3132 			err = EFBIG;
3133 		} else
3134 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3135 		if (err) {
3136 			DPRINTF(("%s: could not load firmware chunk %u of %u\n",
3137 			    DEVNAME(sc), i, fws->fw_count));
3138 			return err;
3139 		}
3140 	}
3141 
3142 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3143 
3144 	return 0;
3145 }
3146 
/*
 * Upload the firmware sections belonging to one CPU of an 8000-family
 * device.  '*first_ucode_section' carries the section index across the
 * CPU1/CPU2 calls; iteration stops at a separator section or at the
 * first empty slot.  After each section the ucode is notified, via
 * IWM_FH_UCODE_LOAD_STATUS, of the sections loaded so far.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	/*
	 * CPU1 status bits occupy the low half of the load-status
	 * register, CPU2 bits the high half (shift_param = 16).
	 */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		/* Each section must fit within the firmware DMA segment. */
		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			DPRINTF(("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err));
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			/* One bit per loaded section, shifted for CPU2. */
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Mark this CPU's load complete by setting all status bits. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
3219 
3220 static int
3221 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3222 {
3223 	struct iwm_fw_sects *fws;
3224 	int err = 0;
3225 	int first_ucode_section;
3226 
3227 	fws = &sc->sc_fw.fw_sects[ucode_type];
3228 
3229 	/* configure the ucode to be ready to get the secured image */
3230 	/* release CPU reset */
3231 	if (iwm_nic_lock(sc)) {
3232 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3233 		    IWM_RELEASE_CPU_RESET_BIT);
3234 		iwm_nic_unlock(sc);
3235 	}
3236 
3237 	/* load to FW the binary Secured sections of CPU1 */
3238 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3239 	if (err)
3240 		return err;
3241 
3242 	/* load to FW the binary sections of CPU2 */
3243 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3244 }
3245 
3246 static int
3247 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3248 {
3249 	int err, w;
3250 
3251 	sc->sc_uc.uc_intr = 0;
3252 
3253 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3254 		err = iwm_load_firmware_8000(sc, ucode_type);
3255 	else
3256 		err = iwm_load_firmware_7000(sc, ucode_type);
3257 	if (err)
3258 		return err;
3259 
3260 	/* wait for the firmware to load */
3261 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3262 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3263 	if (err || !sc->sc_uc.uc_ok) {
3264 		aprint_error_dev(sc->sc_dev,
3265 		    "could not load firmware (error %d, ok %d)\n",
3266 		    err, sc->sc_uc.uc_ok);
3267 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3268 			aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3269 			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3270 			aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3271 			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3272 		}
3273 	}
3274 
3275 	return err;
3276 }
3277 
/*
 * Bring up the NIC and upload the requested firmware image.  Host
 * interrupts are enabled before the load so that firmware-chunk and
 * "alive" notifications can wake the waiters in iwm_load_firmware().
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack/clear all pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
3307 
3308 static int
3309 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3310 {
3311 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3312 		.valid = htole32(valid_tx_ant),
3313 	};
3314 
3315 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3316 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
3317 }
3318 
3319 static int
3320 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3321 {
3322 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3323 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3324 
3325 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3326 	phy_cfg_cmd.calib_control.event_trigger =
3327 	    sc->sc_default_calib[ucode_type].event_trigger;
3328 	phy_cfg_cmd.calib_control.flow_trigger =
3329 	    sc->sc_default_calib[ucode_type].flow_trigger;
3330 
3331 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3332 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3333 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3334 }
3335 
3336 static int
3337 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3338 {
3339 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3340 	int err;
3341 
3342 	err = iwm_read_firmware(sc, ucode_type);
3343 	if (err)
3344 		return err;
3345 
3346 	sc->sc_uc_current = ucode_type;
3347 	err = iwm_start_fw(sc, ucode_type);
3348 	if (err) {
3349 		sc->sc_uc_current = old_type;
3350 		return err;
3351 	}
3352 
3353 	return iwm_post_alive(sc);
3354 }
3355 
/*
 * Boot the INIT firmware image and run its startup sequence.  With
 * 'justnvm' set, only the NVM is read (MAC address, channel map) and
 * we return before any calibration.  Otherwise the init image's
 * internal calibrations are kicked off and we wait for the "init
 * complete" notification from the firmware.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Cleared here; set when the init-complete notification arrives. */
	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
		return err;
	}

	if (justnvm) {
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		/* Adopt the MAC address found in the NVM. */
		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	/* NOTE(review): IWM_SF_INIT_OFF presumably disables the "smart
	 * FIFO" during init -- confirm against the firmware API docs. */
	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
3418 
/*
 * Attach a fresh receive buffer to RX ring slot 'idx'.  'size' selects
 * between a regular mbuf cluster and a larger external buffer.  On
 * success the buffer's DMA address (shifted right by 8 -- the hardware
 * apparently takes addresses in 256-byte units; TODO confirm) is
 * published in the RX descriptor.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;	/* set once the old buffer has been unloaded */

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (data->m != NULL) {
		/*
		 * Replacing a loaded buffer: after unloading it we can
		 * no longer fail gracefully, since the slot would be
		 * left without a mapped buffer.
		 */
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3467 
3468 #define IWM_RSSI_OFFSET 50
3469 static int
3470 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3471 {
3472 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3473 	uint32_t agc_a, agc_b;
3474 	uint32_t val;
3475 
3476 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3477 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3478 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3479 
3480 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3481 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3482 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3483 
3484 	/*
3485 	 * dBm = rssi dB - agc dB - constant.
3486 	 * Higher AGC (higher radio gain) means lower signal.
3487 	 */
3488 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3489 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3490 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3491 
3492 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3493 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3494 
3495 	return max_rssi_dbm;
3496 }
3497 
3498 /*
3499  * RSSI values are reported by the FW as positive values - need to negate
3500  * to obtain their dBM.  Account for missing antennas by replacing 0
3501  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3502  */
3503 static int
3504 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3505 {
3506 	int energy_a, energy_b, energy_c, max_energy;
3507 	uint32_t val;
3508 
3509 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3510 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3511 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3512 	energy_a = energy_a ? -energy_a : -256;
3513 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3514 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3515 	energy_b = energy_b ? -energy_b : -256;
3516 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3517 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3518 	energy_c = energy_c ? -energy_c : -256;
3519 	max_energy = MAX(energy_a, energy_b);
3520 	max_energy = MAX(max_energy, energy_c);
3521 
3522 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3523 	    energy_a, energy_b, energy_c, max_energy));
3524 
3525 	return max_energy;
3526 }
3527 
3528 static void
3529 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3530     struct iwm_rx_data *data)
3531 {
3532 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3533 
3534 	DPRINTFN(20, ("received PHY stats\n"));
3535 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3536 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3537 
3538 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3539 }
3540 
3541 /*
3542  * Retrieve the average noise (in dBm) among receivers.
3543  */
3544 static int
3545 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3546 {
3547 	int i, total, nbant, noise;
3548 
3549 	total = nbant = noise = 0;
3550 	for (i = 0; i < 3; i++) {
3551 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3552 		if (noise) {
3553 			total += noise;
3554 			nbant++;
3555 		}
3556 	}
3557 
3558 	/* There should be at least one antenna but check anyway. */
3559 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3560 }
3561 
/*
 * Handle an RX MPDU notification: validate the frame, attach radiotap
 * metadata if a BPF listener is present, and pass the frame up to the
 * 802.11 stack.  Interprets the frame against the PHY info cached by
 * the preceding IWM_RX_PHY_CMD notification (sc_last_phy_info).
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;
	int s;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The RX status word follows directly after the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Point the ring slot's mbuf at the 802.11 frame. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	/* Drop frames that failed CRC or overran the RX FIFO. */
	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports energy directly; older needs AGC math. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/*
	 * Replenish the ring slot before handing 'm' up; if that fails
	 * the current mbuf stays on the ring and the frame is dropped.
	 */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	s = splnet();

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Feed a radiotap copy to any attached BPF listener. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
			      IWM_RATE_HT_MCS_NSS_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map hardware rate codes to 500kb/s units. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);

	splx(s);
}
3680 
3681 static void
3682 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3683     struct iwm_node *in)
3684 {
3685 	struct ieee80211com *ic = &sc->sc_ic;
3686 	struct ifnet *ifp = IC2IFP(ic);
3687 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3688 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3689 	int failack = tx_resp->failure_frame;
3690 
3691 	KASSERT(tx_resp->frame_count == 1);
3692 
3693 	/* Update rate control statistics. */
3694 	in->in_amn.amn_txcnt++;
3695 	if (failack > 0) {
3696 		in->in_amn.amn_retrycnt++;
3697 	}
3698 
3699 	if (status != IWM_TX_STATUS_SUCCESS &&
3700 	    status != IWM_TX_STATUS_DIRECT_DONE)
3701 		ifp->if_oerrors++;
3702 	else
3703 		ifp->if_opackets++;
3704 }
3705 
/*
 * Handle a TX completion notification: update statistics, release the
 * TX slot's DMA mapping, mbuf and node reference, and restart
 * transmission if the queue drained below its low-water mark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	int s;

	s = splnet();

	/* Guard against a duplicate completion for the same slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		splx(s);
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Tear down the completed frame's DMA mapping and free it. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference held since the frame was queued. */
	ieee80211_free_node(&in->in_ni);

	/* Wake the interface if the queue fell below the low mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			KASSERT(KERNEL_LOCKED_P());
			iwm_start(ifp);
		}
	}

	splx(s);
}
3761 
3762 static int
3763 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3764 {
3765 	struct iwm_binding_cmd cmd;
3766 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3767 	int i, err;
3768 	uint32_t status;
3769 
3770 	memset(&cmd, 0, sizeof(cmd));
3771 
3772 	cmd.id_and_color
3773 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3774 	cmd.action = htole32(action);
3775 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3776 
3777 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3778 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3779 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3780 
3781 	status = 0;
3782 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3783 	    sizeof(cmd), &cmd, &status);
3784 	if (err == 0 && status != 0)
3785 		err = EIO;
3786 
3787 	return err;
3788 }
3789 
3790 static void
3791 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3792     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3793 {
3794 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3795 
3796 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3797 	    ctxt->color));
3798 	cmd->action = htole32(action);
3799 	cmd->apply_time = htole32(apply_time);
3800 }
3801 
3802 static void
3803 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3804     struct ieee80211_channel *chan, uint8_t chains_static,
3805     uint8_t chains_dynamic)
3806 {
3807 	struct ieee80211com *ic = &sc->sc_ic;
3808 	uint8_t active_cnt, idle_cnt;
3809 
3810 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3811 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3812 
3813 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3814 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3815 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3816 
3817 	/* Set rx the chains */
3818 	idle_cnt = chains_static;
3819 	active_cnt = chains_dynamic;
3820 
3821 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3822 	    IWM_PHY_RX_CHAIN_VALID_POS);
3823 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3824 	cmd->rxchain_info |= htole32(active_cnt <<
3825 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3826 
3827 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3828 }
3829 
3830 static int
3831 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3832     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3833     uint32_t apply_time)
3834 {
3835 	struct iwm_phy_context_cmd cmd;
3836 
3837 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3838 
3839 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3840 	    chains_static, chains_dynamic);
3841 
3842 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3843 	    sizeof(struct iwm_phy_context_cmd), &cmd);
3844 }
3845 
3846 static int
3847 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3848 {
3849 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3850 	struct iwm_tfd *desc;
3851 	struct iwm_tx_data *txdata;
3852 	struct iwm_device_cmd *cmd;
3853 	struct mbuf *m;
3854 	bus_addr_t paddr;
3855 	uint32_t addr_lo;
3856 	int err = 0, i, paylen, off, s;
3857 	int code;
3858 	int async, wantresp;
3859 	int group_id;
3860 	size_t hdrlen, datasz;
3861 	uint8_t *data;
3862 
3863 	code = hcmd->id;
3864 	async = hcmd->flags & IWM_CMD_ASYNC;
3865 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
3866 
3867 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
3868 		paylen += hcmd->len[i];
3869 	}
3870 
3871 	/* if the command wants an answer, busy sc_cmd_resp */
3872 	if (wantresp) {
3873 		KASSERT(!async);
3874 		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
3875 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
3876 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
3877 	}
3878 
3879 	/*
3880 	 * Is the hardware still available?  (after e.g. above wait).
3881 	 */
3882 	s = splnet();
3883 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
3884 		err = ENXIO;
3885 		goto out;
3886 	}
3887 
3888 	desc = &ring->desc[ring->cur];
3889 	txdata = &ring->data[ring->cur];
3890 
3891 	group_id = iwm_cmd_groupid(code);
3892 	if (group_id != 0) {
3893 		hdrlen = sizeof(cmd->hdr_wide);
3894 		datasz = sizeof(cmd->data_wide);
3895 	} else {
3896 		hdrlen = sizeof(cmd->hdr);
3897 		datasz = sizeof(cmd->data);
3898 	}
3899 
3900 	if (paylen > datasz) {
3901 		/* Command is too large to fit in pre-allocated space. */
3902 		size_t totlen = hdrlen + paylen;
3903 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
3904 			aprint_error_dev(sc->sc_dev,
3905 			    "firmware command too long (%zd bytes)\n", totlen);
3906 			err = EINVAL;
3907 			goto out;
3908 		}
3909 		m = m_gethdr(M_DONTWAIT, MT_DATA);
3910 		if (m == NULL) {
3911 			err = ENOMEM;
3912 			goto out;
3913 		}
3914 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3915 		if (!(m->m_flags & M_EXT)) {
3916 			aprint_error_dev(sc->sc_dev,
3917 			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
3918 			m_freem(m);
3919 			err = ENOMEM;
3920 			goto out;
3921 		}
3922 		cmd = mtod(m, struct iwm_device_cmd *);
3923 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
3924 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3925 		if (err) {
3926 			aprint_error_dev(sc->sc_dev,
3927 			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
3928 			m_freem(m);
3929 			goto out;
3930 		}
3931 		txdata->m = m;
3932 		paddr = txdata->map->dm_segs[0].ds_addr;
3933 	} else {
3934 		cmd = &ring->cmd[ring->cur];
3935 		paddr = txdata->cmd_paddr;
3936 	}
3937 
3938 	if (group_id != 0) {
3939 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
3940 		cmd->hdr_wide.group_id = group_id;
3941 		cmd->hdr_wide.qid = ring->qid;
3942 		cmd->hdr_wide.idx = ring->cur;
3943 		cmd->hdr_wide.length = htole16(paylen);
3944 		cmd->hdr_wide.version = iwm_cmd_version(code);
3945 		data = cmd->data_wide;
3946 	} else {
3947 		cmd->hdr.code = code;
3948 		cmd->hdr.flags = 0;
3949 		cmd->hdr.qid = ring->qid;
3950 		cmd->hdr.idx = ring->cur;
3951 		data = cmd->data;
3952 	}
3953 
3954 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
3955 		if (hcmd->len[i] == 0)
3956 			continue;
3957 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
3958 		off += hcmd->len[i];
3959 	}
3960 	KASSERT(off == paylen);
3961 
3962 	/* lo field is not aligned */
3963 	addr_lo = htole32((uint32_t)paddr);
3964 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
3965 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
3966 	    | ((hdrlen + paylen) << 4));
3967 	desc->num_tbs = 1;
3968 
3969 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
3970 	    code, hdrlen + paylen, async ? " (async)" : ""));
3971 
3972 	if (paylen > datasz) {
3973 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
3974 		    BUS_DMASYNC_PREWRITE);
3975 	} else {
3976 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3977 		    (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
3978 		    BUS_DMASYNC_PREWRITE);
3979 	}
3980 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3981 	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
3982 	    BUS_DMASYNC_PREWRITE);
3983 
3984 	err = iwm_set_cmd_in_flight(sc);
3985 	if (err)
3986 		goto out;
3987 	ring->queued++;
3988 
3989 #if 0
3990 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
3991 #endif
3992 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
3993 	    code, ring->qid, ring->cur));
3994 
3995 	/* Kick command ring. */
3996 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3997 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3998 
3999 	if (!async) {
4000 		int generation = sc->sc_generation;
4001 		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
4002 		if (err == 0) {
4003 			/* if hardware is no longer up, return error */
4004 			if (generation != sc->sc_generation) {
4005 				err = ENXIO;
4006 			} else {
4007 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4008 			}
4009 		}
4010 	}
4011  out:
4012 	if (wantresp && err) {
4013 		iwm_free_resp(sc, hcmd);
4014 	}
4015 	splx(s);
4016 
4017 	return err;
4018 }
4019 
4020 static int
4021 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4022     uint16_t len, const void *data)
4023 {
4024 	struct iwm_host_cmd cmd = {
4025 		.id = id,
4026 		.len = { len, },
4027 		.data = { data, },
4028 		.flags = flags,
4029 	};
4030 
4031 	return iwm_send_cmd(sc, &cmd);
4032 }
4033 
/*
 * Send a host command synchronously and extract the 32-bit status word
 * from the firmware's response.  Returns 0 on success with *status set,
 * or an errno value.  If the response was dropped (e.g. RFKILL), 0 is
 * returned and *status is left untouched.  The response buffer is
 * always released before returning.
 */
static int
iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
    uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int err, resp_len;

	/* The caller must not ask for the response; we manage it here. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_WANT_SKB;

	err = iwm_send_cmd(sc, cmd);
	if (err)
		return err;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		err = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		err = EIO;
		goto out_free_resp;
	}

	/* The payload must be exactly one iwm_cmd_response. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	iwm_free_resp(sc, cmd);
	return err;
}
4073 
4074 static int
4075 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4076     const void *data, uint32_t *status)
4077 {
4078 	struct iwm_host_cmd cmd = {
4079 		.id = id,
4080 		.len = { len, },
4081 		.data = { data, },
4082 	};
4083 
4084 	return iwm_send_cmd_status(sc, &cmd, status);
4085 }
4086 
/*
 * Release the synchronous-command response buffer and wake up any
 * thread sleeping in iwm_send_cmd() waiting for its turn to use it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	/* Only commands that requested the response may release it. */
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
4095 
/*
 * Handle completion of the host command at slot 'idx' of the command
 * queue: release the oversized-command mbuf and DMA map (if any), wake
 * the thread sleeping on the TX descriptor in iwm_send_cmd(), and
 * update the ring's outstanding-command accounting.
 */
static void
iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tx_data *data;
	int s;

	if (qid != IWM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	s = splnet();

	data = &ring->data[idx];

	/* An mbuf is only attached for commands too big for ring->cmd[]. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	wakeup(&ring->desc[idx]);

	/* Completions are expected to arrive in ring order. */
	if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		aprint_error_dev(sc->sc_dev,
		    "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    idx, ring->queued, ring->cur);
	}

	KASSERT(ring->queued > 0);
	/* Let the device sleep again once no commands are in flight. */
	if (--ring->queued == 0)
		iwm_clear_cmd_in_flight(sc);

	splx(s);
}
4132 
#if 0
/*
 * Update the TX scheduler byte-count table entry for the frame queued
 * at (qid, idx).  Necessary only for block ack mode.
 *
 * NB(review): the original code referenced an undeclared variable 'w'
 * when computing the bus_dmamap_sync() offsets and would not have
 * compiled if this #if 0 block were enabled; 'w' is now declared as a
 * pointer to the table slot being updated.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are duplicated at
	 * the end of the table; keep the copy in sync.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w[IWM_TFD_QUEUE_SIZE_MAX] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
4168 
4169 /*
4170  * Fill in various bit for management frames, and leave them
4171  * unfilled for data frames (firmware takes care of that).
4172  * Return the selected TX rate.
4173  */
4174 static const struct iwm_rate *
4175 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4176     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4177 {
4178 	struct ieee80211com *ic = &sc->sc_ic;
4179 	struct ieee80211_node *ni = &in->in_ni;
4180 	const struct iwm_rate *rinfo;
4181 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4182 	int ridx, rate_flags, i, ind;
4183 	int nrates = ni->ni_rates.rs_nrates;
4184 
4185 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4186 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4187 
4188 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4189 	    type != IEEE80211_FC0_TYPE_DATA) {
4190 		/* for non-data, use the lowest supported rate */
4191 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4192 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4193 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4194 #ifndef IEEE80211_NO_HT
4195 	} else if (ic->ic_fixed_mcs != -1) {
4196 		ridx = sc->sc_fixed_ridx;
4197 #endif
4198 	} else if (ic->ic_fixed_rate != -1) {
4199 		ridx = sc->sc_fixed_ridx;
4200 	} else {
4201 		/* for data frames, use RS table */
4202 		tx->initial_rate_index = 0;
4203 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4204 		DPRINTFN(12, ("start with txrate %d\n",
4205 		    tx->initial_rate_index));
4206 #ifndef IEEE80211_NO_HT
4207 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4208 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4209 			return &iwm_rates[ridx];
4210 		}
4211 #endif
4212 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4213 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4214 		for (i = 0; i < nrates; i++) {
4215 			if (iwm_rates[i].rate == (ni->ni_txrate &
4216 			    IEEE80211_RATE_VAL)) {
4217 				ridx = i;
4218 				break;
4219 			}
4220 		}
4221 		return &iwm_rates[ridx];
4222 	}
4223 
4224 	rinfo = &iwm_rates[ridx];
4225 	for (i = 0, ind = sc->sc_mgmt_last_antenna;
4226 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
4227 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4228 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4229 			sc->sc_mgmt_last_antenna = ind;
4230 			break;
4231 		}
4232 	}
4233 	rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4234 	if (IWM_RIDX_IS_CCK(ridx))
4235 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4236 #ifndef IEEE80211_NO_HT
4237 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4238 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4239 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4240 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4241 	} else
4242 #endif
4243 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4244 
4245 	return rinfo;
4246 }
4247 
/* Size of the first TX buffer, covering the start of the TX command. */
#define TB0_SIZE 16
/*
 * Queue the 802.11 frame in 'm', destined for node 'ni', on TX ring
 * 'ac'.  Builds the TX command (rate, flags, copy of the 802.11
 * header) in the ring's command slot, DMA-maps the payload, fills the
 * TFD, and advances the hardware write pointer.  Consumes 'm' on
 * failure.  Returns 0 or an errno value.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Build the TX command in the ring's pre-allocated command slot. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Feed a copy to any attached radiotap listener (bpf). */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for large or protected unicast data. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		else
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(tx + 1, wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
	    "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
	    (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
	    le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
	    le32toh(tx->rate_n_flags)));

	/* Fill TX descriptor. */
	/* TB 0/1 cover the TX command + copied header; the rest is payload. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell write. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4479 
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush all frames pending on the TX queues in
 * tfd_msk.  When 'sync' is set the command is sent synchronously,
 * otherwise asynchronously.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd;
	int err;

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	flush_cmd.queues_ctl = htole32(tfd_msk);
	flush_cmd.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH);

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? 0 : IWM_CMD_ASYNC, sizeof(flush_cmd), &flush_cmd);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	}
	return err;
}
#endif
4499 
/* Turn the device LED on. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
4505 
/* Turn the device LED off. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
4511 
/* Return non-zero if the LED register currently reads "on". */
static int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
4517 
4518 static void
4519 iwm_led_blink_timeout(void *arg)
4520 {
4521 	struct iwm_softc *sc = arg;
4522 
4523 	if (iwm_led_is_enabled(sc))
4524 		iwm_led_disable(sc);
4525 	else
4526 		iwm_led_enable(sc);
4527 
4528 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4529 }
4530 
/* Start blinking the LED; iwm_led_blink_timeout() keeps it going. */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
4536 
/* Stop the blink callout and leave the LED off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
4543 
4544 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4545 
4546 static int
4547 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4548     struct iwm_beacon_filter_cmd *cmd)
4549 {
4550 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4551 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4552 }
4553 
/*
 * Fill in the connection-quality-monitor parameters of a beacon filter
 * command.  Currently only propagates the beacon-abort enable state;
 * the 'in' argument is unused.
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4560 
/*
 * Propagate the beacon-abort setting to the firmware's beacon filter.
 * A no-op (returning 0) unless beacon filtering is currently enabled.
 */
static int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
4577 
/*
 * Construct a MAC power-management command for the station 'in'.
 * Power saving itself is not enabled (the notyet-guarded flags below);
 * only the mandatory keep-alive period is programmed.
 */
static void
iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_power_cmd *cmd)
{
	struct ieee80211_node *ni = &in->in_ni;
	int dtim_period, dtim_msec, keep_alive;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	/* Fall back to a DTIM period of 1 if the AP did not announce one. */
	if (ni->ni_dtim_period)
		dtim_period = ni->ni_dtim_period;
	else
		dtim_period = 1;

	/*
	 * Regardless of power management state the driver must set
	 * keep alive period. FW will use it for sending keep alive NDPs
	 * immediately after association. Check that keep alive period
	 * is at least 3 * DTIM.
	 */
	dtim_msec = dtim_period * ni->ni_intval;
	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
	/* Round up to whole seconds; the firmware field is in seconds. */
	keep_alive = roundup(keep_alive, 1000) / 1000;
	cmd->keep_alive_seconds = htole16(keep_alive);

#ifdef notyet
	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
#endif
}
4609 
/*
 * Send the MAC power table command for 'in', then align the firmware's
 * beacon-abort state with whether power management was enabled in the
 * command we just sent.
 */
static int
iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
{
	int err;
	int ba_enable;
	struct iwm_mac_power_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	iwm_power_build_cmd(sc, in, &cmd);

	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
	    sizeof(cmd), &cmd);
	if (err)
		return err;

	ba_enable = !!(cmd.flags &
	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
	return iwm_update_beacon_abort(sc, in, ba_enable);
}
4630 
/*
 * Configure device-wide power management.  The CAM flag requests
 * "continuously active mode", i.e. power saving stays disabled.
 * A no-op on firmware without the device power-save TLV.
 */
static int
iwm_power_update_device(struct iwm_softc *sc)
{
	struct iwm_device_power_cmd cmd = {
#ifdef notyet
		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
#else
		.flags = 0,
#endif
	};

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
		return 0;

	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
	DPRINTF(("Sending device power command with flags = 0x%X\n",
	    cmd.flags));

	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
}
4651 
#ifdef notyet
/*
 * Enable firmware beacon filtering for the station described by 'in'
 * and record the new state in sc_bf on success.
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
4671 
4672 static int
4673 iwm_disable_beacon_filter(struct iwm_softc *sc)
4674 {
4675 	struct iwm_beacon_filter_cmd cmd;
4676 	int err;
4677 
4678 	memset(&cmd, 0, sizeof(cmd));
4679 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4680 		return 0;
4681 
4682 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4683 	if (err == 0)
4684 		sc->sc_bf.bf_enabled = 0;
4685 
4686 	return err;
4687 }
4688 
4689 static int
4690 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4691 {
4692 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
4693 	int err;
4694 	uint32_t status;
4695 
4696 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4697 
4698 	add_sta_cmd.sta_id = IWM_STATION_ID;
4699 	add_sta_cmd.mac_id_n_color
4700 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4701 	if (!update) {
4702 		int ac;
4703 		for (ac = 0; ac < WME_NUM_AC; ac++) {
4704 			add_sta_cmd.tfd_queue_msk |=
4705 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
4706 		}
4707 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4708 	}
4709 	add_sta_cmd.add_modify = update ? 1 : 0;
4710 	add_sta_cmd.station_flags_msk
4711 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4712 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
4713 	if (update)
4714 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4715 
4716 #ifndef IEEE80211_NO_HT
4717 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4718 		add_sta_cmd.station_flags_msk
4719 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4720 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4721 
4722 		add_sta_cmd.station_flags
4723 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4724 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4725 		case IEEE80211_AMPDU_PARAM_SS_2:
4726 			add_sta_cmd.station_flags
4727 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4728 			break;
4729 		case IEEE80211_AMPDU_PARAM_SS_4:
4730 			add_sta_cmd.station_flags
4731 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4732 			break;
4733 		case IEEE80211_AMPDU_PARAM_SS_8:
4734 			add_sta_cmd.station_flags
4735 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4736 			break;
4737 		case IEEE80211_AMPDU_PARAM_SS_16:
4738 			add_sta_cmd.station_flags
4739 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4740 			break;
4741 		default:
4742 			break;
4743 		}
4744 	}
4745 #endif
4746 
4747 	status = IWM_ADD_STA_SUCCESS;
4748 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4749 	    &add_sta_cmd, &status);
4750 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4751 		err = EIO;
4752 
4753 	return err;
4754 }
4755 
/*
 * Add the firmware's auxiliary station, used for frames not tied to a
 * real peer (e.g. during scanning), and bind it to the AUX TX queue.
 * Returns 0, or an errno value (EIO if the firmware rejected it).
 */
static int
iwm_add_aux_sta(struct iwm_softc *sc)
{
	struct iwm_add_sta_cmd_v7 cmd;
	int err;
	uint32_t status;

	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
	if (err)
		return err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = IWM_AUX_STA_ID;
	cmd.mac_id_n_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
	/* Disable aggregation on all TIDs. */
	cmd.tid_disable_tx = htole16(0xffff);

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);
	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
4782 
4783 #define IWM_PLCP_QUIET_THRESH 1
4784 #define IWM_ACTIVE_QUIET_TIME 10
4785 #define LONG_OUT_TIME_PERIOD 600
4786 #define SHORT_OUT_TIME_PERIOD 200
4787 #define SUSPEND_TIME_PERIOD 100
4788 
4789 static uint16_t
4790 iwm_scan_rx_chain(struct iwm_softc *sc)
4791 {
4792 	uint16_t rx_chain;
4793 	uint8_t rx_ant;
4794 
4795 	rx_ant = iwm_fw_valid_rx_ant(sc);
4796 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4797 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4798 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4799 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4800 	return htole16(rx_chain);
4801 }
4802 
/*
 * Choose the rate_n_flags word for scan probe requests: 1 Mbit/s CCK
 * on 2GHz (unless no_cck), otherwise 6 Mbit/s OFDM, rotating through
 * the valid TX antennas between calls.
 */
static uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next valid TX antenna. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
				   tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4825 
4826 #ifdef notyet
4827 /*
4828  * If req->n_ssids > 0, it means we should do an active scan.
4829  * In case of active scan w/o directed scan, we receive a zero-length SSID
4830  * just to notify that this scan is active and not passive.
4831  * In order to notify the FW of the number of SSIDs we wish to scan (including
4832  * the zero-length one), we need to set the corresponding bits in chan->type,
4833  * one for each SSID, and set the active bit (first). If the first SSID is
4834  * already included in the probe template, so we need to set only
4835  * req->n_ssids - 1 bits in addition to the first bit.
4836  */
4837 static uint16_t
4838 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4839 {
4840 	if (flags & IEEE80211_CHAN_2GHZ)
4841 		return 30  + 3 * (n_ssids + 1);
4842 	return 20  + 2 * (n_ssids + 1);
4843 }
4844 
4845 static uint16_t
4846 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
4847 {
4848 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4849 }
4850 #endif
4851 
/*
 * Populate the LMAC scan command's channel array with every configured
 * channel (ic_channels[0] is unused; ic_flags == 0 marks a hole),
 * bounded by the firmware's channel capacity.  Channels are scanned
 * actively when allowed and at least one SSID was requested.
 * Returns the number of channel entries written.
 */
static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = htole32(0);
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
		chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
		if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
			chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan++;
		nchan++;
	}

	return nchan;
}
4880 
/*
 * Populate the UMAC scan command's channel array with every configured
 * channel, bounded by the firmware's channel capacity.  Unlike the
 * LMAC variant, channel_num and iter_count are single bytes here.
 * Returns the number of channel entries written.
 */
static uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* ic_flags == 0 marks an unconfigured channel slot. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
		chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
		chan++;
		nchan++;
	}

	return nchan;
}
4906 
/*
 * Construct the probe request template transmitted by the firmware
 * during scans, recording the offsets and lengths of the MAC header,
 * the per-band IEs and the common IEs so the firmware can assemble
 * the frame per channel.  Returns 0, or ENOBUFS if the template
 * buffer in 'preq' is too small.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates + extended rates IEs: 2 two-byte headers. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		/*
		 * DS params IE with a zero channel number; presumably
		 * patched per channel by the firmware.
		 */
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	preq->common_data.len = htole16(frm - pos);

	return 0;
}
5000 
/*
 * Start a scan using the older LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD).  Builds a single scan request
 * covering up to sc_capa_n_scan_channels channels, followed in the
 * same buffer by a probe request template, and submits it to the
 * firmware.  Returns 0 on success or an errno.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/*
	 * Command layout: fixed header, then one channel config entry
	 * per scannable channel, then the probe request template.
	 */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	/* NOTE(review): KM_SLEEP allocations do not fail on NetBSD;
	 * this NULL check looks redundant -- confirm against kmem(9). */
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* With no desired SSID configured we can only scan passively. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	/* Scan 2GHz always; add 5GHz only if the NVM enables that band. */
	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe request template lives after the channel entries. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5106 
/*
 * Send the one-time global scan configuration (IWM_SCAN_CFG_CMD)
 * required by the UMAC scan API: legacy TX rates, dwell times, our
 * MAC address, channel flags, and the list of channel numbers the
 * firmware may scan.  Returns 0 on success or an errno.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	/* All legacy CCK and OFDM rates; probe requests use these. */
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One channel-number byte per scannable channel follows the header. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/*
	 * Collect the usable channels (ic_flags == 0 means the slot is
	 * unused).  Starts at index 1, presumably because channel
	 * number 0 does not exist -- confirm against net80211.
	 */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
5181 
/*
 * Start a scan using the newer UMAC scan API (IWM_SCAN_REQ_UMAC).
 * Builds a request with per-channel config entries followed by a
 * "tail" holding the directed-scan SSID list, schedule, and probe
 * request template.  Returns 0 on success or an errno.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Header, then channel configs, then the tail structure. */
	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail follows the variable-length channel config array. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5265 
5266 static uint8_t
5267 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5268 {
5269 	int i;
5270 	uint8_t rval;
5271 
5272 	for (i = 0; i < rs->rs_nrates; i++) {
5273 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5274 		if (rval == iwm_rates[ridx].rate)
5275 			return rs->rs_rates[i];
5276 	}
5277 	return 0;
5278 }
5279 
/*
 * Derive the CCK and OFDM "ACK" (basic) rate bitmaps for the
 * firmware MAC context from the BSS node's basic rate set, then
 * extend them with the mandatory lower rates required by
 * 802.11-2007 9.6 (see the long comment below).  Results are
 * returned through *cck_rates and *ofdm_rates.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates exist only on 2GHz (or before a channel is known). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5365 
/*
 * Fill in the parts of a MAC context command shared by all
 * add/modify invocations: station MAC type, addresses, ACK rates,
 * slot/preamble flags, per-AC EDCA parameters and protection flags.
 * The caller fills the remaining (state-dependent) fields.
 */
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Program EDCA parameters per access category, indexed by FIFO. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		/* TXOP limit is configured in units of 32 usec. */
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
		/* FALLTHROUGH -- NOTE(review): also ORs in FAT_PROT
		 * below; confirm the missing break is intentional. */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
5440 
5441 static void
5442 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5443     struct iwm_mac_data_sta *sta, int assoc)
5444 {
5445 	struct ieee80211_node *ni = &in->in_ni;
5446 	uint32_t dtim_off;
5447 	uint64_t tsf;
5448 
5449 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5450 	tsf = le64toh(ni->ni_tstamp.tsf);
5451 
5452 	sta->is_assoc = htole32(assoc);
5453 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5454 	sta->dtim_tsf = htole64(tsf + dtim_off);
5455 	sta->bi = htole32(ni->ni_intval);
5456 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5457 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5458 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5459 	sta->listen_interval = htole32(10);
5460 	sta->assoc_id = htole32(ni->ni_associd);
5461 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5462 }
5463 
5464 static int
5465 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5466     int assoc)
5467 {
5468 	struct ieee80211_node *ni = &in->in_ni;
5469 	struct iwm_mac_ctx_cmd cmd;
5470 
5471 	memset(&cmd, 0, sizeof(cmd));
5472 
5473 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5474 
5475 	/* Allow beacons to pass through as long as we are not associated or we
5476 	 * do not have dtim period information */
5477 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5478 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5479 	else
5480 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5481 
5482 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5483 }
5484 
5485 #define IWM_MISSED_BEACONS_THRESHOLD 8
5486 
5487 static void
5488 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5489 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5490 {
5491 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5492 	int s;
5493 
5494 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5495 	    le32toh(mb->mac_id),
5496 	    le32toh(mb->consec_missed_beacons),
5497 	    le32toh(mb->consec_missed_beacons_since_last_rx),
5498 	    le32toh(mb->num_recvd_beacons),
5499 	    le32toh(mb->num_expected_beacons)));
5500 
5501 	/*
5502 	 * TODO: the threshold should be adjusted based on latency conditions,
5503 	 * and/or in case of a CS flow on one of the other AP vifs.
5504 	 */
5505 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5506 	    IWM_MISSED_BEACONS_THRESHOLD) {
5507 		s = splnet();
5508 		ieee80211_beacon_miss(&sc->sc_ic);
5509 		splx(s);
5510 	}
5511 }
5512 
5513 static int
5514 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5515 {
5516 	struct iwm_time_quota_cmd cmd;
5517 	int i, idx, num_active_macs, quota, quota_rem;
5518 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5519 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
5520 	uint16_t id;
5521 
5522 	memset(&cmd, 0, sizeof(cmd));
5523 
5524 	/* currently, PHY ID == binding ID */
5525 	if (in) {
5526 		id = in->in_phyctxt->id;
5527 		KASSERT(id < IWM_MAX_BINDINGS);
5528 		colors[id] = in->in_phyctxt->color;
5529 
5530 		if (1)
5531 			n_ifs[id] = 1;
5532 	}
5533 
5534 	/*
5535 	 * The FW's scheduling session consists of
5536 	 * IWM_MAX_QUOTA fragments. Divide these fragments
5537 	 * equally between all the bindings that require quota
5538 	 */
5539 	num_active_macs = 0;
5540 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5541 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5542 		num_active_macs += n_ifs[i];
5543 	}
5544 
5545 	quota = 0;
5546 	quota_rem = 0;
5547 	if (num_active_macs) {
5548 		quota = IWM_MAX_QUOTA / num_active_macs;
5549 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
5550 	}
5551 
5552 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5553 		if (colors[i] < 0)
5554 			continue;
5555 
5556 		cmd.quotas[idx].id_and_color =
5557 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5558 
5559 		if (n_ifs[i] <= 0) {
5560 			cmd.quotas[idx].quota = htole32(0);
5561 			cmd.quotas[idx].max_duration = htole32(0);
5562 		} else {
5563 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5564 			cmd.quotas[idx].max_duration = htole32(0);
5565 		}
5566 		idx++;
5567 	}
5568 
5569 	/* Give the remainder of the session to the first binding */
5570 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5571 
5572 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5573 }
5574 
/*
 * Prepare the firmware for authentication: configure the smart-FIFO
 * and multicast filter, point PHY context 0 at the BSS channel, add
 * the MAC/binding/station contexts, and protect the session with a
 * time event so the firmware stays on-channel during association.
 * Returns 0 on success or an errno.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Retune PHY context 0 to the AP's channel. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	DELAY(100);

	return 0;
}
5632 
5633 static int
5634 iwm_assoc(struct iwm_softc *sc)
5635 {
5636 	struct ieee80211com *ic = &sc->sc_ic;
5637 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5638 	int err;
5639 
5640 	err = iwm_add_sta_cmd(sc, in, 1);
5641 	if (err)
5642 		return err;
5643 
5644 	return 0;
5645 }
5646 
5647 static struct ieee80211_node *
5648 iwm_node_alloc(struct ieee80211_node_table *nt)
5649 {
5650 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5651 }
5652 
/*
 * Periodic (500ms) calibration callout: let AMRR adapt the TX rate
 * while in STA mode with automatic rate selection, and reschedule
 * itself.  Runs at splnet; firmware updates triggered by a rate
 * change are deferred to a softint (setrates_task).
 */
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
#endif
	int s;

	s = splnet();
	/* Only adapt when the rate (and MCS, if HT) is not fixed. */
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the current TX rate to detect AMRR changes. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firwmare's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
5696 
5697 #ifndef IEEE80211_NO_HT
5698 static void
5699 iwm_setrates_task(void *arg)
5700 {
5701 	struct iwm_softc *sc = arg;
5702 	struct ieee80211com *ic = &sc->sc_ic;
5703 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5704 
5705 	/* Update rates table based on new TX rate determined by AMRR. */
5706 	iwm_setrates(in);
5707 }
5708 
/*
 * Build the firmware link-quality (LQ) rate table for a station and
 * send it with IWM_LQ_CMD.  The table is filled in descending rate
 * order starting from the node's current TX rate/MCS, then padded
 * with the lowest usable rate.  Returns the iwm_send_cmd() result.
 */
static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* Short guard interval usable iff both we and the peer do SGI20. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		/* Prefer an HT MCS entry for this index, if usable. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Otherwise fall back to the legacy rate, if present. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Transmit on antenna A. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
5809 #endif
5810 
/*
 * ifmedia change callback: map a user-selected fixed rate (or MCS,
 * when HT is compiled in) to a hardware rate index, then restart the
 * interface if it is running so the new setting takes effect.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	/*
	 * Careful: with HT compiled in, the "else" below chains this
	 * preprocessor-conditional branch to the following "if".
	 */
#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface to apply the new media settings. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
5845 
/*
 * Perform an 802.11 state transition (called via the newstate work
 * queue).  Drives the firmware through the corresponding scan /
 * auth / assoc / run command sequences, then hands off to the stock
 * net80211 state machine (sc_newstate).  Returns 0 or an errno;
 * SCAN transitions return early without calling sc_newstate.
 */
static int
iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	enum ieee80211_state ostate = ic->ic_state;
	struct iwm_node *in;
	int err;

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate != IEEE80211_S_INIT) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			iwm_stop(ifp, 0);
			iwm_init(ifp);
			return 0;
		}

		iwm_stop_device(sc);
		iwm_init_hw(sc);
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* Ignore a redundant request while already scanning. */
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return 0;
		/* Prefer the newer UMAC scan API when the firmware has it. */
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan: %d\n",
			    DEVNAME(sc), err));
			return err;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/* Scan completion advances the state; skip sc_newstate. */
		return 0;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return err;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return err;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return err;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could send power command (error %d)\n", err);
			return err;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return err;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return err;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return err;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	return sc->sc_newstate(ic, nstate, arg);
}
5987 
5988 static void
5989 iwm_newstate_cb(struct work *wk, void *v)
5990 {
5991 	struct iwm_softc *sc = v;
5992 	struct ieee80211com *ic = &sc->sc_ic;
5993 	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
5994 	enum ieee80211_state nstate = iwmns->ns_nstate;
5995 	int generation = iwmns->ns_generation;
5996 	int arg = iwmns->ns_arg;
5997 	int s;
5998 
5999 	kmem_free(iwmns, sizeof(*iwmns));
6000 
6001 	s = splnet();
6002 
6003 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
6004 	if (sc->sc_generation != generation) {
6005 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
6006 		if (nstate == IEEE80211_S_INIT) {
6007 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
6008 			    "calling sc_newstate()\n"));
6009 			(void) sc->sc_newstate(ic, nstate, arg);
6010 		}
6011 	} else
6012 		(void) iwm_do_newstate(ic, nstate, arg);
6013 
6014 	splx(s);
6015 }
6016 
6017 static int
6018 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6019 {
6020 	struct iwm_newstate_state *iwmns;
6021 	struct ifnet *ifp = IC2IFP(ic);
6022 	struct iwm_softc *sc = ifp->if_softc;
6023 
6024 	callout_stop(&sc->sc_calib_to);
6025 
6026 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6027 	if (!iwmns) {
6028 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6029 		return ENOMEM;
6030 	}
6031 
6032 	iwmns->ns_nstate = nstate;
6033 	iwmns->ns_arg = arg;
6034 	iwmns->ns_generation = sc->sc_generation;
6035 
6036 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6037 
6038 	return 0;
6039 }
6040 
6041 static void
6042 iwm_endscan(struct iwm_softc *sc)
6043 {
6044 	struct ieee80211com *ic = &sc->sc_ic;
6045 	int s;
6046 
6047 	DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6048 
6049 	s = splnet();
6050 	if (ic->ic_state == IEEE80211_S_SCAN)
6051 		ieee80211_end_scan(ic);
6052 	splx(s);
6053 }
6054 
6055 /*
6056  * Aging and idle timeouts for the different possible scenarios
6057  * in default configuration
6058  */
6059 static const uint32_t
6060 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6061 	{
6062 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6063 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6064 	},
6065 	{
6066 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6067 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6068 	},
6069 	{
6070 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6071 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6072 	},
6073 	{
6074 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
6075 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6076 	},
6077 	{
6078 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6079 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6080 	},
6081 };
6082 
6083 /*
6084  * Aging and idle timeouts for the different possible scenarios
6085  * in single BSS MAC configuration.
6086  */
6087 static const uint32_t
6088 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6089 	{
6090 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6091 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6092 	},
6093 	{
6094 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6095 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6096 	},
6097 	{
6098 		htole32(IWM_SF_MCAST_AGING_TIMER),
6099 		htole32(IWM_SF_MCAST_IDLE_TIMER)
6100 	},
6101 	{
6102 		htole32(IWM_SF_BA_AGING_TIMER),
6103 		htole32(IWM_SF_BA_IDLE_TIMER)
6104 	},
6105 	{
6106 		htole32(IWM_SF_TX_RE_AGING_TIMER),
6107 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
6108 	},
6109 };
6110 
/*
 * Fill in a smart-fifo (SF) configuration command.  When 'ni' is
 * non-NULL we are (becoming) associated and the per-BSS timeouts
 * and a watermark derived from the AP's capabilities are used;
 * otherwise the default (unassociated) values apply.
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	/* The long-delay (scan) state always uses the scan watermark. */
	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts depend on whether we have a BSS (ni != NULL). */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
6158 
6159 static int
6160 iwm_sf_config(struct iwm_softc *sc, int new_state)
6161 {
6162 	struct ieee80211com *ic = &sc->sc_ic;
6163 	struct iwm_sf_cfg_cmd sf_cmd = {
6164 		.state = htole32(IWM_SF_FULL_ON),
6165 	};
6166 
6167 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6168 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6169 
6170 	switch (new_state) {
6171 	case IWM_SF_UNINIT:
6172 	case IWM_SF_INIT_OFF:
6173 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
6174 		break;
6175 	case IWM_SF_FULL_ON:
6176 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6177 		break;
6178 	default:
6179 		return EINVAL;
6180 	}
6181 
6182 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6183 	    sizeof(sf_cmd), &sf_cmd);
6184 }
6185 
6186 static int
6187 iwm_send_bt_init_conf(struct iwm_softc *sc)
6188 {
6189 	struct iwm_bt_coex_cmd bt_cmd;
6190 
6191 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6192 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6193 
6194 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6195 }
6196 
6197 static bool
6198 iwm_is_lar_supported(struct iwm_softc *sc)
6199 {
6200 	bool nvm_lar = sc->sc_nvm.lar_enabled;
6201 	bool tlv_lar = isset(sc->sc_enabled_capa,
6202 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6203 
6204 	if (iwm_lar_disable)
6205 		return false;
6206 
6207 	/*
6208 	 * Enable LAR only if it is supported by the FW (TLV) &&
6209 	 * enabled in the NVM
6210 	 */
6211 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6212 		return nvm_lar && tlv_lar;
6213 	else
6214 		return tlv_lar;
6215 }
6216 
6217 static int
6218 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6219 {
6220 	struct iwm_mcc_update_cmd mcc_cmd;
6221 	struct iwm_host_cmd hcmd = {
6222 		.id = IWM_MCC_UPDATE_CMD,
6223 		.flags = IWM_CMD_WANT_SKB,
6224 		.data = { &mcc_cmd },
6225 	};
6226 	int err;
6227 	int resp_v2 = isset(sc->sc_enabled_capa,
6228 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6229 
6230 	if (!iwm_is_lar_supported(sc)) {
6231 		DPRINTF(("%s: no LAR support\n", __func__));
6232 		return 0;
6233 	}
6234 
6235 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6236 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6237 	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6238 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6239 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6240 	else
6241 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6242 
6243 	if (resp_v2)
6244 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6245 	else
6246 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6247 
6248 	err = iwm_send_cmd(sc, &hcmd);
6249 	if (err)
6250 		return err;
6251 
6252 	iwm_free_resp(sc, &hcmd);
6253 
6254 	return 0;
6255 }
6256 
6257 static void
6258 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6259 {
6260 	struct iwm_host_cmd cmd = {
6261 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6262 		.len = { sizeof(uint32_t), },
6263 		.data = { &backoff, },
6264 	};
6265 
6266 	iwm_send_cmd(sc, &cmd);
6267 }
6268 
6269 static int
6270 iwm_init_hw(struct iwm_softc *sc)
6271 {
6272 	struct ieee80211com *ic = &sc->sc_ic;
6273 	int err, i, ac;
6274 
6275 	err = iwm_preinit(sc);
6276 	if (err)
6277 		return err;
6278 
6279 	err = iwm_start_hw(sc);
6280 	if (err) {
6281 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6282 		return err;
6283 	}
6284 
6285 	err = iwm_run_init_mvm_ucode(sc, 0);
6286 	if (err)
6287 		return err;
6288 
6289 	/* Should stop and start HW since INIT image just loaded. */
6290 	iwm_stop_device(sc);
6291 	err = iwm_start_hw(sc);
6292 	if (err) {
6293 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6294 		return err;
6295 	}
6296 
6297 	/* Restart, this time with the regular firmware */
6298 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6299 	if (err) {
6300 		aprint_error_dev(sc->sc_dev,
6301 		    "could not load firmware (error %d)\n", err);
6302 		goto err;
6303 	}
6304 
6305 	err = iwm_send_bt_init_conf(sc);
6306 	if (err) {
6307 		aprint_error_dev(sc->sc_dev,
6308 		    "could not init bt coex (error %d)\n", err);
6309 		goto err;
6310 	}
6311 
6312 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6313 	if (err) {
6314 		aprint_error_dev(sc->sc_dev,
6315 		    "could not init tx ant config (error %d)\n", err);
6316 		goto err;
6317 	}
6318 
6319 	/* Send phy db control command and then phy db calibration*/
6320 	err = iwm_send_phy_db_data(sc);
6321 	if (err) {
6322 		aprint_error_dev(sc->sc_dev,
6323 		    "could not init phy db (error %d)\n", err);
6324 		goto err;
6325 	}
6326 
6327 	err = iwm_send_phy_cfg_cmd(sc);
6328 	if (err) {
6329 		aprint_error_dev(sc->sc_dev,
6330 		    "could not send phy config (error %d)\n", err);
6331 		goto err;
6332 	}
6333 
6334 	/* Add auxiliary station for scanning */
6335 	err = iwm_add_aux_sta(sc);
6336 	if (err) {
6337 		aprint_error_dev(sc->sc_dev,
6338 		    "could not add aux station (error %d)\n", err);
6339 		goto err;
6340 	}
6341 
6342 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6343 		/*
6344 		 * The channel used here isn't relevant as it's
6345 		 * going to be overwritten in the other flows.
6346 		 * For now use the first channel we have.
6347 		 */
6348 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6349 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6350 		    IWM_FW_CTXT_ACTION_ADD, 0);
6351 		if (err) {
6352 			aprint_error_dev(sc->sc_dev,
6353 			    "could not add phy context %d (error %d)\n",
6354 			    i, err);
6355 			goto err;
6356 		}
6357 	}
6358 
6359 	/* Initialize tx backoffs to the minimum. */
6360 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6361 		iwm_tt_tx_backoff(sc, 0);
6362 
6363 	err = iwm_power_update_device(sc);
6364 	if (err) {
6365 		aprint_error_dev(sc->sc_dev,
6366 		    "could send power command (error %d)\n", err);
6367 		goto err;
6368 	}
6369 
6370 	err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6371 	if (err) {
6372 		aprint_error_dev(sc->sc_dev,
6373 		    "could not init LAR (error %d)\n", err);
6374 		goto err;
6375 	}
6376 
6377 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6378 		err = iwm_config_umac_scan(sc);
6379 		if (err) {
6380 			aprint_error_dev(sc->sc_dev,
6381 			    "could not configure scan (error %d)\n", err);
6382 			goto err;
6383 		}
6384 	}
6385 
6386 	for (ac = 0; ac < WME_NUM_AC; ac++) {
6387 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6388 		    iwm_ac_to_tx_fifo[ac]);
6389 		if (err) {
6390 			aprint_error_dev(sc->sc_dev,
6391 			    "could not enable Tx queue %d (error %d)\n",
6392 			    i, err);
6393 			goto err;
6394 		}
6395 	}
6396 
6397 	err = iwm_disable_beacon_filter(sc);
6398 	if (err) {
6399 		aprint_error_dev(sc->sc_dev,
6400 		    "could not disable beacon filter (error %d)\n", err);
6401 		goto err;
6402 	}
6403 
6404 	return 0;
6405 
6406  err:
6407 	iwm_stop_device(sc);
6408 	return err;
6409 }
6410 
6411 /* Allow multicast from our BSSID. */
6412 static int
6413 iwm_allow_mcast(struct iwm_softc *sc)
6414 {
6415 	struct ieee80211com *ic = &sc->sc_ic;
6416 	struct ieee80211_node *ni = ic->ic_bss;
6417 	struct iwm_mcast_filter_cmd *cmd;
6418 	size_t size;
6419 	int err;
6420 
6421 	size = roundup(sizeof(*cmd), 4);
6422 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6423 	if (cmd == NULL)
6424 		return ENOMEM;
6425 	cmd->filter_own = 1;
6426 	cmd->port_id = 0;
6427 	cmd->count = 0;
6428 	cmd->pass_all = 1;
6429 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6430 
6431 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6432 	kmem_intr_free(cmd, size);
6433 	return err;
6434 }
6435 
6436 static int
6437 iwm_init(struct ifnet *ifp)
6438 {
6439 	struct iwm_softc *sc = ifp->if_softc;
6440 	int err;
6441 
6442 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6443 		return 0;
6444 
6445 	sc->sc_generation++;
6446 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
6447 
6448 	err = iwm_init_hw(sc);
6449 	if (err) {
6450 		iwm_stop(ifp, 1);
6451 		return err;
6452 	}
6453 
6454 	ifp->if_flags &= ~IFF_OACTIVE;
6455 	ifp->if_flags |= IFF_RUNNING;
6456 
6457 	ieee80211_begin_scan(&sc->sc_ic, 0);
6458 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6459 
6460 	return 0;
6461 }
6462 
/*
 * ifnet start routine: drain the management queue and, when in RUN
 * state, the interface send queue, handing each frame to iwm_tx().
 * Stops and sets IFF_OACTIVE when any Tx ring is full.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node was stashed in the mbuf by net80211. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			M_CLEARCTX(m);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		bpf_mtap(ifp, m);

		/* Encapsulate the Ethernet frame into an 802.11 frame. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		bpf_mtap3(ic->ic_rawbpf, m);

		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		if (ifp->if_flags & IFF_UP) {
			/* Arm the Tx watchdog; see iwm_watchdog(). */
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}
}
6548 
/*
 * ifnet stop routine: tear down the interface state and power the
 * device down.  The 'disable' argument is unused here.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* Invalidate queued newstate requests; see iwm_newstate_cb(). */
	sc->sc_generation++;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (in)
		in->in_phyctxt = NULL;

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	iwm_led_blink_stop(sc);
	/* Disarm the Tx watchdog. */
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
6572 
6573 static void
6574 iwm_watchdog(struct ifnet *ifp)
6575 {
6576 	struct iwm_softc *sc = ifp->if_softc;
6577 
6578 	ifp->if_timer = 0;
6579 	if (sc->sc_tx_timer > 0) {
6580 		if (--sc->sc_tx_timer == 0) {
6581 			aprint_error_dev(sc->sc_dev, "device timeout\n");
6582 #ifdef IWM_DEBUG
6583 			iwm_nic_error(sc);
6584 #endif
6585 			ifp->if_flags &= ~IFF_UP;
6586 			iwm_stop(ifp, 1);
6587 			ifp->if_oerrors++;
6588 			return;
6589 		}
6590 		ifp->if_timer = 1;
6591 	}
6592 
6593 	ieee80211_watchdog(&sc->sc_ic);
6594 }
6595 
6596 static int
6597 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6598 {
6599 	struct iwm_softc *sc = ifp->if_softc;
6600 	struct ieee80211com *ic = &sc->sc_ic;
6601 	const struct sockaddr *sa;
6602 	int s, err = 0;
6603 
6604 	s = splnet();
6605 
6606 	switch (cmd) {
6607 	case SIOCSIFADDR:
6608 		ifp->if_flags |= IFF_UP;
6609 		/* FALLTHROUGH */
6610 	case SIOCSIFFLAGS:
6611 		err = ifioctl_common(ifp, cmd, data);
6612 		if (err)
6613 			break;
6614 		if (ifp->if_flags & IFF_UP) {
6615 			if (!(ifp->if_flags & IFF_RUNNING)) {
6616 				err = iwm_init(ifp);
6617 				if (err)
6618 					ifp->if_flags &= ~IFF_UP;
6619 			}
6620 		} else {
6621 			if (ifp->if_flags & IFF_RUNNING)
6622 				iwm_stop(ifp, 1);
6623 		}
6624 		break;
6625 
6626 	case SIOCADDMULTI:
6627 	case SIOCDELMULTI:
6628 		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6629 			err = ENXIO;
6630 			break;
6631 		}
6632 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6633 		err = (cmd == SIOCADDMULTI) ?
6634 		    ether_addmulti(sa, &sc->sc_ec) :
6635 		    ether_delmulti(sa, &sc->sc_ec);
6636 		if (err == ENETRESET)
6637 			err = 0;
6638 		break;
6639 
6640 	default:
6641 		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6642 			err = ether_ioctl(ifp, cmd, data);
6643 			break;
6644 		}
6645 		err = ieee80211_ioctl(ic, cmd, data);
6646 		break;
6647 	}
6648 
6649 	if (err == ENETRESET) {
6650 		err = 0;
6651 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6652 		    (IFF_UP | IFF_RUNNING)) {
6653 			iwm_stop(ifp, 0);
6654 			err = iwm_init(ifp);
6655 		}
6656 	}
6657 
6658 	splx(s);
6659 	return err;
6660 }
6661 
6662 /*
6663  * Note: This structure is read from the device with IO accesses,
6664  * and the reading already does the endian conversion. As it is
6665  * read with uint32_t-sized accesses, any members with a different size
6666  * need to be ordered correctly though!
6667  */
6668 struct iwm_error_event_table {
6669 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6670 	uint32_t error_id;		/* type of error */
6671 	uint32_t trm_hw_status0;	/* TRM HW status */
6672 	uint32_t trm_hw_status1;	/* TRM HW status */
6673 	uint32_t blink2;		/* branch link */
6674 	uint32_t ilink1;		/* interrupt link */
6675 	uint32_t ilink2;		/* interrupt link */
6676 	uint32_t data1;		/* error-specific data */
6677 	uint32_t data2;		/* error-specific data */
6678 	uint32_t data3;		/* error-specific data */
6679 	uint32_t bcon_time;		/* beacon timer */
6680 	uint32_t tsf_low;		/* network timestamp function timer */
6681 	uint32_t tsf_hi;		/* network timestamp function timer */
6682 	uint32_t gp1;		/* GP1 timer register */
6683 	uint32_t gp2;		/* GP2 timer register */
6684 	uint32_t fw_rev_type;	/* firmware revision type */
6685 	uint32_t major;		/* uCode version major */
6686 	uint32_t minor;		/* uCode version minor */
6687 	uint32_t hw_ver;		/* HW Silicon version */
6688 	uint32_t brd_ver;		/* HW board version */
6689 	uint32_t log_pc;		/* log program counter */
6690 	uint32_t frame_ptr;		/* frame pointer */
6691 	uint32_t stack_ptr;		/* stack pointer */
6692 	uint32_t hcmd;		/* last host command header */
6693 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
6694 				 * rxtx_flag */
6695 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
6696 				 * host_flag */
6697 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
6698 				 * enc_flag */
6699 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
6700 				 * time_flag */
6701 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
6702 				 * wico interrupt */
6703 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
6704 	uint32_t wait_event;		/* wait event() caller address */
6705 	uint32_t l2p_control;	/* L2pControlField */
6706 	uint32_t l2p_duration;	/* L2pDurationField */
6707 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
6708 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
6709 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
6710 				 * (LMPM_PMG_SEL) */
6711 	uint32_t u_timestamp;	/* indicate when the date and time of the
6712 				 * compilation */
6713 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
6714 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6715 
6716 /*
6717  * UMAC error struct - relevant starting from family 8000 chip.
6718  * Note: This structure is read from the device with IO accesses,
6719  * and the reading already does the endian conversion. As it is
6720  * read with u32-sized accesses, any members with a different size
6721  * need to be ordered correctly though!
6722  */
6723 struct iwm_umac_error_event_table {
6724 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6725 	uint32_t error_id;	/* type of error */
6726 	uint32_t blink1;	/* branch link */
6727 	uint32_t blink2;	/* branch link */
6728 	uint32_t ilink1;	/* interrupt link */
6729 	uint32_t ilink2;	/* interrupt link */
6730 	uint32_t data1;		/* error-specific data */
6731 	uint32_t data2;		/* error-specific data */
6732 	uint32_t data3;		/* error-specific data */
6733 	uint32_t umac_major;
6734 	uint32_t umac_minor;
6735 	uint32_t frame_pointer;	/* core register 27 */
6736 	uint32_t stack_pointer;	/* core register 28 */
6737 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
6738 	uint32_t nic_isr_pref;	/* ISR status register */
6739 } __packed;
6740 
6741 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
6742 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6743 
6744 #ifdef IWM_DEBUG
/*
 * Mapping of firmware error codes to symbolic names.  The final
 * "ADVANCED_SYSASSERT" entry (num == 0) doubles as the fallback
 * returned by iwm_desc_lookup() for unknown codes.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6766 
6767 static const char *
6768 iwm_desc_lookup(uint32_t num)
6769 {
6770 	int i;
6771 
6772 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6773 		if (advanced_lookup[i].num == num)
6774 			return advanced_lookup[i].name;
6775 
6776 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6777 	return advanced_lookup[i].name;
6778 }
6779 
6780 /*
6781  * Support for dumping the error log seemed like a good idea ...
6782  * but it's mostly hex junk and the only sensible thing is the
6783  * hw/ucode revision (which we know anyway).  Since it's here,
6784  * I'll just leave it in, just in case e.g. the Intel guys want to
6785  * help us decipher some "ADVANCED_SYSASSERT" later.
6786  */
/*
 * Read the LMAC error event table out of device memory and dump it
 * to the console; chain to the UMAC dump when one is advertised.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table t;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* The table must live in firmware SRAM, above 0x800000. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!t.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
	    t.trm_hw_status0);
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
	    t.trm_hw_status1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
	    t.fw_rev_type);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
	    t.major);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
	    t.minor);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    t.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);

	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
6862 
/*
 * Read and dump the UMAC error event table (8000-family firmware).
 * Called from iwm_nic_error() when the firmware advertises one.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* The table must live in firmware SRAM, above 0x800000. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
		iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
6909 #endif
6910 
/*
 * Sync the RX DMA map past the packet header and point _var_ at the
 * response structure that follows struct iwm_rx_packet in the buffer.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * As above, but for a variable-length payload.
 * NOTE(review): the sync length is sizeof(len) -- i.e. a variable
 * named "len" at the call site -- rather than (_len_); presumably a
 * latent bug inherited from upstream.  Confirm before relying on the
 * _len_ argument being honored.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(len), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6926 
/*
 * Drain and dispatch all firmware notifications/command responses that
 * have accumulated on the RX ring since the last pass.  Called from the
 * softint when an RX (or periodic RX) interrupt fires.  Advances
 * sc->rxq.cur up to the firmware's write pointer and finally tells the
 * hardware how far we have read.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Pull in the ring status the firmware DMAed to us. */
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* Firmware write pointer; ring indices are 12 bits wide. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int orig_qid, qid, idx, code;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* High bit of qid marks uCode-originated notifications. */
		orig_qid = pkt->hdr.qid;
		qid = orig_qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * Firmware-alive notification.  The payload layout differs
		 * between firmware generations, so distinguish the three
		 * response versions by payload size.
		 */
		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake whoever is sleeping in iwm_load_ucode_wait. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			uint16_t size = le16toh(phy_db_notif->length);
			/* Sync the variable-length data tail as well. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		/*
		 * Command responses that a sleeping thread is waiting on:
		 * copy the whole packet into sc_cmd_resp if we match the
		 * expected (qid, idx) cookie.
		 */
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Two-letter country code reported by firmware. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
		    IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif1;
			struct iwm_dts_measurement_notif_v2 *notif2;

			/* Version is again inferred from payload size. */
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
				SYNC_RESP_STRUCT(notif1, pkt);
				DPRINTF(("%s: DTS temp=%d \n",
				    DEVNAME(sc), notif1->temp));
				break;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
				SYNC_RESP_STRUCT(notif2, pkt);
				DPRINTF(("%s: DTS temp=%d \n",
				    DEVNAME(sc), notif2->temp));
				break;
			}
			break;
		}

		/*
		 * Generic command completions: stash the response for a
		 * waiting thread if the (qid, idx) cookie matches.
		 */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			/* Wake the thread sleeping in iwm_run_init_mvm_ucode. */
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
7221 
/*
 * Hardware interrupt handler.  Does no work at IPL_NET beyond masking
 * further interrupts; all processing is deferred to iwm_softintr, which
 * re-enables the interrupt mask when done.  Always claims the interrupt.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;

	/* Disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	softint_schedule(sc->sc_soft_ih);
	return 1;
}
7233 
/*
 * Soft interrupt handler scheduled by iwm_intr.  Gathers the pending
 * interrupt causes (from the ICT table when in use, otherwise from the
 * CSR registers), acknowledges them, and dispatches: fatal errors,
 * firmware-chunk-loaded wakeups, rfkill, and RX notifications.  On
 * normal exit the interrupt mask disabled by iwm_intr is restored.
 */
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1, r2;
	int isperiodic = 0, s;

	if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		tmp = htole32(ict[sc->ict_cur]);
		if (tmp == 0)
			goto out_ena;	/* Interrupt not for us. */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		/* Non-ICT path: read the cause registers directly. */
		r1 = IWM_READ(sc, IWM_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			return;	/* Hardware gone! */
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* Interrupt not for us. */
	}

	/* Acknowledge interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
	if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
 fatal:
		s = splnet();
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		splx(s);
		/* Don't restore interrupt mask */
		return;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		/* Rfkill switch thrown while running is treated as fatal. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
			goto fatal;
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic RX interrupt if no real RX is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

out_ena:
	iwm_restore_interrupts(sc);
}
7361 
7362 /*
7363  * Autoconf glue-sniffing
7364  */
7365 
/* PCI product IDs of all Intel wireless devices this driver supports. */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
};
7380 
7381 static int
7382 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7383 {
7384 	struct pci_attach_args *pa = aux;
7385 
7386 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7387 		return 0;
7388 
7389 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7390 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7391 			return 1;
7392 
7393 	return 0;
7394 }
7395 
/*
 * Deferred part of device attachment, run from the mountroot hook:
 * start the hardware once, run the init firmware (which yields the NVM
 * contents including the MAC address), then attach the 802.11 layer.
 * Idempotent: returns immediately once IWM_FLAG_ATTACHED is set.
 * Returns 0 on success or an error from hardware/firmware bring-up.
 */
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Run the init ucode just to read NVM; stop the device afterward. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
7445 
7446 static void
7447 iwm_attach_hook(device_t dev)
7448 {
7449 	struct iwm_softc *sc = device_private(dev);
7450 
7451 	iwm_preinit(sc);
7452 }
7453 
/*
 * Autoconf attach: map PCI resources, install the interrupt handler,
 * identify the chip variant (selecting the firmware image), allocate
 * all DMA memory and TX/RX rings, create sysctl nodes, and attach the
 * network interface.  Firmware-dependent setup is deferred to
 * iwm_attach_hook at mountroot time.  On allocation failure, resources
 * acquired so far are released via the fail* labels.
 */
static void
iwm_attach(device_t parent, device_t self, void *aux)
{
	struct iwm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t reg, memtype;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr;
	int err;
	int txq_i;
	const struct sysctlnode *node;

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pciid = pa->pa_id;

	pci_aprint_devinfo(pa, NULL);

	if (workqueue_create(&sc->sc_nswq, "iwmns",
	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: newstate",
		    device_xname(self));
	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
	if (sc->sc_soft_ih == NULL)
		panic("%s: could not establish softint", device_xname(self));

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (err == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map the device registers (BAR 0). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (err) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (err) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		return;
	}
	/* INTx needs the command-register disable bit cleared; MSI(-X) set. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	else
		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
	    IPL_NET, iwm_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_wantresp = IWM_CMD_RESP_IDLE;

	/* Select firmware image and device quirks by PCI product ID. */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	switch (PCI_PRODUCT(sc->sc_pciid)) {
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
		sc->sc_fwname = "iwlwifi-3160-16.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
		sc->sc_fwname = "iwlwifi-7265D-17.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
		sc->sc_fwname = "iwlwifi-7260-16.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
		/* 7265D revision uses a different firmware image. */
		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
		    IWM_CSR_HW_REV_TYPE_7265D ?
		    "iwlwifi-7265D-17.ucode": "iwlwifi-7265-16.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
		sc->sc_fwname = "iwlwifi-8000C-16.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		break;
	default:
		aprint_error_dev(self, "unknown product %#x",
		    PCI_PRODUCT(sc->sc_pciid));
		return;
	}
	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!err) {
			aprint_error_dev(sc->sc_dev,
			    "failed to wake up the nic\n");
			return;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			aprint_error_dev(sc->sc_dev,
			    "failed to lock the nic\n");
			return;
		}
	}

	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
	    16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate interrupt cause table (ICT).*/
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
	    1 << IWM_ICT_PADDR_SHIFT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	/* Per-controller sysctl node: hw.iwmN. */
	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("iwm per-controller controls"),
	    NULL, 0, NULL, 0,
	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create iwm per-controller sysctl node\n");
	}
	if (err == 0) {
		int iwm_nodenum = node->sysctl_num;

		/* Reload firmware sysctl node */
		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
		    SYSCTL_DESCR("Reload firmware"),
		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
		    CTL_EOL)) != 0) {
			aprint_normal_dev(sc->sc_dev,
			    "couldn't create load_fw sysctl node\n");
		}
	}

	/*
	 * Attach interface
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
#ifdef notyet
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IEEE80211_NO_HT
	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#endif

	/* all hardware can do 2.4GHz band */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_initialize(ifp);
#if 0
	ieee80211_ifattach(ic);
#else
	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
#endif
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
	callout_init(&sc->sc_led_blink_to, 0);
	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
#ifndef IEEE80211_NO_HT
	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: setrates",
		    device_xname(self));
	if (workqueue_create(&sc->sc_bawq, "iwmba",
	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: blockack",
		    device_xname(self));
	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: htprot",
		    device_xname(self));
#endif

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk.  So we postpone until mountroot is done.
	 * Notably, this will require a full driver unload/load cycle
	 * (or reboot) in case the firmware is not present when the
	 * hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

	/* Unwind allocations in reverse order on failure. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_rx_ring(sc, &sc->rxq);
	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);
fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
}
7836 
7837 void
7838 iwm_radiotap_attach(struct iwm_softc *sc)
7839 {
7840 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7841 
7842 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
7843 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
7844 	    &sc->sc_drvbpf);
7845 
7846 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
7847 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
7848 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
7849 
7850 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
7851 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
7852 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
7853 }
7854 
7855 #if 0
/*
 * (Currently compiled out.)  Restart task: stop the interface and, if
 * it was administratively up but not running, bring it back up.  Runs
 * under the ioctl lock at splnet.
 */
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}
7873 
/*
 * (Currently compiled out.)  Resume handler: redo the PCI retry-timeout
 * workaround that is lost across suspend, then restart the device.
 */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}
7885 
/*
 * (Currently compiled out.)  Autoconf activate hook: stop the interface
 * on deactivation; all other actions are unsupported.
 */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
7901 #endif
7902 
/* Autoconf glue; no detach or activate entry points are provided. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
7905 
7906 static int
7907 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
7908 {
7909 	struct sysctlnode node;
7910 	struct iwm_softc *sc;
7911 	int err, t;
7912 
7913 	node = *rnode;
7914 	sc = node.sysctl_data;
7915 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
7916 	node.sysctl_data = &t;
7917 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
7918 	if (err || newp == NULL)
7919 		return err;
7920 
7921 	if (t == 0)
7922 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
7923 	return 0;
7924 }
7925 
/*
 * Create the global hw.iwm sysctl subtree (and, on IWM_DEBUG kernels,
 * the hw.iwm.debug knob controlling DPRINTF output).  Per-controller
 * nodes are added later in iwm_attach.
 */
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	/* Remember the node number for per-controller nodes in attach. */
	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
7956