1 /*	$NetBSD: if_iwm.c,v 1.77 2018/01/10 18:39:50 mlelstv Exp $	*/
2 /*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
3 #define IEEE80211_NO_HT
4 /*
5  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
6  *   Author: Stefan Sperling <stsp@openbsd.org>
7  * Copyright (c) 2014 Fixup Software Ltd.
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016        Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <linuxwifi@intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
61  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
62  * Copyright(c) 2016        Intel Deutschland GmbH
63  * All rights reserved.
64  *
65  * Redistribution and use in source and binary forms, with or without
66  * modification, are permitted provided that the following conditions
67  * are met:
68  *
69  *  * Redistributions of source code must retain the above copyright
70  *    notice, this list of conditions and the following disclaimer.
71  *  * Redistributions in binary form must reproduce the above copyright
72  *    notice, this list of conditions and the following disclaimer in
73  *    the documentation and/or other materials provided with the
74  *    distribution.
75  *  * Neither the name Intel Corporation nor the names of its
76  *    contributors may be used to endorse or promote products derived
77  *    from this software without specific prior written permission.
78  *
79  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
80  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
81  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
82  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
83  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
84  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
85  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
86  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
87  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
88  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
89  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90  */
91 
92 /*-
93  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
94  *
95  * Permission to use, copy, modify, and distribute this software for any
96  * purpose with or without fee is hereby granted, provided that the above
97  * copyright notice and this permission notice appear in all copies.
98  *
99  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
100  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
101  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
102  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
103  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
104  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
105  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106  */
107 
108 #include <sys/cdefs.h>
109 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.77 2018/01/10 18:39:50 mlelstv Exp $");
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/kmem.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/socket.h>
119 #include <sys/sockio.h>
120 #include <sys/sysctl.h>
121 #include <sys/systm.h>
122 
123 #include <sys/cpu.h>
124 #include <sys/bus.h>
125 #include <sys/workqueue.h>
126 #include <machine/endian.h>
127 #include <sys/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 #include <dev/firmload.h>
133 
134 #include <net/bpf.h>
135 #include <net/if.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_ether.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/ip.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_radiotap.h>
146 
147 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
148 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
149 
150 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
151 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
152 
153 #ifdef IWM_DEBUG
154 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
155 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
156 int iwm_debug = 0;
157 #else
158 #define DPRINTF(x)	do { ; } while (0)
159 #define DPRINTFN(n, x)	do { ; } while (0)
160 #endif
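
/*
 * Note the double parentheses at call sites: both macros take a single
 * parenthesized printf(9) argument list, e.g.
 *
 *	DPRINTF(("%s: resetting device\n", DEVNAME(sc)));
 *	DPRINTFN(2, ("%s: tlv type %d\n", DEVNAME(sc), tlv_type));
 */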
161 
162 #include <dev/pci/if_iwmreg.h>
163 #include <dev/pci/if_iwmvar.h>
164 
165 static const uint8_t iwm_nvm_channels[] = {
166 	/* 2.4 GHz */
167 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
168 	/* 5 GHz */
169 	36, 40, 44, 48, 52, 56, 60, 64,
170 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
171 	149, 153, 157, 161, 165
172 };
173 
174 static const uint8_t iwm_nvm_channels_8000[] = {
175 	/* 2.4 GHz */
176 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
177 	/* 5 GHz */
178 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
179 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
180 	149, 153, 157, 161, 165, 169, 173, 177, 181
181 };
182 
183 #define IWM_NUM_2GHZ_CHANNELS	14
184 
185 static const struct iwm_rate {
186 	uint8_t rate;
187 	uint8_t plcp;
188 	uint8_t ht_plcp;
189 } iwm_rates[] = {
190 		/* Legacy */		/* HT */
191 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
192 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
193 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
194 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
195 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
196 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
198 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
199 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
200 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
201 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
202 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
203 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
204 };
205 #define IWM_RIDX_CCK	0
206 #define IWM_RIDX_OFDM	4
207 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
208 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
209 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
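
/*
 * In iwm_rates[], 'rate' is in units of 500kb/s (so 2 == 1Mb/s and
 * 108 == 54Mb/s), 'plcp' is the legacy PLCP signal value for that rate,
 * and 'ht_plcp' is the HT SISO MCS equivalent, with the _INV_ sentinel
 * marking CCK/OFDM rates that have no HT counterpart.
 */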
210 
211 #ifndef IEEE80211_NO_HT
212 /* Convert an MCS index into an iwm_rates[] index. */
213 static const int iwm_mcs2ridx[] = {
214 	IWM_RATE_MCS_0_INDEX,
215 	IWM_RATE_MCS_1_INDEX,
216 	IWM_RATE_MCS_2_INDEX,
217 	IWM_RATE_MCS_3_INDEX,
218 	IWM_RATE_MCS_4_INDEX,
219 	IWM_RATE_MCS_5_INDEX,
220 	IWM_RATE_MCS_6_INDEX,
221 	IWM_RATE_MCS_7_INDEX,
222 };
223 #endif
224 
225 struct iwm_nvm_section {
226 	uint16_t length;
227 	uint8_t *data;
228 };
229 
230 struct iwm_newstate_state {
231 	struct work ns_wk;
232 	enum ieee80211_state ns_nstate;
233 	int ns_arg;
234 	int ns_generation;
235 };
236 
237 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
238 static int	iwm_firmware_store_section(struct iwm_softc *,
239 		    enum iwm_ucode_type, uint8_t *, size_t);
240 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
241 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
242 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
243 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
244 #ifdef IWM_DEBUG
245 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
246 #endif
247 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
248 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
249 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
250 static int	iwm_nic_lock(struct iwm_softc *);
251 static void	iwm_nic_unlock(struct iwm_softc *);
252 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
253 		    uint32_t);
254 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
255 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
256 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
257 		    bus_size_t, bus_size_t);
258 static void	iwm_dma_contig_free(struct iwm_dma_info *);
259 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
260 static void	iwm_disable_rx_dma(struct iwm_softc *);
261 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
262 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
263 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
264 		    int);
265 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
266 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
267 static void	iwm_enable_rfkill_int(struct iwm_softc *);
268 static int	iwm_check_rfkill(struct iwm_softc *);
269 static void	iwm_enable_interrupts(struct iwm_softc *);
270 static void	iwm_restore_interrupts(struct iwm_softc *);
271 static void	iwm_disable_interrupts(struct iwm_softc *);
272 static void	iwm_ict_reset(struct iwm_softc *);
273 static int	iwm_set_hw_ready(struct iwm_softc *);
274 static int	iwm_prepare_card_hw(struct iwm_softc *);
275 static void	iwm_apm_config(struct iwm_softc *);
276 static int	iwm_apm_init(struct iwm_softc *);
277 static void	iwm_apm_stop(struct iwm_softc *);
278 static int	iwm_allow_mcast(struct iwm_softc *);
279 static int	iwm_start_hw(struct iwm_softc *);
280 static void	iwm_stop_device(struct iwm_softc *);
281 static void	iwm_nic_config(struct iwm_softc *);
282 static int	iwm_nic_rx_init(struct iwm_softc *);
283 static int	iwm_nic_tx_init(struct iwm_softc *);
284 static int	iwm_nic_init(struct iwm_softc *);
285 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
286 static int	iwm_post_alive(struct iwm_softc *);
287 static struct iwm_phy_db_entry *
288 		iwm_phy_db_get_section(struct iwm_softc *,
289 		    enum iwm_phy_db_section_type, uint16_t);
290 static int	iwm_phy_db_set_section(struct iwm_softc *,
291 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
292 static int	iwm_is_valid_channel(uint16_t);
293 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
294 static uint16_t iwm_channel_id_to_papd(uint16_t);
295 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
296 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
297 		    uint8_t **, uint16_t *, uint16_t);
298 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
299 		    void *);
300 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
301 		    enum iwm_phy_db_section_type, uint8_t);
302 static int	iwm_send_phy_db_data(struct iwm_softc *);
303 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
304 		    struct iwm_time_event_cmd_v1 *);
305 static int	iwm_send_time_event_cmd(struct iwm_softc *,
306 		    const struct iwm_time_event_cmd_v2 *);
307 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
308 		    uint32_t, uint32_t);
309 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
310 		    uint16_t, uint8_t *, uint16_t *);
311 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
312 		    uint16_t *, size_t);
313 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
314 		    const uint8_t *, size_t);
315 #ifndef IEEE80211_NO_HT
316 static void	iwm_setup_ht_rates(struct iwm_softc *);
317 static void	iwm_htprot_task(void *);
318 static void	iwm_update_htprot(struct ieee80211com *,
319 		    struct ieee80211_node *);
320 static int	iwm_ampdu_rx_start(struct ieee80211com *,
321 		    struct ieee80211_node *, uint8_t);
322 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
323 		    struct ieee80211_node *, uint8_t);
324 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
325 		    uint8_t, uint16_t, int);
326 #ifdef notyet
327 static int	iwm_ampdu_tx_start(struct ieee80211com *,
328 		    struct ieee80211_node *, uint8_t);
329 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
330 		    struct ieee80211_node *, uint8_t);
331 #endif
332 static void	iwm_ba_task(void *);
333 #endif
334 
335 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
336 		    const uint16_t *, const uint16_t *, const uint16_t *,
337 		    const uint16_t *, const uint16_t *);
338 static void	iwm_set_hw_address_8000(struct iwm_softc *,
339 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
340 static int	iwm_parse_nvm_sections(struct iwm_softc *,
341 		    struct iwm_nvm_section *);
342 static int	iwm_nvm_init(struct iwm_softc *);
343 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
344 		    const uint8_t *, uint32_t);
345 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
346 		    const uint8_t *, uint32_t);
347 static int	iwm_load_cpu_sections_7000(struct iwm_softc *,
348 		    struct iwm_fw_sects *, int, int *);
349 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
350 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
351 		    struct iwm_fw_sects *, int, int *);
352 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
353 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
354 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
355 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
356 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
357 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
358 		    enum iwm_ucode_type);
359 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
360 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
361 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
362 static int	iwm_get_signal_strength(struct iwm_softc *,
363 		    struct iwm_rx_phy_info *);
364 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
365 		    struct iwm_rx_packet *, struct iwm_rx_data *);
366 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
367 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
368 		    struct iwm_rx_data *);
369 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_node *);
370 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
371 		    struct iwm_rx_data *);
372 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
373 		    uint32_t);
374 #if 0
375 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
376 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
377 #endif
378 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
379 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
380 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
381 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
382 		    uint8_t, uint8_t);
383 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
384 		    uint8_t, uint8_t, uint32_t, uint32_t);
385 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
386 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
387 		    uint16_t, const void *);
388 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
389 		    uint32_t *);
390 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
391 		    const void *, uint32_t *);
392 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
393 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
394 #if 0
395 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
396 		    uint16_t);
397 #endif
398 static const struct iwm_rate *
399 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
400 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
401 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
402 		    struct ieee80211_node *, int);
403 static void	iwm_led_enable(struct iwm_softc *);
404 static void	iwm_led_disable(struct iwm_softc *);
405 static int	iwm_led_is_enabled(struct iwm_softc *);
406 static void	iwm_led_blink_timeout(void *);
407 static void	iwm_led_blink_start(struct iwm_softc *);
408 static void	iwm_led_blink_stop(struct iwm_softc *);
409 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
410 		    struct iwm_beacon_filter_cmd *);
411 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
412 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
413 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
414 		    int);
415 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
416 		    struct iwm_mac_power_cmd *);
417 static int	iwm_power_mac_update_mode(struct iwm_softc *,
418 		    struct iwm_node *);
419 static int	iwm_power_update_device(struct iwm_softc *);
420 #ifdef notyet
421 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
422 #endif
423 static int	iwm_disable_beacon_filter(struct iwm_softc *);
424 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
425 static int	iwm_add_aux_sta(struct iwm_softc *);
426 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
427 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
428 #ifdef notyet
429 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
430 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
431 #endif
432 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
433 		    struct iwm_scan_channel_cfg_lmac *, int);
434 static int	iwm_fill_probe_req(struct iwm_softc *,
435 		    struct iwm_scan_probe_req *);
436 static int	iwm_lmac_scan(struct iwm_softc *);
437 static int	iwm_config_umac_scan(struct iwm_softc *);
438 static int	iwm_umac_scan(struct iwm_softc *);
439 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
440 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
441 		    int *);
442 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
443 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
444 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
445 		    struct iwm_mac_data_sta *, int);
446 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
447 		    uint32_t, int);
448 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
449 static int	iwm_auth(struct iwm_softc *);
450 static int	iwm_assoc(struct iwm_softc *);
451 static void	iwm_calib_timeout(void *);
452 #ifndef IEEE80211_NO_HT
453 static void	iwm_setrates_task(void *);
454 static int	iwm_setrates(struct iwm_node *);
455 #endif
456 static int	iwm_media_change(struct ifnet *);
457 static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
458 		    int);
459 static void	iwm_newstate_cb(struct work *, void *);
460 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
461 static void	iwm_endscan(struct iwm_softc *);
462 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
463 		    struct ieee80211_node *);
464 static int	iwm_sf_config(struct iwm_softc *, int);
465 static int	iwm_send_bt_init_conf(struct iwm_softc *);
466 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
467 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
468 static int	iwm_init_hw(struct iwm_softc *);
469 static int	iwm_init(struct ifnet *);
470 static void	iwm_start(struct ifnet *);
471 static void	iwm_stop(struct ifnet *, int);
472 static void	iwm_watchdog(struct ifnet *);
473 static int	iwm_ioctl(struct ifnet *, u_long, void *);
474 #ifdef IWM_DEBUG
475 static const char *iwm_desc_lookup(uint32_t);
476 static void	iwm_nic_error(struct iwm_softc *);
477 static void	iwm_nic_umac_error(struct iwm_softc *);
478 #endif
479 static void	iwm_notif_intr(struct iwm_softc *);
480 static int	iwm_intr(void *);
481 static void	iwm_softintr(void *);
482 static int	iwm_preinit(struct iwm_softc *);
483 static void	iwm_attach_hook(device_t);
484 static void	iwm_attach(device_t, device_t, void *);
485 #if 0
486 static void	iwm_init_task(void *);
487 static int	iwm_activate(device_t, enum devact);
488 static void	iwm_wakeup(struct iwm_softc *);
489 #endif
490 static void	iwm_radiotap_attach(struct iwm_softc *);
491 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
492 
493 static int iwm_sysctl_root_num;
494 static int iwm_lar_disable;
495 
496 #ifndef	IWM_DEFAULT_MCC
497 #define	IWM_DEFAULT_MCC	"ZZ"
498 #endif
499 static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;
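
/*
 * "ZZ" is the wildcard country code; it asks the firmware for its
 * built-in world-wide regulatory domain.
 */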
500 
501 static int
502 iwm_firmload(struct iwm_softc *sc)
503 {
504 	struct iwm_fw_info *fw = &sc->sc_fw;
505 	firmware_handle_t fwh;
506 	int err;
507 
508 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
509 		return 0;
510 
511 	/* Open firmware image. */
512 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
513 	if (err) {
514 		aprint_error_dev(sc->sc_dev,
515 		    "could not get firmware handle %s\n", sc->sc_fwname);
516 		return err;
517 	}
518 
519 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
520 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
521 		fw->fw_rawdata = NULL;
522 	}
523 
524 	fw->fw_rawsize = firmware_get_size(fwh);
525 	/*
526 	 * Well, this is how the Linux driver checks it ....
527 	 */
528 	if (fw->fw_rawsize < sizeof(uint32_t)) {
529 		aprint_error_dev(sc->sc_dev,
530 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
531 		err = EINVAL;
532 		goto out;
533 	}
534 
535 	/* Read the firmware. */
536 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
537 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
538 	if (err) {
539 		aprint_error_dev(sc->sc_dev,
540 		    "could not read firmware %s\n", sc->sc_fwname);
541 		goto out;
542 	}
543 
544 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
545  out:
546 	/* caller will release memory, if necessary */
547 
548 	firmware_close(fwh);
549 	return err;
550 }
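
/*
 * The image is read whole into kmem above and only parsed later by
 * iwm_read_firmware(); IWM_FLAG_FW_LOADED caches the raw image so the
 * firmload(9) lookup (typically /libdata/firmware/if_iwm/<sc_fwname>)
 * is skipped on subsequent reinitializations.
 */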
551 
552 /*
553  * Just maintaining status quo: fix up ic_curchan from the last PHY info.
554  */
555 static void
556 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
557 {
558 	struct ieee80211com *ic = &sc->sc_ic;
559 	struct ieee80211_frame *wh;
560 	uint8_t subtype;
561 
562 	wh = mtod(m, struct ieee80211_frame *);
563 
564 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
565 		return;
566 
567 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
568 
569 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
570 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
571 		return;
572 
573 	int chan = le32toh(sc->sc_last_phy_info.channel);
574 	if (chan < __arraycount(ic->ic_channels))
575 		ic->ic_curchan = &ic->ic_channels[chan];
576 }
577 
578 static int
579 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
580 {
581 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
582 
583 	if (dlen < sizeof(*l) ||
584 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
585 		return EINVAL;
586 
587 	/* we don't actually store anything for now, always use s/w crypto */
588 
589 	return 0;
590 }
591 
592 static int
593 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
594     uint8_t *data, size_t dlen)
595 {
596 	struct iwm_fw_sects *fws;
597 	struct iwm_fw_onesect *fwone;
598 
599 	if (type >= IWM_UCODE_TYPE_MAX)
600 		return EINVAL;
601 	if (dlen < sizeof(uint32_t))
602 		return EINVAL;
603 
604 	fws = &sc->sc_fw.fw_sects[type];
605 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
606 		return EINVAL;
607 
608 	fwone = &fws->fw_sect[fws->fw_count];
609 
610 	/* first 32 bits are the device load offset */
611 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
612 
613 	/* rest is data */
614 	fwone->fws_data = data + sizeof(uint32_t);
615 	fwone->fws_len = dlen - sizeof(uint32_t);
616 
617 	/* for freeing the buffer during driver unload */
618 	fwone->fws_alloc = data;
619 	fwone->fws_allocsize = dlen;
620 
621 	fws->fw_count++;
622 	fws->fw_totlen += fwone->fws_len;
623 
624 	return 0;
625 }
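
/*
 * A stored firmware section is therefore laid out as:
 *
 *	offset 0: uint32_t devoff   (device address to load the section at)
 *	offset 4: uint8_t  data[]   (dlen - 4 bytes of section image)
 *
 * fws_alloc/fws_allocsize record the containing buffer so it can be
 * freed when the firmware image is released.
 */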
626 
627 struct iwm_tlv_calib_data {
628 	uint32_t ucode_type;
629 	struct iwm_tlv_calib_ctrl calib;
630 } __packed;
631 
632 static int
633 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
634 {
635 	const struct iwm_tlv_calib_data *def_calib = data;
636 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
637 
638 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
639 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
640 		    DEVNAME(sc), ucode_type));
641 		return EINVAL;
642 	}
643 
644 	sc->sc_default_calib[ucode_type].flow_trigger =
645 	    def_calib->calib.flow_trigger;
646 	sc->sc_default_calib[ucode_type].event_trigger =
647 	    def_calib->calib.event_trigger;
648 
649 	return 0;
650 }
651 
652 static int
653 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
654 {
655 	struct iwm_fw_info *fw = &sc->sc_fw;
656 	struct iwm_tlv_ucode_header *uhdr;
657 	struct iwm_ucode_tlv tlv;
658 	enum iwm_ucode_tlv_type tlv_type;
659 	uint8_t *data;
660 	int err, status;
661 	size_t len;
662 
663 	if (ucode_type != IWM_UCODE_TYPE_INIT &&
664 	    fw->fw_status == IWM_FW_STATUS_DONE)
665 		return 0;
666 
667 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
668 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
669 	} else {
670 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
671 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
672 	}
673 	status = fw->fw_status;
674 
675 	if (status == IWM_FW_STATUS_DONE)
676 		return 0;
677 
678 	err = iwm_firmload(sc);
679 	if (err) {
680 		aprint_error_dev(sc->sc_dev,
681 		    "could not read firmware %s (error %d)\n",
682 		    sc->sc_fwname, err);
683 		goto out;
684 	}
685 
686 	sc->sc_capaflags = 0;
687 	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
688 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
689 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
690 
691 	uhdr = (void *)fw->fw_rawdata;
692 	if (*(uint32_t *)fw->fw_rawdata != 0
693 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
694 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
695 		    sc->sc_fwname);
696 		err = EINVAL;
697 		goto out;
698 	}
699 
700 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
701 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
702 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
703 	    IWM_UCODE_API(le32toh(uhdr->ver)));
704 	data = uhdr->data;
705 	len = fw->fw_rawsize - sizeof(*uhdr);
706 
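	/*
	 * What follows is a TLV stream: each struct iwm_ucode_tlv header
	 * gives a type and payload length, the payload follows the header,
	 * and the next TLV begins at the following 4-byte boundary (hence
	 * the roundup() at the bottom of the loop).
	 */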
707 	while (len >= sizeof(tlv)) {
708 		size_t tlv_len;
709 		void *tlv_data;
710 
711 		memcpy(&tlv, data, sizeof(tlv));
712 		tlv_len = le32toh(tlv.length);
713 		tlv_type = le32toh(tlv.type);
714 
715 		len -= sizeof(tlv);
716 		data += sizeof(tlv);
717 		tlv_data = data;
718 
719 		if (len < tlv_len) {
720 			aprint_error_dev(sc->sc_dev,
721 			    "firmware too short: %zu bytes\n", len);
722 			err = EINVAL;
723 			goto parse_out;
724 		}
725 
726 		switch (tlv_type) {
727 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
728 			if (tlv_len < sizeof(uint32_t)) {
729 				err = EINVAL;
730 				goto parse_out;
731 			}
732 			sc->sc_capa_max_probe_len
733 			    = le32toh(*(uint32_t *)tlv_data);
734 			/* limit it to something sensible */
735 			if (sc->sc_capa_max_probe_len >
736 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
737 				err = EINVAL;
738 				goto parse_out;
739 			}
740 			break;
741 		case IWM_UCODE_TLV_PAN:
742 			if (tlv_len) {
743 				err = EINVAL;
744 				goto parse_out;
745 			}
746 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
747 			break;
748 		case IWM_UCODE_TLV_FLAGS:
749 			if (tlv_len < sizeof(uint32_t)) {
750 				err = EINVAL;
751 				goto parse_out;
752 			}
753 			if (tlv_len % sizeof(uint32_t)) {
754 				err = EINVAL;
755 				goto parse_out;
756 			}
757 			/*
758 			 * Apparently there can be many flags, but Linux driver
759 			 * parses only the first one, and so do we.
760 			 *
761 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
762 			 * Intentional or a bug?  Observations from
763 			 * current firmware file:
764 			 *  1) TLV_PAN is parsed first
765 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
766 			 * ==> this resets TLV_PAN to itself... hnnnk
767 			 */
768 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
769 			break;
770 		case IWM_UCODE_TLV_CSCHEME:
771 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
772 			if (err)
773 				goto parse_out;
774 			break;
775 		case IWM_UCODE_TLV_NUM_OF_CPU: {
776 			uint32_t num_cpu;
777 			if (tlv_len != sizeof(uint32_t)) {
778 				err = EINVAL;
779 				goto parse_out;
780 			}
781 			num_cpu = le32toh(*(uint32_t *)tlv_data);
782 			if (num_cpu == 2) {
783 				fw->fw_sects[IWM_UCODE_TYPE_REGULAR].is_dual_cpus =
784 				    true;
785 				fw->fw_sects[IWM_UCODE_TYPE_INIT].is_dual_cpus =
786 				    true;
787 				fw->fw_sects[IWM_UCODE_TYPE_WOW].is_dual_cpus =
788 				    true;
789 			} else if (num_cpu < 1 || num_cpu > 2) {
790 				err = EINVAL;
791 				goto parse_out;
792 			}
793 			break;
794 		}
795 		case IWM_UCODE_TLV_SEC_RT:
796 			err = iwm_firmware_store_section(sc,
797 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
798 			if (err)
799 				goto parse_out;
800 			break;
801 		case IWM_UCODE_TLV_SEC_INIT:
802 			err = iwm_firmware_store_section(sc,
803 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
804 			if (err)
805 				goto parse_out;
806 			break;
807 		case IWM_UCODE_TLV_SEC_WOWLAN:
808 			err = iwm_firmware_store_section(sc,
809 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
810 			if (err)
811 				goto parse_out;
812 			break;
813 		case IWM_UCODE_TLV_DEF_CALIB:
814 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
815 				err = EINVAL;
816 				goto parse_out;
817 			}
818 			err = iwm_set_default_calib(sc, tlv_data);
819 			if (err)
820 				goto parse_out;
821 			break;
822 		case IWM_UCODE_TLV_PHY_SKU:
823 			if (tlv_len != sizeof(uint32_t)) {
824 				err = EINVAL;
825 				goto parse_out;
826 			}
827 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
828 			break;
829 
830 		case IWM_UCODE_TLV_API_CHANGES_SET: {
831 			struct iwm_ucode_api *api;
832 			uint32_t idx, bits;
833 			int i;
834 			if (tlv_len != sizeof(*api)) {
835 				err = EINVAL;
836 				goto parse_out;
837 			}
838 			api = (struct iwm_ucode_api *)tlv_data;
839 			idx = le32toh(api->api_index);
840 			bits = le32toh(api->api_flags);
841 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
842 				err = EINVAL;
843 				goto parse_out;
844 			}
845 			for (i = 0; i < 32; i++) {
846 				if (!ISSET(bits, __BIT(i)))
847 					continue;
848 				setbit(sc->sc_ucode_api, i + (32 * idx));
849 			}
850 			break;
851 		}
852 
853 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
854 			struct iwm_ucode_capa *capa;
855 			uint32_t idx, bits;
856 			int i;
857 			if (tlv_len != sizeof(*capa)) {
858 				err = EINVAL;
859 				goto parse_out;
860 			}
861 			capa = (struct iwm_ucode_capa *)tlv_data;
862 			idx = le32toh(capa->api_index);
863 			bits = le32toh(capa->api_capa);
864 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
865 				err = EINVAL;
866 				goto parse_out;
867 			}
868 			for (i = 0; i < 32; i++) {
869 				if (!ISSET(bits, __BIT(i)))
870 					continue;
871 				setbit(sc->sc_enabled_capa, i + (32 * idx));
872 			}
873 			break;
874 		}
875 
876 		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
877 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
878 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
879 		case IWM_UCODE_TLV_FW_MEM_SEG:
880 			/* ignore, not used by current driver */
881 			break;
882 
883 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
884 			err = iwm_firmware_store_section(sc,
885 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
886 			    tlv_len);
887 			if (err)
888 				goto parse_out;
889 			break;
890 
891 		case IWM_UCODE_TLV_PAGING: {
892 			uint32_t paging_mem_size;
893 			if (tlv_len != sizeof(paging_mem_size)) {
894 				err = EINVAL;
895 				goto parse_out;
896 			}
897 			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
898 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
899 				err = EINVAL;
900 				goto parse_out;
901 			}
902 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
903 				err = EINVAL;
904 				goto parse_out;
905 			}
906 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
907 			    paging_mem_size;
908 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
909 			    paging_mem_size;
910 			break;
911 		}
912 
913 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
914 			if (tlv_len != sizeof(uint32_t)) {
915 				err = EINVAL;
916 				goto parse_out;
917 			}
918 			sc->sc_capa_n_scan_channels =
919 			  le32toh(*(uint32_t *)tlv_data);
920 			break;
921 
922 		case IWM_UCODE_TLV_FW_VERSION:
923 			if (tlv_len != sizeof(uint32_t) * 3) {
924 				err = EINVAL;
925 				goto parse_out;
926 			}
927 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
928 			    "%d.%d.%d",
929 			    le32toh(((uint32_t *)tlv_data)[0]),
930 			    le32toh(((uint32_t *)tlv_data)[1]),
931 			    le32toh(((uint32_t *)tlv_data)[2]));
932 			break;
933 
934 		default:
935 			DPRINTF(("%s: unknown firmware section %d, abort\n",
936 			    DEVNAME(sc), tlv_type));
937 			err = EINVAL;
938 			goto parse_out;
939 		}
940 
941 		len -= roundup(tlv_len, 4);
942 		data += roundup(tlv_len, 4);
943 	}
944 
945 	KASSERT(err == 0);
946 
947  parse_out:
948 	if (err) {
949 		aprint_error_dev(sc->sc_dev,
950 		    "firmware parse error, section type %d\n", tlv_type);
951 	}
952 
953 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
954 		aprint_error_dev(sc->sc_dev,
955 		    "device uses unsupported power ops\n");
956 		err = ENOTSUP;
957 	}
958 
959  out:
960 	if (err)
961 		fw->fw_status = IWM_FW_STATUS_NONE;
962 	else
963 		fw->fw_status = IWM_FW_STATUS_DONE;
964 	wakeup(&sc->sc_fw);
965 
966 	if (err && fw->fw_rawdata != NULL) {
967 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
968 		fw->fw_rawdata = NULL;
969 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
970 		/* don't touch fw->fw_status */
971 		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
972 	}
973 	return err;
974 }
975 
976 static uint32_t
977 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
978 {
979 	IWM_WRITE(sc,
980 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
981 	IWM_BARRIER_READ_WRITE(sc);
982 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
983 }
984 
985 static void
986 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
987 {
988 	IWM_WRITE(sc,
989 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
990 	IWM_BARRIER_WRITE(sc);
991 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
992 }
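
/*
 * Periphery (PRPH) registers are reached indirectly: the low 20 bits of
 * the target address go into HBUS_TARG_PRPH_{R,W}ADDR along with
 * (3 << 24), which appears to select a full 4-byte access, and data is
 * then transferred through HBUS_TARG_PRPH_{R,W}DAT.  Callers normally
 * bracket these accesses with iwm_nic_lock()/iwm_nic_unlock().
 */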
993 
994 #ifdef IWM_DEBUG
995 static int
996 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
997 {
998 	int offs;
999 	uint32_t *vals = buf;
1000 
1001 	if (iwm_nic_lock(sc)) {
1002 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1003 		for (offs = 0; offs < dwords; offs++)
1004 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1005 		iwm_nic_unlock(sc);
1006 		return 0;
1007 	}
1008 	return EBUSY;
1009 }
1010 #endif
1011 
1012 static int
1013 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1014 {
1015 	int offs;
1016 	const uint32_t *vals = buf;
1017 
1018 	if (iwm_nic_lock(sc)) {
1019 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1020 		/* WADDR auto-increments */
1021 		for (offs = 0; offs < dwords; offs++) {
1022 			uint32_t val = vals ? vals[offs] : 0;
1023 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1024 		}
1025 		iwm_nic_unlock(sc);
1026 		return 0;
1027 	}
1028 	return EBUSY;
1029 }
1030 
1031 static int
1032 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1033 {
1034 	return iwm_write_mem(sc, addr, &val, 1);
1035 }
1036 
1037 static int
1038 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1039     int timo)
1040 {
1041 	for (;;) {
1042 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1043 			return 1;
1044 		}
1045 		if (timo < 10) {
1046 			return 0;
1047 		}
1048 		timo -= 10;
1049 		DELAY(10);
1050 	}
1051 }
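
/*
 * iwm_poll_bit() spins in 10-microsecond steps until the masked bits of
 * 'reg' equal 'bits', so 'timo' is effectively a timeout in
 * microseconds.  It returns nonzero on success and zero on timeout.
 */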
1052 
1053 static int
1054 iwm_nic_lock(struct iwm_softc *sc)
1055 {
1056 	int rv = 0;
1057 
1058 	if (sc->sc_cmd_hold_nic_awake)
1059 		return 1;
1060 
1061 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1062 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1063 
1064 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
1065 		DELAY(2);
1066 
1067 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1068 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1069 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1070 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
1071 		rv = 1;
1072 	} else {
1073 		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
1074 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
1075 	}
1076 
1077 	return rv;
1078 }
1079 
1080 static void
1081 iwm_nic_unlock(struct iwm_softc *sc)
1082 {
1083 
1084 	if (sc->sc_cmd_hold_nic_awake)
1085 		return;
1086 
1087 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1088 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1089 }
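
/*
 * Typical usage of the pair above: a successful iwm_nic_lock() (nonzero
 * return) must always be matched by iwm_nic_unlock(), e.g.
 *
 *	if (iwm_nic_lock(sc)) {
 *		... access PRPH registers or SRAM ...
 *		iwm_nic_unlock(sc);
 *	}
 */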
1090 
1091 static void
1092 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1093     uint32_t mask)
1094 {
1095 	uint32_t val;
1096 
1097 	/* XXX: no error path? */
1098 	if (iwm_nic_lock(sc)) {
1099 		val = iwm_read_prph(sc, reg) & mask;
1100 		val |= bits;
1101 		iwm_write_prph(sc, reg, val);
1102 		iwm_nic_unlock(sc);
1103 	}
1104 }
1105 
1106 static void
1107 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1108 {
1109 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1110 }
1111 
1112 static void
1113 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1114 {
1115 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1116 }
1117 
1118 static int
1119 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1120     bus_size_t size, bus_size_t alignment)
1121 {
1122 	int nsegs, err;
1123 	void *va;
1124 
1125 	dma->tag = tag;
1126 	dma->size = size;
1127 
1128 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1129 	    &dma->map);
1130 	if (err)
1131 		goto fail;
1132 
1133 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1134 	    BUS_DMA_NOWAIT);
1135 	if (err)
1136 		goto fail;
1137 
1138 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
1139 	if (err)
1140 		goto fail;
1141 	dma->vaddr = va;
1142 
1143 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1144 	    BUS_DMA_NOWAIT);
1145 	if (err)
1146 		goto fail;
1147 
1148 	memset(dma->vaddr, 0, size);
1149 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1150 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1151 
1152 	return 0;
1153 
1154 fail:	iwm_dma_contig_free(dma);
1155 	return err;
1156 }
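
/*
 * The above is the usual four-step bus_dma(9) sequence: create a map,
 * allocate one physically contiguous segment with the requested
 * alignment, map it into kernel virtual address space, and load it into
 * the map; see e.g. the 256-byte-aligned ring allocations below.
 */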
1157 
1158 static void
1159 iwm_dma_contig_free(struct iwm_dma_info *dma)
1160 {
1161 	if (dma->map != NULL) {
1162 		if (dma->vaddr != NULL) {
1163 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1164 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1165 			bus_dmamap_unload(dma->tag, dma->map);
1166 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1167 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1168 			dma->vaddr = NULL;
1169 		}
1170 		bus_dmamap_destroy(dma->tag, dma->map);
1171 		dma->map = NULL;
1172 	}
1173 }
1174 
1175 static int
1176 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1177 {
1178 	bus_size_t size;
1179 	int i, err;
1180 
1181 	ring->cur = 0;
1182 
1183 	/* Allocate RX descriptors (256-byte aligned). */
1184 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1185 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1186 	if (err) {
1187 		aprint_error_dev(sc->sc_dev,
1188 		    "could not allocate RX ring DMA memory\n");
1189 		goto fail;
1190 	}
1191 	ring->desc = ring->desc_dma.vaddr;
1192 
1193 	/* Allocate RX status area (16-byte aligned). */
1194 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1195 	    sizeof(*ring->stat), 16);
1196 	if (err) {
1197 		aprint_error_dev(sc->sc_dev,
1198 		    "could not allocate RX status DMA memory\n");
1199 		goto fail;
1200 	}
1201 	ring->stat = ring->stat_dma.vaddr;
1202 
1203 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1204 		struct iwm_rx_data *data = &ring->data[i];
1205 
1206 		memset(data, 0, sizeof(*data));
1207 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1208 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1209 		    &data->map);
1210 		if (err) {
1211 			aprint_error_dev(sc->sc_dev,
1212 			    "could not create RX buf DMA map\n");
1213 			goto fail;
1214 		}
1215 
1216 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1217 		if (err)
1218 			goto fail;
1219 	}
1220 	return 0;
1221 
1222 fail:	iwm_free_rx_ring(sc, ring);
1223 	return err;
1224 }
1225 
1226 static void
1227 iwm_disable_rx_dma(struct iwm_softc *sc)
1228 {
1229 	int ntries;
1230 
1231 	if (iwm_nic_lock(sc)) {
1232 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1233 		for (ntries = 0; ntries < 1000; ntries++) {
1234 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1235 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1236 				break;
1237 			DELAY(10);
1238 		}
1239 		iwm_nic_unlock(sc);
1240 	}
1241 }
1242 
1243 void
1244 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1245 {
1246 	ring->cur = 0;
1247 	memset(ring->stat, 0, sizeof(*ring->stat));
1248 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1249 	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1250 }
1251 
1252 static void
1253 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1254 {
1255 	int i;
1256 
1257 	iwm_dma_contig_free(&ring->desc_dma);
1258 	iwm_dma_contig_free(&ring->stat_dma);
1259 
1260 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1261 		struct iwm_rx_data *data = &ring->data[i];
1262 
1263 		if (data->m != NULL) {
1264 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1265 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1266 			bus_dmamap_unload(sc->sc_dmat, data->map);
1267 			m_freem(data->m);
1268 			data->m = NULL;
1269 		}
1270 		if (data->map != NULL) {
1271 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1272 			data->map = NULL;
1273 		}
1274 	}
1275 }
1276 
1277 static int
1278 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1279 {
1280 	bus_addr_t paddr;
1281 	bus_size_t size;
1282 	int i, err, nsegs;
1283 
1284 	ring->qid = qid;
1285 	ring->queued = 0;
1286 	ring->cur = 0;
1287 
1288 	/* Allocate TX descriptors (256-byte aligned). */
1289 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1290 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1291 	if (err) {
1292 		aprint_error_dev(sc->sc_dev,
1293 		    "could not allocate TX ring DMA memory\n");
1294 		goto fail;
1295 	}
1296 	ring->desc = ring->desc_dma.vaddr;
1297 
1298 	/*
1299 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1300 	 * to allocate command space for the other rings.
1301 	 */
1302 	if (qid > IWM_CMD_QUEUE)
1303 		return 0;
1304 
1305 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1306 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1307 	if (err) {
1308 		aprint_error_dev(sc->sc_dev,
1309 		    "could not allocate TX cmd DMA memory\n");
1310 		goto fail;
1311 	}
1312 	ring->cmd = ring->cmd_dma.vaddr;
1313 
1314 	paddr = ring->cmd_dma.paddr;
1315 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1316 		struct iwm_tx_data *data = &ring->data[i];
1317 		size_t mapsize;
1318 
1319 		data->cmd_paddr = paddr;
1320 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1321 		    + offsetof(struct iwm_tx_cmd, scratch);
1322 		paddr += sizeof(struct iwm_device_cmd);
1323 
1324 		/* FW commands may require more mapped space than packets. */
1325 		if (qid == IWM_CMD_QUEUE) {
1326 			mapsize = IWM_RBUF_SIZE;
1327 			nsegs = 1;
1328 		} else {
1329 			mapsize = MCLBYTES;
1330 			nsegs = IWM_NUM_OF_TBS - 2;
1331 		}
1332 		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
1333 		    0, BUS_DMA_NOWAIT, &data->map);
1334 		if (err) {
1335 			aprint_error_dev(sc->sc_dev,
1336 			    "could not create TX buf DMA map\n");
1337 			goto fail;
1338 		}
1339 	}
1340 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1341 	return 0;
1342 
1343 fail:	iwm_free_tx_ring(sc, ring);
1344 	return err;
1345 }
1346 
1347 static void
1348 iwm_clear_cmd_in_flight(struct iwm_softc *sc)
1349 {
1350 
1351 	if (!sc->apmg_wake_up_wa)
1352 		return;
1353 
1354 	if (!sc->sc_cmd_hold_nic_awake) {
1355 		aprint_error_dev(sc->sc_dev,
1356 		    "cmd_hold_nic_awake not set\n");
1357 		return;
1358 	}
1359 
1360 	sc->sc_cmd_hold_nic_awake = 0;
1361 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1362 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1363 }
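
/*
 * Counterpart of iwm_set_cmd_in_flight() below: once the command queue
 * drains (see iwm_reset_tx_ring()), the MAC_ACCESS_REQ taken to keep
 * the NIC awake for host commands is released again on NICs that need
 * the apmg_wake_up_wa workaround.
 */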
1364 
1365 static int
1366 iwm_set_cmd_in_flight(struct iwm_softc *sc)
1367 {
1368 	int ret;
1369 
1370 	/*
1371 	 * Wake up the NIC to make sure that the firmware will see the host
1372 	 * command; we let the NIC sleep again once all host commands have
1373 	 * returned. This needs to be done only on NICs that have
1374 	 * apmg_wake_up_wa set.
1375 	 */
1376 	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {
1377 
1378 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1379 		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1380 
1381 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1382 		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1383 		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1384 		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
1385 		    15000);
1386 		if (ret == 0) {
1387 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1388 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1389 			aprint_error_dev(sc->sc_dev,
1390 			    "failed to wake NIC for hcmd\n");
1391 			return EIO;
1392 		}
1393 		sc->sc_cmd_hold_nic_awake = 1;
1394 	}
1395 
1396 	return 0;
1397 }

1398 static void
1399 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1400 {
1401 	int i;
1402 
1403 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1404 		struct iwm_tx_data *data = &ring->data[i];
1405 
1406 		if (data->m != NULL) {
1407 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1408 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1409 			bus_dmamap_unload(sc->sc_dmat, data->map);
1410 			m_freem(data->m);
1411 			data->m = NULL;
1412 		}
1413 	}
1414 	/* Clear TX descriptors. */
1415 	memset(ring->desc, 0, ring->desc_dma.size);
1416 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1417 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1418 	sc->qfullmsk &= ~(1 << ring->qid);
1419 	ring->queued = 0;
1420 	ring->cur = 0;
1421 
1422 	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
1423 		iwm_clear_cmd_in_flight(sc);
1424 }
1425 
1426 static void
1427 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1428 {
1429 	int i;
1430 
1431 	iwm_dma_contig_free(&ring->desc_dma);
1432 	iwm_dma_contig_free(&ring->cmd_dma);
1433 
1434 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1435 		struct iwm_tx_data *data = &ring->data[i];
1436 
1437 		if (data->m != NULL) {
1438 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1439 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1440 			bus_dmamap_unload(sc->sc_dmat, data->map);
1441 			m_freem(data->m);
1442 			data->m = NULL;
1443 		}
1444 		if (data->map != NULL) {
1445 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1446 			data->map = NULL;
1447 		}
1448 	}
1449 }
1450 
1451 static void
1452 iwm_enable_rfkill_int(struct iwm_softc *sc)
1453 {
1454 	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1455 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1456 }
1457 
1458 static int
1459 iwm_check_rfkill(struct iwm_softc *sc)
1460 {
1461 	uint32_t v;
1462 	int s;
1463 	int rv;
1464 
1465 	s = splnet();
1466 
1467 	/*
1468 	 * "documentation" is not really helpful here:
1469 	 *  27:	HW_RF_KILL_SW
1470 	 *	Indicates state of (platform's) hardware RF-Kill switch
1471 	 *
1472 	 * But apparently when it's off, it's on ...
1473 	 */
1474 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1475 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1476 	if (rv) {
1477 		sc->sc_flags |= IWM_FLAG_RFKILL;
1478 	} else {
1479 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1480 	}
1481 
1482 	splx(s);
1483 	return rv;
1484 }
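
/*
 * Note the inverted sense: iwm_check_rfkill() returns nonzero, and sets
 * IWM_FLAG_RFKILL, when the RF kill switch is engaged, i.e. when the
 * radio must stay off.
 */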
1485 
1486 static void
1487 iwm_enable_interrupts(struct iwm_softc *sc)
1488 {
1489 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1490 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1491 }
1492 
1493 static void
1494 iwm_restore_interrupts(struct iwm_softc *sc)
1495 {
1496 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1497 }
1498 
1499 static void
1500 iwm_disable_interrupts(struct iwm_softc *sc)
1501 {
1502 	int s = splnet();
1503 
1504 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1505 
1506 	/* acknowledge all interrupts */
1507 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1508 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1509 
1510 	splx(s);
1511 }
1512 
1513 static void
1514 iwm_ict_reset(struct iwm_softc *sc)
1515 {
1516 	iwm_disable_interrupts(sc);
1517 
1518 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1519 	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
1520 	    BUS_DMASYNC_PREWRITE);
1521 	sc->ict_cur = 0;
1522 
1523 	/* Set physical address of ICT (4KB aligned). */
1524 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1525 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1526 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1527 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1528 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1529 
1530 	/* Switch to ICT interrupt mode in driver. */
1531 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1532 
1533 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1534 	iwm_enable_interrupts(sc);
1535 }
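
/*
 * The ICT (interrupt cause table) is a 4KB-aligned DMA table the device
 * writes interrupt causes into, letting the interrupt handler read
 * causes from memory instead of registers; only paddr >>
 * IWM_ICT_PADDR_SHIFT is programmed since the low bits are known zero.
 */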
1536 
1537 #define IWM_HW_READY_TIMEOUT 50
1538 static int
1539 iwm_set_hw_ready(struct iwm_softc *sc)
1540 {
1541 	int ready;
1542 
1543 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1544 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1545 
1546 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1547 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1548 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1549 	    IWM_HW_READY_TIMEOUT);
1550 	if (ready)
1551 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1552 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1553 
1554 	return ready;
1555 }
1556 #undef IWM_HW_READY_TIMEOUT
1557 
1558 static int
1559 iwm_prepare_card_hw(struct iwm_softc *sc)
1560 {
1561 	int t = 0;
1562 
1563 	if (iwm_set_hw_ready(sc))
1564 		return 0;
1565 
1566 	DELAY(100);
1567 
1568 	/* If HW is not ready, prepare the conditions to check again */
1569 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1570 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1571 
1572 	do {
1573 		if (iwm_set_hw_ready(sc))
1574 			return 0;
1575 		DELAY(200);
1576 		t += 200;
1577 	} while (t < 150000);
1578 
1579 	return ETIMEDOUT;
1580 }
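
/*
 * In total iwm_prepare_card_hw() retries iwm_set_hw_ready() in
 * 200-microsecond steps for up to 150ms ('t' counts microseconds)
 * before giving up with ETIMEDOUT.
 */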
1581 
1582 static void
1583 iwm_apm_config(struct iwm_softc *sc)
1584 {
1585 	pcireg_t reg;
1586 
1587 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1588 	    sc->sc_cap_off + PCIE_LCSR);
1589 	if (reg & PCIE_LCSR_ASPM_L1) {
1590 		/* Misleading bit name: Linux prints "Disabling L0S" when setting it ... */
1591 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1592 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1593 	} else {
1594 		/* ... and "Enabling L0S" when clearing it. */
1595 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1596 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1597 	}
1598 }
1599 
1600 /*
1601  * Start up NIC's basic functionality after it has been reset
1602  * e.g. after platform boot or shutdown.
1603  * NOTE:  This does not load uCode nor start the embedded processor
1604  */
1605 static int
1606 iwm_apm_init(struct iwm_softc *sc)
1607 {
1608 	int err = 0;
1609 
1610 	/* Disable L0S exit timer (platform NMI workaround) */
1611 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1612 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1613 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1614 	}
1615 
1616 	/*
1617 	 * Disable L0s without affecting L1;
1618 	 *  don't wait for ICH L0s (ICH bug W/A)
1619 	 */
1620 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1621 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1622 
1623 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1624 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1625 
1626 	/*
1627 	 * Enable HAP INTA (interrupt from management bus) to
1628 	 * wake device's PCI Express link L1a -> L0s
1629 	 */
1630 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1631 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1632 
1633 	iwm_apm_config(sc);
1634 
1635 #if 0 /* not for 7k/8k */
1636 	/* Configure analog phase-lock-loop before activating to D0A */
1637 	if (trans->cfg->base_params->pll_cfg_val)
1638 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1639 		    trans->cfg->base_params->pll_cfg_val);
1640 #endif
1641 
1642 	/*
1643 	 * Set "initialization complete" bit to move adapter from
1644 	 * D0U* --> D0A* (powered-up active) state.
1645 	 */
1646 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1647 
1648 	/*
1649 	 * Wait for clock stabilization; once stabilized, access to
1650 	 * device-internal resources is supported, e.g. iwm_write_prph()
1651 	 * and accesses to uCode SRAM.
1652 	 */
1653 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1654 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1655 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1656 		aprint_error_dev(sc->sc_dev,
1657 		    "timeout waiting for clock stabilization\n");
1658 		err = ETIMEDOUT;
1659 		goto out;
1660 	}
1661 
1662 	if (sc->host_interrupt_operation_mode) {
1663 		/*
1664 		 * This is a bit of an abuse: this workaround is needed for
1665 		 * 7260 / 3160 only, so we key it off host_interrupt_operation_mode
1666 		 * even though it is not otherwise related to that flag.
1667 		 *
1668 		 * Enable the oscillator to count wake up time for L1 exit. This
1669 		 * consumes slightly more power (100uA) - but allows to be sure
1670 		 * that we wake up from L1 on time.
1671 		 *
1672 		 * This looks weird: read twice the same register, discard the
1673 		 * value, set a bit, and yet again, read that same register
1674 		 * just to discard the value. But that's the way the hardware
1675 		 * seems to like it.
1676 		 */
1677 		iwm_read_prph(sc, IWM_OSC_CLK);
1678 		iwm_read_prph(sc, IWM_OSC_CLK);
1679 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1680 		iwm_read_prph(sc, IWM_OSC_CLK);
1681 		iwm_read_prph(sc, IWM_OSC_CLK);
1682 	}
1683 
1684 	/*
1685 	 * Enable DMA clock and wait for it to stabilize.
1686 	 *
1687 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1688 	 * do not disable clocks.  This preserves any hardware bits already
1689 	 * set by default in "CLK_CTRL_REG" after reset.
1690 	 */
1691 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1692 		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1693 		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1694 		DELAY(20);
1695 
1696 		/* Disable L1-Active */
1697 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1698 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1699 
1700 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1701 		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1702 		    IWM_APMG_RTC_INT_STT_RFKILL);
1703 	}
1704  out:
1705 	if (err)
1706 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1707 	return err;
1708 }
1709 
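/*
 * Stop the device's bus-master DMA activity and wait for the
 * master-disabled status bit to assert.
 */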
1710 static void
1711 iwm_apm_stop(struct iwm_softc *sc)
1712 {
1713 	/* stop device's busmaster DMA activity */
1714 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1715 
1716 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1717 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1718 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1719 		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
1720 	DPRINTF(("iwm apm stop\n"));
1721 }
1722 
1723 static int
1724 iwm_start_hw(struct iwm_softc *sc)
1725 {
1726 	int err;
1727 
1728 	err = iwm_prepare_card_hw(sc);
1729 	if (err)
1730 		return err;
1731 
1732 	/* Reset the entire device */
1733 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1734 	DELAY(10);
1735 
1736 	err = iwm_apm_init(sc);
1737 	if (err)
1738 		return err;
1739 
1740 	iwm_enable_rfkill_int(sc);
1741 	iwm_check_rfkill(sc);
1742 
1743 	return 0;
1744 }
1745 
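/*
 * Tear down the running hardware state: disable interrupts, halt the
 * TX scheduler and DMA channels, reset the RX and TX rings, and put
 * the device into a low-power state via iwm_apm_stop().
 */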
1746 static void
1747 iwm_stop_device(struct iwm_softc *sc)
1748 {
1749 	int chnl, ntries;
1750 	int qid;
1751 
1752 	iwm_disable_interrupts(sc);
1753 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1754 
1755 	/* Deactivate TX scheduler. */
1756 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1757 
1758 	/* Stop all DMA channels. */
1759 	if (iwm_nic_lock(sc)) {
1760 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1761 			IWM_WRITE(sc,
1762 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1763 			for (ntries = 0; ntries < 200; ntries++) {
1764 				uint32_t r;
1765 
1766 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1767 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1768 				    chnl))
1769 					break;
1770 				DELAY(20);
1771 			}
1772 		}
1773 		iwm_nic_unlock(sc);
1774 	}
1775 	iwm_disable_rx_dma(sc);
1776 
1777 	iwm_reset_rx_ring(sc, &sc->rxq);
1778 
1779 	for (qid = 0; qid < __arraycount(sc->txq); qid++)
1780 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1781 
1782 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1783 		/* Power-down device's busmaster DMA clocks */
1784 		if (iwm_nic_lock(sc)) {
1785 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1786 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1787 			DELAY(5);
1788 			iwm_nic_unlock(sc);
1789 		}
1790 	}
1791 
1792 	/* Make sure (redundantly) that we released our request to stay awake */
1793 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1794 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1795 
1796 	/* Stop the device, and put it in low power state */
1797 	iwm_apm_stop(sc);
1798 
1799 	/*
1800 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1801 	 * Clear that interrupt again here.
1802 	 */
1803 	iwm_disable_interrupts(sc);
1804 
1805 	/* Reset the on-board processor. */
1806 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1807 
1808 	/* Even though we stop the HW we still want the RF kill interrupt. */
1809 	iwm_enable_rfkill_int(sc);
1810 	iwm_check_rfkill(sc);
1811 }
1812 
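/*
 * Program IWM_CSR_HW_IF_CONFIG_REG with the MAC step/dash taken from
 * the hardware revision and the radio type/step/dash taken from the
 * firmware's PHY configuration.
 */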
1813 static void
1814 iwm_nic_config(struct iwm_softc *sc)
1815 {
1816 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1817 	uint32_t reg_val = 0;
1818 
1819 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1820 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1821 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1822 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1823 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1824 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1825 
1826 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1827 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1828 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1829 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1830 
1831 	/* radio configuration */
1832 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1833 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1834 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1835 
1836 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1837 
1838 	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1839 	    radio_cfg_step, radio_cfg_dash));
1840 
1841 	/*
1842 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1843 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1844 	 * to lose ownership and not being able to obtain it back.
1845 	 */
1846 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1847 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1848 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1849 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1850 	}
1851 }
1852 
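/*
 * Initialize the RX DMA engine: clear the ring and status-area
 * pointers, program their physical addresses, and enable the RX
 * channel with 4KB receive buffers.
 */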
1853 static int
1854 iwm_nic_rx_init(struct iwm_softc *sc)
1855 {
1856 	if (!iwm_nic_lock(sc))
1857 		return EBUSY;
1858 
1859 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1860 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1861 	    0, sc->rxq.stat_dma.size,
1862 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1863 
1864 	iwm_disable_rx_dma(sc);
1865 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1866 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1867 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1868 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1869 
1870 	/* Set physical address of RX ring (256-byte aligned). */
1871 	IWM_WRITE(sc,
1872 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1873 
1874 	/* Set physical address of RX status (16-byte aligned). */
1875 	IWM_WRITE(sc,
1876 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1877 
1878 	/* Enable RX. */
1879 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1880 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1881 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1882 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1883 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
1884 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1885 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1886 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1887 
1888 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1889 
1890 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1891 	if (sc->host_interrupt_operation_mode)
1892 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1893 
1894 	/*
1895 	 * This value should initially be 0 (before preparing any RBs),
1896 	 * and should be 8 after preparing the first 8 RBs (for example).
1897 	 */
1898 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1899 
1900 	iwm_nic_unlock(sc);
1901 
1902 	return 0;
1903 }
1904 
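/*
 * Initialize the TX side: program the "keep warm" page address and
 * each TX ring's descriptor base; the scheduler itself stays
 * deactivated until iwm_post_alive().
 */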
1905 static int
1906 iwm_nic_tx_init(struct iwm_softc *sc)
1907 {
1908 	int qid;
1909 
1910 	if (!iwm_nic_lock(sc))
1911 		return EBUSY;
1912 
1913 	/* Deactivate TX scheduler. */
1914 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1915 
1916 	/* Set physical address of "keep warm" page (16-byte aligned). */
1917 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1918 
1919 	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1920 		struct iwm_tx_ring *txq = &sc->txq[qid];
1921 
1922 		/* Set physical address of TX ring (256-byte aligned). */
1923 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1924 		    txq->desc_dma.paddr >> 8);
1925 		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1926 		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1927 	}
1928 
1929 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1930 
1931 	iwm_nic_unlock(sc);
1932 
1933 	return 0;
1934 }
1935 
1936 static int
1937 iwm_nic_init(struct iwm_softc *sc)
1938 {
1939 	int err;
1940 
1941 	iwm_apm_init(sc);
1942 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1943 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1944 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1945 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1946 	}
1947 
1948 	iwm_nic_config(sc);
1949 
1950 	err = iwm_nic_rx_init(sc);
1951 	if (err)
1952 		return err;
1953 
1954 	err = iwm_nic_tx_init(sc);
1955 	if (err)
1956 		return err;
1957 
1958 	DPRINTF(("shadow registers enabled\n"));
1959 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1960 
1961 	return 0;
1962 }
1963 
1964 static const uint8_t iwm_ac_to_tx_fifo[] = {
1965 	IWM_TX_FIFO_VO,
1966 	IWM_TX_FIFO_VI,
1967 	IWM_TX_FIFO_BE,
1968 	IWM_TX_FIFO_BK,
1969 };
1970 
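/*
 * Enable a TX queue and bind it to a FIFO.  The command queue is
 * configured directly through scheduler PRPH registers and SRAM;
 * all other queues are set up via an IWM_SCD_QUEUE_CFG host command.
 */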
1971 static int
1972 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1973 {
1974 	if (!iwm_nic_lock(sc)) {
1975 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1976 		return EBUSY;
1977 	}
1978 
1979 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1980 
1981 	if (qid == IWM_CMD_QUEUE) {
1982 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1983 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1984 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1985 
1986 		iwm_nic_unlock(sc);
1987 
1988 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1989 
1990 		if (!iwm_nic_lock(sc))
1991 			return EBUSY;
1992 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1993 		iwm_nic_unlock(sc);
1994 
1995 		iwm_write_mem32(sc,
1996 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1997 
1998 		/* Set scheduler window size and frame limit. */
1999 		iwm_write_mem32(sc,
2000 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2001 		    sizeof(uint32_t),
2002 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2003 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2004 		    ((IWM_FRAME_LIMIT
2005 		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2006 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2007 
2008 		if (!iwm_nic_lock(sc))
2009 			return EBUSY;
2010 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2011 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2012 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2013 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2014 		    IWM_SCD_QUEUE_STTS_REG_MSK);
2015 	} else {
2016 		struct iwm_scd_txq_cfg_cmd cmd;
2017 		int err;
2018 
2019 		iwm_nic_unlock(sc);
2020 
2021 		memset(&cmd, 0, sizeof(cmd));
2022 		cmd.scd_queue = qid;
2023 		cmd.enable = 1;
2024 		cmd.sta_id = sta_id;
2025 		cmd.tx_fifo = fifo;
2026 		cmd.aggregate = 0;
2027 		cmd.window = IWM_FRAME_LIMIT;
2028 
2029 		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
2030 		    &cmd);
2031 		if (err)
2032 			return err;
2033 
2034 		if (!iwm_nic_lock(sc))
2035 			return EBUSY;
2036 	}
2037 
2038 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2039 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
2040 
2041 	iwm_nic_unlock(sc);
2042 
2043 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2044 
2045 	return 0;
2046 }
2047 
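/*
 * Finish NIC setup once the firmware has reported alive: verify the
 * scheduler's SRAM base address, clear the scheduler context area,
 * enable the command queue, and turn on the TX DMA channels.
 */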
2048 static int
2049 iwm_post_alive(struct iwm_softc *sc)
2050 {
2051 	int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2052 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2053 	int err, chnl;
2054 	uint32_t base;
2055 
2056 	if (!iwm_nic_lock(sc))
2057 		return EBUSY;
2058 
2059 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2060 	if (sc->sched_base != base) {
2061 		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2062 		    DEVNAME(sc), sc->sched_base, base));
2063 		sc->sched_base = base;
2064 	}
2065 
2066 	iwm_nic_unlock(sc);
2067 
2068 	iwm_ict_reset(sc);
2069 
2070 	/* Clear TX scheduler state in SRAM. */
2071 	err = iwm_write_mem(sc,
2072 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2073 	if (err)
2074 		return err;
2075 
2076 	if (!iwm_nic_lock(sc))
2077 		return EBUSY;
2078 
2079 	/* Set physical address of TX scheduler rings (1KB aligned). */
2080 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2081 
2082 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2083 
2084 	iwm_nic_unlock(sc);
2085 
2086 	/* enable command channel */
2087 	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2088 	if (err)
2089 		return err;
2090 
2091 	if (!iwm_nic_lock(sc))
2092 		return EBUSY;
2093 
2094 	/* Activate TX scheduler. */
2095 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2096 
2097 	/* Enable DMA channels. */
2098 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2099 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2100 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2101 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2102 	}
2103 
2104 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2105 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2106 
2107 	/* Enable L1-Active */
2108 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2109 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2110 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2111 	}
2112 
2113 	iwm_nic_unlock(sc);
2114 
2115 	return 0;
2116 }
2117 
2118 static struct iwm_phy_db_entry *
2119 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2120     uint16_t chg_id)
2121 {
2122 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2123 
2124 	if (type >= IWM_PHY_DB_MAX)
2125 		return NULL;
2126 
2127 	switch (type) {
2128 	case IWM_PHY_DB_CFG:
2129 		return &phy_db->cfg;
2130 	case IWM_PHY_DB_CALIB_NCH:
2131 		return &phy_db->calib_nch;
2132 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2133 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2134 			return NULL;
2135 		return &phy_db->calib_ch_group_papd[chg_id];
2136 	case IWM_PHY_DB_CALIB_CHG_TXP:
2137 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2138 			return NULL;
2139 		return &phy_db->calib_ch_group_txp[chg_id];
2140 	default:
2141 		return NULL;
2142 	}
2143 	return NULL;
2144 }
2145 
2146 static int
2147 iwm_phy_db_set_section(struct iwm_softc *sc,
2148     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2149 {
2150 	struct iwm_phy_db_entry *entry;
2151 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2152 	uint16_t chg_id = 0;
2153 
2154 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2155 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2156 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2157 
2158 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2159 	if (!entry)
2160 		return EINVAL;
2161 
2162 	if (entry->data)
2163 		kmem_intr_free(entry->data, entry->size);
2164 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2165 	if (!entry->data) {
2166 		entry->size = 0;
2167 		return ENOMEM;
2168 	}
2169 	memcpy(entry->data, phy_db_notif->data, size);
2170 	entry->size = size;
2171 
2172 	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2173 	    __func__, __LINE__, type, size, entry->data));
2174 
2175 	return 0;
2176 }
2177 
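/*
 * Channel ids considered valid: anything up to 14 (2GHz), plus the
 * 5GHz channels 36-64 and 100-140 in steps of four, and 145-165
 * where ch_id % 4 == 1 (145, 149, ..., 165).
 */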
2178 static int
2179 iwm_is_valid_channel(uint16_t ch_id)
2180 {
2181 	if (ch_id <= 14 ||
2182 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2183 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2184 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2185 		return 1;
2186 	return 0;
2187 }
2188 
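/*
 * Map a valid channel id onto a contiguous index: channels up to 14
 * map to 0-13, and the 5GHz channels follow at indices 14-38.
 * Returns 0xff for invalid channel ids.
 */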
2189 static uint8_t
2190 iwm_ch_id_to_ch_index(uint16_t ch_id)
2191 {
2192 	if (!iwm_is_valid_channel(ch_id))
2193 		return 0xff;
2194 
2195 	if (ch_id <= 14)
2196 		return ch_id - 1;
2197 	if (ch_id <= 64)
2198 		return (ch_id + 20) / 4;
2199 	if (ch_id <= 140)
2200 		return (ch_id - 12) / 4;
2201 	return (ch_id - 13) / 4;
2202 }
2203 
2205 static uint16_t
2206 iwm_channel_id_to_papd(uint16_t ch_id)
2207 {
2208 	if (!iwm_is_valid_channel(ch_id))
2209 		return 0xff;
2210 
2211 	if (1 <= ch_id && ch_id <= 14)
2212 		return 0;
2213 	if (36 <= ch_id && ch_id <= 64)
2214 		return 1;
2215 	if (100 <= ch_id && ch_id <= 140)
2216 		return 2;
2217 	return 3;
2218 }
2219 
2220 static uint16_t
2221 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2222 {
2223 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2224 	struct iwm_phy_db_chg_txp *txp_chg;
2225 	int i;
2226 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2227 
2228 	if (ch_index == 0xff)
2229 		return 0xff;
2230 
2231 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2232 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2233 		if (!txp_chg)
2234 			return 0xff;
2235 		/*
2236 		 * Look for the first channel group whose max channel
2237 		 * index is not below the requested channel index.
2238 		 */
2239 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2240 			return i;
2241 	}
2242 	return 0xff;
2243 }
2244 
2245 static int
2246 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2247     uint16_t *size, uint16_t ch_id)
2248 {
2249 	struct iwm_phy_db_entry *entry;
2250 	uint16_t ch_group_id = 0;
2251 
2252 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2253 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2254 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2255 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2256 
2257 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2258 	if (!entry)
2259 		return EINVAL;
2260 
2261 	*data = entry->data;
2262 	*size = entry->size;
2263 
2264 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2265 		       __func__, __LINE__, type, *size));
2266 
2267 	return 0;
2268 }
2269 
2270 static int
2271 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2272     void *data)
2273 {
2274 	struct iwm_phy_db_cmd phy_db_cmd;
2275 	struct iwm_host_cmd cmd = {
2276 		.id = IWM_PHY_DB_CMD,
2277 		.flags = IWM_CMD_ASYNC,
2278 	};
2279 
2280 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2281 	    type, length));
2282 
2283 	phy_db_cmd.type = htole16(type);
2284 	phy_db_cmd.length = htole16(length);
2285 
2286 	cmd.data[0] = &phy_db_cmd;
2287 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2288 	cmd.data[1] = data;
2289 	cmd.len[1] = length;
2290 
2291 	return iwm_send_cmd(sc, &cmd);
2292 }
2293 
2294 static int
2295 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2296     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2297 {
2298 	uint16_t i;
2299 	int err;
2300 	struct iwm_phy_db_entry *entry;
2301 
2302 	/* Send all the channel-specific groups to operational fw */
2303 	for (i = 0; i < max_ch_groups; i++) {
2304 		entry = iwm_phy_db_get_section(sc, type, i);
2305 		if (!entry)
2306 			return EINVAL;
2307 
2308 		if (!entry->size)
2309 			continue;
2310 
2311 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2312 		if (err) {
2313 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2314 			    "err %d\n", DEVNAME(sc), type, i, err));
2315 			return err;
2316 		}
2317 
2318 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2319 		    DEVNAME(sc), type, i));
2320 
2321 		DELAY(1000);
2322 	}
2323 
2324 	return 0;
2325 }
2326 
2327 static int
2328 iwm_send_phy_db_data(struct iwm_softc *sc)
2329 {
2330 	uint8_t *data = NULL;
2331 	uint16_t size = 0;
2332 	int err;
2333 
2334 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2335 	if (err)
2336 		return err;
2337 
2338 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2339 	if (err)
2340 		return err;
2341 
2342 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2343 	    &data, &size, 0);
2344 	if (err)
2345 		return err;
2346 
2347 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2348 	if (err)
2349 		return err;
2350 
2351 	err = iwm_phy_db_send_all_channel_groups(sc,
2352 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2353 	if (err)
2354 		return err;
2355 
2356 	err = iwm_phy_db_send_all_channel_groups(sc,
2357 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2358 	if (err)
2359 		return err;
2360 
2361 	return 0;
2362 }
2363 
2364 /*
2365  * For the high priority TE use a time event type that has similar priority to
2366  * the FW's action scan priority.
2367  */
2368 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2369 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2370 
2371 /* used to convert from time event API v2 to v1 */
2372 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2373 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
2374 static inline uint16_t
2375 iwm_te_v2_get_notify(uint16_t policy)
2376 {
2377 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2378 }
2379 
2380 static inline uint16_t
2381 iwm_te_v2_get_dep_policy(uint16_t policy)
2382 {
2383 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2384 		IWM_TE_V2_PLACEMENT_POS;
2385 }
2386 
2387 static inline uint16_t
2388 iwm_te_v2_get_absence(uint16_t policy)
2389 {
2390 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2391 }
2392 
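/*
 * Convert a version 2 time event command to the version 1 layout,
 * for firmware that does not advertise the v2 time event API.
 */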
2393 static void
2394 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2395     struct iwm_time_event_cmd_v1 *cmd_v1)
2396 {
2397 	cmd_v1->id_and_color = cmd_v2->id_and_color;
2398 	cmd_v1->action = cmd_v2->action;
2399 	cmd_v1->id = cmd_v2->id;
2400 	cmd_v1->apply_time = cmd_v2->apply_time;
2401 	cmd_v1->max_delay = cmd_v2->max_delay;
2402 	cmd_v1->depends_on = cmd_v2->depends_on;
2403 	cmd_v1->interval = cmd_v2->interval;
2404 	cmd_v1->duration = cmd_v2->duration;
2405 	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2406 		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2407 	else
2408 		cmd_v1->repeat = htole32(cmd_v2->repeat);
2409 	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2410 	cmd_v1->interval_reciprocal = 0; /* unused */
2411 
2412 	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2413 	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2414 	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2415 }
2416 
2417 static int
2418 iwm_send_time_event_cmd(struct iwm_softc *sc,
2419     const struct iwm_time_event_cmd_v2 *cmd)
2420 {
2421 	struct iwm_time_event_cmd_v1 cmd_v1;
2422 
2423 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2424 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2425 		    cmd);
2426 
2427 	iwm_te_v2_to_v1(cmd, &cmd_v1);
2428 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2429 	    &cmd_v1);
2430 }
2431 
2432 static void
2433 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2434     uint32_t duration, uint32_t max_delay)
2435 {
2436 	struct iwm_time_event_cmd_v2 time_cmd;
2437 
2438 	memset(&time_cmd, 0, sizeof(time_cmd));
2439 
2440 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2441 	time_cmd.id_and_color =
2442 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2443 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2444 
2445 	time_cmd.apply_time = htole32(0);
2446 
2447 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2448 	time_cmd.max_delay = htole32(max_delay);
2449 	/* TODO: why set interval to the beacon interval if not periodic? */
2450 	time_cmd.interval = htole32(1);
2451 	time_cmd.duration = htole32(duration);
2452 	time_cmd.repeat = 1;
2453 	time_cmd.policy
2454 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2455 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2456 		IWM_T2_V2_START_IMMEDIATELY);
2457 
2458 	iwm_send_time_event_cmd(sc, &time_cmd);
2459 }
2460 
2461 /*
2462  * NVM read access and content parsing.  We do not support
2463  * external NVM or writing NVM.
2464  */
2465 
2466 /* list of NVM sections we are allowed/need to read */
2467 static const int iwm_nvm_to_read[] = {
2468 	IWM_NVM_SECTION_TYPE_HW,
2469 	IWM_NVM_SECTION_TYPE_SW,
2470 	IWM_NVM_SECTION_TYPE_REGULATORY,
2471 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2472 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2473 	IWM_NVM_SECTION_TYPE_HW_8000,
2474 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2475 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2476 };
2477 
2478 /* Default NVM size to read */
2479 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2480 #define IWM_MAX_NVM_SECTION_SIZE_7000	(16 * 512 * sizeof(uint16_t)) /*16 KB*/
2481 #define IWM_MAX_NVM_SECTION_SIZE_8000	(32 * 512 * sizeof(uint16_t)) /*32 KB*/
2482 
2483 #define IWM_NVM_WRITE_OPCODE 1
2484 #define IWM_NVM_READ_OPCODE 0
2485 
2486 static int
2487 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2488     uint16_t length, uint8_t *data, uint16_t *len)
2489 {
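	/*
	 * Note that the caller-supplied offset is overridden just below:
	 * every chunk is requested at (and copied to) offset 0.  The
	 * assignment must precede nvm_access_cmd's initializer, which
	 * captures the value.
	 */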
2490 	offset = 0;
2491 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2492 		.offset = htole16(offset),
2493 		.length = htole16(length),
2494 		.type = htole16(section),
2495 		.op_code = IWM_NVM_READ_OPCODE,
2496 	};
2497 	struct iwm_nvm_access_resp *nvm_resp;
2498 	struct iwm_rx_packet *pkt;
2499 	struct iwm_host_cmd cmd = {
2500 		.id = IWM_NVM_ACCESS_CMD,
2501 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2502 		.data = { &nvm_access_cmd, },
2503 	};
2504 	int err, offset_read;
2505 	size_t bytes_read;
2506 	uint8_t *resp_data;
2507 
2508 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2509 
2510 	err = iwm_send_cmd(sc, &cmd);
2511 	if (err) {
2512 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2513 		    DEVNAME(sc), err));
2514 		return err;
2515 	}
2516 
2517 	pkt = cmd.resp_pkt;
2518 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2519 		err = EIO;
2520 		goto exit;
2521 	}
2522 
2523 	/* Extract NVM response */
2524 	nvm_resp = (void *)pkt->data;
2525 
2526 	err = le16toh(nvm_resp->status);
2527 	bytes_read = le16toh(nvm_resp->length);
2528 	offset_read = le16toh(nvm_resp->offset);
2529 	resp_data = nvm_resp->data;
2530 	if (err) {
2531 		err = EINVAL;
2532 		goto exit;
2533 	}
2534 
2535 	if (offset_read != offset) {
2536 		err = EINVAL;
2537 		goto exit;
2538 	}
2539 	if (bytes_read > length) {
2540 		err = EINVAL;
2541 		goto exit;
2542 	}
2543 
2544 	memcpy(data + offset, resp_data, bytes_read);
2545 	*len = bytes_read;
2546 
2547  exit:
2548 	iwm_free_resp(sc, &cmd);
2549 	return err;
2550 }
2551 
2552 /*
2553  * Read an NVM section completely.
2554  * NICs prior to the 7000 family don't have a real NVM; they just read
2555  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2556  * by the uCode, we must manually check in that case that we don't
2557  * overflow and read more than the EEPROM size.
2558  */
2559 static int
2560 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2561     uint16_t *len, size_t max_len)
2562 {
2563 	uint16_t chunklen, seglen;
2564 	int err;
2565 
2566 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2567 	*len = 0;
2568 
2569 	/* Read NVM chunks until exhausted (reading less than requested) */
2570 	while (seglen == chunklen && *len < max_len) {
2571 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2572 		    &seglen);
2573 		if (err) {
2574 			DPRINTF(("%s: Cannot read NVM from section %d "
2575 			    "offset %d, length %d\n",
2576 			    DEVNAME(sc), section, *len, chunklen));
2577 			return err;
2578 		}
2579 		*len += seglen;
2580 	}
2581 
2582 	DPRINTFN(4, ("NVM section %d read completed\n", section));
2583 	return 0;
2584 }
2585 
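/*
 * Antenna masks: start from the firmware's PHY configuration and,
 * if the NVM supplies its own valid-antenna masks, intersect with
 * those.
 */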
2586 static uint8_t
2587 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2588 {
2589 	uint8_t tx_ant;
2590 
2591 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2592 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2593 
2594 	if (sc->sc_nvm.valid_tx_ant)
2595 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2596 
2597 	return tx_ant;
2598 }
2599 
2600 static uint8_t
2601 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2602 {
2603 	uint8_t rx_ant;
2604 
2605 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2606 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2607 
2608 	if (sc->sc_nvm.valid_rx_ant)
2609 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2610 
2611 	return rx_ant;
2612 }
2613 
2614 static void
2615 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2616     const uint8_t *nvm_channels, size_t nchan)
2617 {
2618 	struct ieee80211com *ic = &sc->sc_ic;
2619 	struct iwm_nvm_data *data = &sc->sc_nvm;
2620 	int ch_idx;
2621 	struct ieee80211_channel *channel;
2622 	uint16_t ch_flags;
2623 	int is_5ghz;
2624 	int flags, hw_value;
2625 
2626 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2627 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2628 		aprint_debug_dev(sc->sc_dev,
2629 		    "Ch. %d: %svalid %cibss %s %cradar %cdfs"
2630 		    " %cwide %c40MHz %c80MHz %c160MHz\n",
2631 		    nvm_channels[ch_idx],
2632 		    ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
2633 		    ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
2634 		    ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
2635 		    ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
2636 		    ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
2637 		    ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
2638 		    ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
2639 		    ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
2640 		    ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');
2641 
2642 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2643 		    !data->sku_cap_band_52GHz_enable)
2644 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2645 
2646 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2647 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2648 			    nvm_channels[ch_idx], ch_flags,
2649 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
2650 			continue;
2651 		}
2652 
2653 		hw_value = nvm_channels[ch_idx];
2654 		channel = &ic->ic_channels[hw_value];
2655 
2656 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2657 		if (!is_5ghz) {
2658 			flags = IEEE80211_CHAN_2GHZ;
2659 			channel->ic_flags
2660 			    = IEEE80211_CHAN_CCK
2661 			    | IEEE80211_CHAN_OFDM
2662 			    | IEEE80211_CHAN_DYN
2663 			    | IEEE80211_CHAN_2GHZ;
2664 		} else {
2665 			flags = IEEE80211_CHAN_5GHZ;
2666 			channel->ic_flags =
2667 			    IEEE80211_CHAN_A;
2668 		}
2669 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2670 
2671 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2672 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2673 
2674 #ifndef IEEE80211_NO_HT
2675 		if (data->sku_cap_11n_enable)
2676 			channel->ic_flags |= IEEE80211_CHAN_HT;
2677 #endif
2678 	}
2679 }
2680 
2681 #ifndef IEEE80211_NO_HT
2682 static void
2683 iwm_setup_ht_rates(struct iwm_softc *sc)
2684 {
2685 	struct ieee80211com *ic = &sc->sc_ic;
2686 
2687 	/* TX is supported with the same MCS as RX. */
2688 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2689 
2690 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2691 
2692 #ifdef notyet
2693 	if (sc->sc_nvm.sku_cap_mimo_disable)
2694 		return;
2695 
2696 	if (iwm_fw_valid_rx_ant(sc) > 1)
2697 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2698 	if (iwm_fw_valid_rx_ant(sc) > 2)
2699 		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
2700 #endif
2701 }
2702 
2703 #define IWM_MAX_RX_BA_SESSIONS 16
2704 
2705 static void
2706 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2707     uint16_t ssn, int start)
2708 {
2709 	struct ieee80211com *ic = &sc->sc_ic;
2710 	struct iwm_add_sta_cmd_v7 cmd;
2711 	struct iwm_node *in = (struct iwm_node *)ni;
2712 	int err, s;
2713 	uint32_t status;
2714 
2715 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2716 		ieee80211_addba_req_refuse(ic, ni, tid);
2717 		return;
2718 	}
2719 
2720 	memset(&cmd, 0, sizeof(cmd));
2721 
2722 	cmd.sta_id = IWM_STATION_ID;
2723 	cmd.mac_id_n_color
2724 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2725 	cmd.add_modify = IWM_STA_MODE_MODIFY;
2726 
2727 	if (start) {
2728 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2729 		cmd.add_immediate_ba_ssn = ssn;
2730 	} else {
2731 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2732 	}
2733 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2734 	    IWM_STA_MODIFY_REMOVE_BA_TID;
2735 
2736 	status = IWM_ADD_STA_SUCCESS;
2737 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2738 	    &status);
2739 
2740 	s = splnet();
2741 	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2742 		if (start) {
2743 			sc->sc_rx_ba_sessions++;
2744 			ieee80211_addba_req_accept(ic, ni, tid);
2745 		} else if (sc->sc_rx_ba_sessions > 0)
2746 			sc->sc_rx_ba_sessions--;
2747 	} else if (start)
2748 		ieee80211_addba_req_refuse(ic, ni, tid);
2749 	splx(s);
2750 }
2751 
2752 static void
2753 iwm_htprot_task(void *arg)
2754 {
2755 	struct iwm_softc *sc = arg;
2756 	struct ieee80211com *ic = &sc->sc_ic;
2757 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2758 	int err;
2759 
2760 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
2761 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2762 	if (err)
2763 		aprint_error_dev(sc->sc_dev,
2764 		    "could not change HT protection: error %d\n", err);
2765 }
2766 
2767 /*
2768  * This function is called by the upper layer when HT protection settings
2769  * in beacons have changed.
2770  */
2771 static void
2772 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2773 {
2774 	struct iwm_softc *sc = ic->ic_softc;
2775 
2776 	/* assumes that ni == ic->ic_bss */
2777 	task_add(systq, &sc->htprot_task);
2778 }
2779 
2780 static void
2781 iwm_ba_task(void *arg)
2782 {
2783 	struct iwm_softc *sc = arg;
2784 	struct ieee80211com *ic = &sc->sc_ic;
2785 	struct ieee80211_node *ni = ic->ic_bss;
2786 
2787 	if (sc->ba_start)
2788 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2789 	else
2790 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2791 }
2792 
2793 /*
2794  * This function is called by the upper layer when an ADDBA request is
2795  * received from another STA and before the ADDBA response is sent.
2796  */
2797 static int
2798 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2799     uint8_t tid)
2800 {
2801 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2802 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2803 
2804 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2805 		return ENOSPC;
2806 
2807 	sc->ba_start = 1;
2808 	sc->ba_tid = tid;
2809 	sc->ba_ssn = htole16(ba->ba_winstart);
2810 	task_add(systq, &sc->ba_task);
2811 
2812 	return EBUSY;
2813 }
2814 
2815 /*
2816  * This function is called by the upper layer on teardown of an
2817  * HT-immediate Block Ack agreement (e.g. upon receipt of a DELBA frame).
2818  */
2819 static void
2820 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2821     uint8_t tid)
2822 {
2823 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2824 
2825 	sc->ba_start = 0;
2826 	sc->ba_tid = tid;
2827 	task_add(systq, &sc->ba_task);
2828 }
2829 #endif
2830 
2831 static void
2832 iwm_free_fw_paging(struct iwm_softc *sc)
2833 {
2834 	int i;
2835 
2836 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
2837 		return;
2838 
2839 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
2840 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
2841 	}
2842 
2843 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
2844 }
2845 
2846 static int
2847 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2848 {
2849 	int sec_idx, idx;
2850 	uint32_t offset = 0;
2851 
2852 	/*
2853 	 * Find the start of the paging image.  If CPU2 exists and the
2854 	 * image is in paging format, the image looks like:
2855 	 * CPU1 sections (2 or more)
2856 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
2857 	 * CPU2 sections (not paged)
2858 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
2859 	 * CPU2 sections from the CPU2 paging sections
2860 	 * CPU2 paging CSS
2861 	 * CPU2 paging image (including instructions and data)
2862 	 */
2863 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
2864 		if (fws->fw_sect[sec_idx].fws_devoff ==
2865 		    IWM_PAGING_SEPARATOR_SECTION) {
2866 			sec_idx++;
2867 			break;
2868 		}
2869 	}
2870 
2871 	/*
2872 	 * If paging is enabled there should be at least 2 more sections left
2873 	 * (one for CSS and one for Paging data)
2874 	 */
2875 	if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
2876 		aprint_verbose_dev(sc->sc_dev,
2877 		    "Paging: Missing CSS and/or paging sections\n");
2878 		iwm_free_fw_paging(sc);
2879 		return EINVAL;
2880 	}
2881 
2882 	/* copy the CSS block to the dram */
2883 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
2884 	    sec_idx));
2885 
2886 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
2887 	    fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);
2888 
2889 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
2890 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
2891 
2892 	sec_idx++;
2893 
2894 	/*
2895 	 * Copy the paging blocks to DRAM.  The loop index starts at 1
2896 	 * because the CSS block (index 0) was already copied above.
2897 	 * It stops at num_of_paging_blk because the last block may not
2898 	 * be full and is copied separately below.
2899 	 */
2900 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
2901 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2902 		       (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2903 		       sc->fw_paging_db[idx].fw_paging_size);
2904 
2905 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
2906 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
2907 
2908 		offset += sc->fw_paging_db[idx].fw_paging_size;
2909 	}
2910 
2911 	/* copy the last paging block */
2912 	if (sc->num_of_pages_in_last_blk > 0) {
2913 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2914 		    (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2915 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
2916 
2917 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
2918 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
2919 	}
2920 
2921 	return 0;
2922 }
2923 
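/*
 * Allocate DMA-safe memory for firmware paging: one 4KB block for the
 * paging CSS plus num_of_paging_blk blocks of IWM_PAGING_BLOCK_SIZE.
 * If the blocks already exist (device reset), they are only
 * synchronized again rather than reallocated.
 */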
2924 static int
2925 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2926 {
2927 	int blk_idx = 0;
2928 	int error, num_of_pages;
2929 	bus_dmamap_t dmap;
2930 
2931 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
2932 		int i;
2933 		/* The device was reset; set up firmware paging again. */
2934 		for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
2935 			dmap = sc->fw_paging_db[i].fw_paging_block.map;
2936 			bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
2937 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2938 		}
2939 		return 0;
2940 	}
2941 
2942 	/* Ensure that 2^IWM_BLOCK_2_EXP_SIZE == IWM_PAGING_BLOCK_SIZE. */
2943 	CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);
2944 
2945 	num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
2946 	sc->num_of_paging_blk =
2947 	    howmany(num_of_pages, IWM_NUM_OF_PAGE_PER_GROUP);
2948 	sc->num_of_pages_in_last_blk = num_of_pages -
2949 	    IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
2950 
2951 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
2952 	    "each block holds 8 pages, last block holds %d pages\n",
2953 	    DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));
2954 
2955 	/* allocate block of 4Kbytes for paging CSS */
2956 	error = iwm_dma_contig_alloc(sc->sc_dmat,
2957 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
2958 	    4096);
2959 	if (error) {
2960 		/* free all the previous pages since we failed */
2961 		iwm_free_fw_paging(sc);
2962 		return ENOMEM;
2963 	}
2964 
2965 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
2966 
2967 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
2968 	    DEVNAME(sc)));
2969 
2970 	/*
2971 	 * Allocate blocks in DRAM.  The CSS lives in fw_paging_db[0],
2972 	 * so the loop starts at index 1.
2973 	 */
2974 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
2975 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
2977 		error = iwm_dma_contig_alloc(sc->sc_dmat,
2978 		    &sc->fw_paging_db[blk_idx].fw_paging_block,
2979 		    IWM_PAGING_BLOCK_SIZE, 4096);
2980 		if (error) {
2981 			/* free all the previous pages since we failed */
2982 			iwm_free_fw_paging(sc);
2983 			return ENOMEM;
2984 		}
2985 
2986 		sc->fw_paging_db[blk_idx].fw_paging_size =
2987 		    IWM_PAGING_BLOCK_SIZE;
2988 
2989 		DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
2990 		    "paging.\n", DEVNAME(sc)));
2991 	}
2992 
2993 	return 0;
2994 }
2995 
2996 static int
2997 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2998 {
2999 	int err;
3000 
3001 	err = iwm_alloc_fw_paging_mem(sc, fws);
3002 	if (err)
3003 		return err;
3004 
3005 	return iwm_fill_paging_mem(sc, fws);
3006 }
3007 
3008 static bool
3009 iwm_has_new_tx_api(struct iwm_softc *sc)
3010 {
3011 	/* XXX: always false; devices with the new TX API are not supported. */
3012 	return false;
3013 }
3014 
3015 /* send paging cmd to FW in case CPU2 has paging image */
3016 static int
3017 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3018 {
3019 	struct iwm_fw_paging_cmd fw_paging_cmd = {
3020 		.flags = htole32(IWM_PAGING_CMD_IS_SECURED |
3021 		                 IWM_PAGING_CMD_IS_ENABLED |
3022 		                 (sc->num_of_pages_in_last_blk <<
3023 		                  IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
3024 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
3025 		.block_num = htole32(sc->num_of_paging_blk),
3026 	};
3027 	size_t size = sizeof(fw_paging_cmd);
3028 	int blk_idx;
3029 	bus_dmamap_t dmap;
3030 
3031 	if (!iwm_has_new_tx_api(sc))
3032 		size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
3033 		    IWM_NUM_OF_FW_PAGING_BLOCKS;
3034 
3035 	/* Loop over all paging blocks + the CSS block. */
3036 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
3037 		bus_addr_t dev_phy_addr =
3038 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
3039 		if (iwm_has_new_tx_api(sc)) {
3040 			fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
3041 			    htole64(dev_phy_addr);
3042 		} else {
3043 			dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
3044 			fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
3045 			    htole32(dev_phy_addr);
3046 		}
3047 		dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
3048 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3049 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3050 	}
3051 
3052 	return iwm_send_cmd_pdu(sc,
3053 	    iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_ALWAYS_LONG_GROUP, 0),
3054 	    0, size, &fw_paging_cmd);
3055 }
3056 
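/*
 * Determine the MAC address on 8000-family devices: prefer a valid
 * address from the MAC_OVERRIDE NVM section, otherwise reassemble it
 * from the IWM_WFMP_MAC_ADDR PRPH registers.
 */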
3057 static void
3058 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3059     const uint16_t *mac_override, const uint16_t *nvm_hw)
3060 {
3061 	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
3062 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3063 	};
3064 	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
3065 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
3066 	};
3067 	const uint8_t *hw_addr;
3068 
3069 	if (mac_override) {
3070 		hw_addr = (const uint8_t *)(mac_override +
3071 		    IWM_MAC_ADDRESS_OVERRIDE_8000);
3072 
3073 		/*
3074 		 * Store the MAC address from the MAO section.
3075 		 * No byte swapping is required in the MAO section.
3076 		 */
3077 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3078 
3079 		/*
3080 		 * Force the use of the OTP MAC address in case of reserved MAC
3081 		 * address in the NVM, or if address is given but invalid.
3082 		 */
3083 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3084 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3085 		    sizeof(etherbroadcastaddr)) != 0) &&
3086 		    (memcmp(etheranyaddr, data->hw_addr,
3087 		    sizeof(etheranyaddr)) != 0) &&
3088 		    !ETHER_IS_MULTICAST(data->hw_addr))
3089 			return;
3090 	}
3091 
3092 	if (nvm_hw) {
3093 		/* Read the mac address from WFMP registers. */
3094 		uint32_t mac_addr0 =
3095 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3096 		uint32_t mac_addr1 =
3097 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3098 
3099 		hw_addr = (const uint8_t *)&mac_addr0;
3100 		data->hw_addr[0] = hw_addr[3];
3101 		data->hw_addr[1] = hw_addr[2];
3102 		data->hw_addr[2] = hw_addr[1];
3103 		data->hw_addr[3] = hw_addr[0];
3104 
3105 		hw_addr = (const uint8_t *)&mac_addr1;
3106 		data->hw_addr[4] = hw_addr[1];
3107 		data->hw_addr[5] = hw_addr[0];
3108 
3109 		return;
3110 	}
3111 
3112 	aprint_error_dev(sc->sc_dev, "mac address not found\n");
3113 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3114 }
3115 
3116 static int
3117 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3118     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3119     const uint16_t *mac_override, const uint16_t *phy_sku,
3120     const uint16_t *regulatory)
3121 {
3122 	struct iwm_nvm_data *data = &sc->sc_nvm;
3123 	uint8_t hw_addr[ETHER_ADDR_LEN];
3124 	uint32_t sku;
3125 
3126 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3127 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3128 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3129 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3130 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3131 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3132 
3133 		data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3134 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3135 	} else {
3136 		uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
3137 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3138 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3139 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3140 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3141 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3142 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3143 
3144 		data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
3145 		sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
3146 	}
3147 
3148 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3149 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3150 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3151 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3152 
3153 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3154 
3155 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3156 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3157 		data->hw_addr[0] = hw_addr[1];
3158 		data->hw_addr[1] = hw_addr[0];
3159 		data->hw_addr[2] = hw_addr[3];
3160 		data->hw_addr[3] = hw_addr[2];
3161 		data->hw_addr[4] = hw_addr[5];
3162 		data->hw_addr[5] = hw_addr[4];
3163 	} else
3164 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3165 
3166 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3167 		uint16_t lar_offset, lar_config;
3168 		lar_offset = data->nvm_version < 0xE39 ?
3169 		    IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
3170 		lar_config = le16_to_cpup(regulatory + lar_offset);
3171 		data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
3172 	}
3173 
3174 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
3175 		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3176 		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
3177 	else
3178 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3179 		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
3180 
3181 	data->calib_version = 255;   /* TODO:
3182 					this value will prevent some checks
3183 					from failing; we need to check whether
3184 					this field is still needed, and if so,
3185 					where it lives in the NVM */
3186 
3187 	return 0;
3188 }
3189 
3190 static int
3191 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3192 {
3193 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3194 	const uint16_t *regulatory = NULL;
3195 
3196 	/* Checking for required sections */
3197 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3198 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3199 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3200 			return ENOENT;
3201 		}
3202 
3203 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3204 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3205 		/* SW and REGULATORY sections are mandatory */
3206 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3207 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3208 			return ENOENT;
3209 		}
3210 		/* MAC_OVERRIDE or at least HW section must exist */
3211 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3212 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3213 			return ENOENT;
3214 		}
3215 
3216 		/* PHY_SKU section is mandatory in B0 */
3217 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3218 			return ENOENT;
3219 		}
3220 
3221 		regulatory = (const uint16_t *)
3222 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3223 		hw = (const uint16_t *)
3224 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3225 		mac_override =
3226 			(const uint16_t *)
3227 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3228 		phy_sku = (const uint16_t *)
3229 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3230 	} else {
3231 		panic("unknown device family %d", sc->sc_device_family);
3232 	}
3233 
3234 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3235 	calib = (const uint16_t *)
3236 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3237 
3238 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3239 	    phy_sku, regulatory);
3240 }
3241 
3242 static int
3243 iwm_nvm_init(struct iwm_softc *sc)
3244 {
3245 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3246 	int i, section, err;
3247 	uint16_t len;
3248 	uint8_t *buf;
3249 	const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
3250 	    IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
3251 
3252 	/* Read From FW NVM */
3253 	DPRINTF(("Read NVM\n"));
3254 
3255 	memset(nvm_sections, 0, sizeof(nvm_sections));
3256 
3257 	buf = kmem_alloc(bufsz, KM_SLEEP);
3258 
3259 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
3260 		section = iwm_nvm_to_read[i];
3261 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
3262 
3263 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3264 		if (err) {
3265 			err = 0;
3266 			continue;
3267 		}
3268 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3269 		memcpy(nvm_sections[section].data, buf, len);
3270 		nvm_sections[section].length = len;
3271 	}
3272 	kmem_free(buf, bufsz);
3273 	if (err == 0)
3274 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3275 
3276 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3277 		if (nvm_sections[i].data != NULL)
3278 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3279 	}
3280 
3281 	return err;
3282 }
3283 
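/*
 * Load one firmware section into its destination address, split into
 * chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes; the extended
 * address window is opened around chunks that target the range
 * IWM_FW_MEM_EXTENDED_START - IWM_FW_MEM_EXTENDED_END.
 */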
3284 static int
3285 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3286     const uint8_t *section, uint32_t byte_cnt)
3287 {
3288 	int err = EINVAL;
3289 	uint32_t chunk_sz, offset;
3290 
3291 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3292 
3293 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3294 		uint32_t addr, len;
3295 		const uint8_t *data;
3296 		bool is_extended = false;
3297 
3298 		addr = dst_addr + offset;
3299 		len = MIN(chunk_sz, byte_cnt - offset);
3300 		data = section + offset;
3301 
3302 		if (addr >= IWM_FW_MEM_EXTENDED_START &&
3303 		    addr <= IWM_FW_MEM_EXTENDED_END)
3304 			is_extended = true;
3305 
3306 		if (is_extended)
3307 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3308 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3309 
3310 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3311 
3312 		if (is_extended)
3313 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3314 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3315 
3316 		if (err)
3317 			break;
3318 	}
3319 
3320 	return err;
3321 }
3322 
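/*
 * DMA a single chunk to the device through the FH service channel
 * and sleep until sc_fw_chunk_done is set (by the interrupt path)
 * or a 5 second timeout expires.
 */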
3323 static int
3324 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3325     const uint8_t *section, uint32_t byte_cnt)
3326 {
3327 	struct iwm_dma_info *dma = &sc->fw_dma;
3328 	int err;
3329 
3330 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3331 	memcpy(dma->vaddr, section, byte_cnt);
3332 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3333 	    BUS_DMASYNC_PREWRITE);
3334 
3335 	sc->sc_fw_chunk_done = 0;
3336 
3337 	if (!iwm_nic_lock(sc))
3338 		return EBUSY;
3339 
3340 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3341 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3342 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3343 	    dst_addr);
3344 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3345 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3346 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3347 	    (iwm_get_dma_hi_addr(dma->paddr)
3348 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3349 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3350 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3351 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3352 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3353 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3354 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3355 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3356 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3357 
3358 	iwm_nic_unlock(sc);
3359 
3360 	/* Wait for this segment to load. */
3361 	err = 0;
3362 	while (!sc->sc_fw_chunk_done) {
3363 		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3364 		if (err)
3365 			break;
3366 	}
3367 	if (!sc->sc_fw_chunk_done) {
3368 		DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3369 		    DEVNAME(sc), dst_addr, byte_cnt));
3370 	}
3371 
3372 	return err;
3373 }
3374 
3375 static int
3376 iwm_load_cpu_sections_7000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3377     int cpu, int *first_ucode_section)
3378 {
3379 	int i, err = 0;
3380 	uint32_t last_read_idx = 0;
3381 	void *data;
3382 	uint32_t dlen;
3383 	uint32_t offset;
3384 
3385 	if (cpu == 1) {
3386 		*first_ucode_section = 0;
3387 	} else {
3388 		(*first_ucode_section)++;
3389 	}
3390 
3391 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3392 		last_read_idx = i;
3393 		data = fws->fw_sect[i].fws_data;
3394 		dlen = fws->fw_sect[i].fws_len;
3395 		offset = fws->fw_sect[i].fws_devoff;
3396 
3397 		/*
3398 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
3399 		 * CPU1 sections from the CPU2 sections.
3400 		 * PAGING_SEPARATOR_SECTION delimiter - separates the
3401 		 * non-paged CPU2 sections from the CPU2 paging sections.
3402 		 */
3403 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3404 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3405 			break;
3406 
3407 		if (dlen > sc->sc_fwdmasegsz) {
3408 			err = EFBIG;
3409 		} else
3410 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3411 		if (err) {
3412 			DPRINTF(("%s: could not load firmware chunk %d "
3413 			    "(error %d)\n", DEVNAME(sc), i, err));
3414 			return err;
3415 		}
3416 	}
3417 
3418 	*first_ucode_section = last_read_idx;
3419 
3420 	return 0;
3421 }
3422 
3423 static int
3424 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3425 {
3426 	struct iwm_fw_sects *fws;
3427 	int err = 0;
3428 	int first_ucode_section;
3429 
3430 	fws = &sc->sc_fw.fw_sects[ucode_type];
3431 
3432 	DPRINTF(("%s: working with %s CPU\n", DEVNAME(sc),
3433 	    fws->is_dual_cpus ? "dual" : "single"));
3434 
3435 	/* load to FW the binary Secured sections of CPU1 */
3436 	err = iwm_load_cpu_sections_7000(sc, fws, 1, &first_ucode_section);
3437 	if (err)
3438 		return err;
3439 
3440 	if (fws->is_dual_cpus) {
3441 		/* set CPU2 header address */
3442 		if (iwm_nic_lock(sc)) {
3443 			iwm_write_prph(sc,
3444 			    IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
3445 			    IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
3446 			iwm_nic_unlock(sc);
3447 		}
3448 
3449 		/* load to FW the binary sections of CPU2 */
3450 		err = iwm_load_cpu_sections_7000(sc, fws, 2,
3451 		    &first_ucode_section);
3452 		if (err)
3453 			return err;
3454 	}
3455 
3456 	/* release CPU reset */
3457 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3458 
3459 	return 0;
3460 }
3461 
3462 static int
3463 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3464     int cpu, int *first_ucode_section)
3465 {
3466 	int shift_param;
3467 	int i, err = 0, sec_num = 0x1;
3468 	uint32_t val, last_read_idx = 0;
3469 	void *data;
3470 	uint32_t dlen;
3471 	uint32_t offset;
3472 
3473 	if (cpu == 1) {
3474 		shift_param = 0;
3475 		*first_ucode_section = 0;
3476 	} else {
3477 		shift_param = 16;
3478 		(*first_ucode_section)++;
3479 	}
3480 
3481 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3482 		last_read_idx = i;
3483 		data = fws->fw_sect[i].fws_data;
3484 		dlen = fws->fw_sect[i].fws_len;
3485 		offset = fws->fw_sect[i].fws_devoff;
3486 
3487 		/*
3488 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
3489 		 * CPU1 sections from the CPU2 sections.
3490 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
3491 		 * CPU2 non-paged sections from the CPU2 paging sections.
3492 		 */
3493 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3494 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3495 			break;
3496 
3497 		if (dlen > sc->sc_fwdmasegsz) {
3498 			err = EFBIG;
3499 		} else
3500 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3501 		if (err) {
3502 			DPRINTF(("%s: could not load firmware chunk %d "
3503 			    "(error %d)\n", DEVNAME(sc), i, err));
3504 			return err;
3505 		}
3506 
3507 		/* Notify the ucode of the loaded section number and status */
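		/*
		 * Illustrative note (inferred from the code below, not from
		 * the reference sources): sec_num accumulates a growing mask
		 * of ones (0x1, 0x3, 0x7, ...), so after loading section k
		 * the status register holds a (k + 1)-bit mask; for CPU2,
		 * shift_param == 16 moves that mask into the upper half of
		 * the register.
		 */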
3508 		if (iwm_nic_lock(sc)) {
3509 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3510 			val = val | (sec_num << shift_param);
3511 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3512 			sec_num = (sec_num << 1) | 0x1;
3513 			iwm_nic_unlock(sc);
3514 
3515 			/*
3516 			 * The firmware won't load correctly without this delay.
3517 			 */
3518 			DELAY(8000);
3519 		}
3520 	}
3521 
3522 	*first_ucode_section = last_read_idx;
3523 
3524 	if (iwm_nic_lock(sc)) {
3525 		if (cpu == 1)
3526 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3527 		else
3528 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3529 		iwm_nic_unlock(sc);
3530 	}
3531 
3532 	return 0;
3533 }
3534 
3535 static int
3536 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3537 {
3538 	struct iwm_fw_sects *fws;
3539 	int err = 0;
3540 	int first_ucode_section;
3541 
3542 	fws = &sc->sc_fw.fw_sects[ucode_type];
3543 
3544 	/* Configure the ucode to be ready to receive the secured image */
3545 	/* by releasing the CPU reset. */
3546 	if (iwm_nic_lock(sc)) {
3547 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3548 		    IWM_RELEASE_CPU_RESET_BIT);
3549 		iwm_nic_unlock(sc);
3550 	}
3551 
3552 	/* Load the CPU1 secured binary sections into the firmware. */
3553 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3554 	if (err)
3555 		return err;
3556 
3557 	/* Load the CPU2 binary sections into the firmware. */
3558 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3559 }
3560 
3561 static int
3562 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3563 {
3564 	int err, w;
3565 
3566 	sc->sc_uc.uc_intr = 0;
3567 
3568 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3569 		err = iwm_load_firmware_8000(sc, ucode_type);
3570 	else
3571 		err = iwm_load_firmware_7000(sc, ucode_type);
3572 	if (err)
3573 		return err;
3574 
3575 	/* wait for the firmware to load */
3576 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3577 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3578 	if (err || !sc->sc_uc.uc_ok) {
3579 		aprint_error_dev(sc->sc_dev,
3580 		    "could not load firmware (error %d, ok %d)\n",
3581 		    err, sc->sc_uc.uc_ok);
3582 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3583 			aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3584 			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3585 			aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3586 			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3587 		}
3588 	}
3589 
3590 	return err;
3591 }
3592 
3593 static int
3594 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3595 {
3596 	int err;
3597 
3598 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3599 
3600 	err = iwm_nic_init(sc);
3601 	if (err) {
3602 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3603 		return err;
3604 	}
3605 
3606 	/* make sure rfkill handshake bits are cleared */
3607 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3608 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3609 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3610 
3611 	/* clear (again), then enable host interrupts */
3612 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3613 	iwm_enable_interrupts(sc);
3614 
3615 	/* really make sure rfkill handshake bits are cleared */
3616 	/* maybe we should write a few times more?  just to make sure */
3617 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3618 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3619 
3620 	return iwm_load_firmware(sc, ucode_type);
3621 }
3622 
3623 static int
3624 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3625 {
3626 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3627 		.valid = htole32(valid_tx_ant),
3628 	};
3629 
3630 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3631 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
3632 }
3633 
3634 static int
3635 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3636 {
3637 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3638 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3639 
3640 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3641 	phy_cfg_cmd.calib_control.event_trigger =
3642 	    sc->sc_default_calib[ucode_type].event_trigger;
3643 	phy_cfg_cmd.calib_control.flow_trigger =
3644 	    sc->sc_default_calib[ucode_type].flow_trigger;
3645 
3646 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3647 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3648 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3649 }
3650 
3651 static int
3652 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3653 {
3654 	struct iwm_fw_sects *fws;
3655 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3656 	int err;
3657 
3658 	err = iwm_read_firmware(sc, ucode_type);
3659 	if (err)
3660 		return err;
3661 
3662 	sc->sc_uc_current = ucode_type;
3663 	err = iwm_start_fw(sc, ucode_type);
3664 	if (err) {
3665 		sc->sc_uc_current = old_type;
3666 		return err;
3667 	}
3668 
3669 	err = iwm_post_alive(sc);
3670 	if (err)
3671 		return err;
3672 
3673 	fws = &sc->sc_fw.fw_sects[ucode_type];
3674 	if (fws->paging_mem_size) {
3675 		err = iwm_save_fw_paging(sc, fws);
3676 		if (err)
3677 			return err;
3678 
3679 		err = iwm_send_paging_cmd(sc, fws);
3680 		if (err) {
3681 			iwm_free_fw_paging(sc);
3682 			return err;
3683 		}
3684 	}
3685 
3686 	return 0;
3687 }
3688 
3689 static int
3690 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3691 {
3692 	int err;
3693 
3694 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3695 		aprint_error_dev(sc->sc_dev,
3696 		    "radio is disabled by hardware switch\n");
3697 		return EPERM;
3698 	}
3699 
3700 	sc->sc_init_complete = 0;
3701 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3702 	if (err) {
3703 		DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3704 		return err;
3705 	}
3706 
3707 	if (justnvm) {
3708 		err = iwm_nvm_init(sc);
3709 		if (err) {
3710 			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3711 			return err;
3712 		}
3713 
3714 		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3715 		    ETHER_ADDR_LEN);
3716 		return 0;
3717 	}
3718 
3719 	err = iwm_send_bt_init_conf(sc);
3720 	if (err)
3721 		return err;
3722 
3723 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3724 	if (err)
3725 		return err;
3726 
3727 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3728 	if (err)
3729 		return err;
3730 
3731 	/*
3732 	 * Send the PHY configuration command to the init uCode to start
3733 	 * the 16.0 uCode init image's internal calibrations.
3734 	 */
3735 	err = iwm_send_phy_cfg_cmd(sc);
3736 	if (err)
3737 		return err;
3738 
3739 	/*
3740 	 * Nothing to do but wait for the init complete notification
3741 	 * from the firmware
3742 	 */
3743 	while (!sc->sc_init_complete) {
3744 		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3745 		if (err)
3746 			break;
3747 	}
3748 
3749 	return err;
3750 }
3751 
3752 static int
3753 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3754 {
3755 	struct iwm_rx_ring *ring = &sc->rxq;
3756 	struct iwm_rx_data *data = &ring->data[idx];
3757 	struct mbuf *m;
3758 	int err;
3759 	int fatal = 0;
3760 
3761 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3762 	if (m == NULL)
3763 		return ENOBUFS;
3764 
3765 	if (size <= MCLBYTES) {
3766 		MCLGET(m, M_DONTWAIT);
3767 	} else {
3768 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3769 	}
3770 	if ((m->m_flags & M_EXT) == 0) {
3771 		m_freem(m);
3772 		return ENOBUFS;
3773 	}
3774 
3775 	if (data->m != NULL) {
3776 		bus_dmamap_unload(sc->sc_dmat, data->map);
3777 		fatal = 1;
3778 	}
3779 
3780 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3781 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3782 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3783 	if (err) {
3784 		/* XXX */
3785 		if (fatal)
3786 			panic("iwm: could not load RX mbuf");
3787 		m_freem(m);
3788 		return err;
3789 	}
3790 	data->m = m;
3791 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3792 
3793 	/* Update RX descriptor. */
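	/*
	 * Note (an assumption based on the iwlwifi reference code): the
	 * hardware takes the DMA address shifted right by 8 bits, i.e.
	 * RX buffers must be 256-byte aligned.
	 */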
3794 	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3795 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3796 	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3797 
3798 	return 0;
3799 }
3800 
3801 #define IWM_RSSI_OFFSET 50
3802 static int
3803 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3804 {
3805 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3806 	uint32_t agc_a, agc_b;
3807 	uint32_t val;
3808 
3809 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3810 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3811 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3812 
3813 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3814 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3815 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3816 
3817 	/*
3818 	 * dBm = rssi dB - agc dB - constant.
3819 	 * Higher AGC (higher radio gain) means lower signal.
3820 	 */
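	/*
	 * Worked example with made-up values: rssi_a = 50 and agc_a = 30
	 * yield 50 - IWM_RSSI_OFFSET - 30 = -30 dBm; the larger (stronger)
	 * of the per-chain values is reported below.
	 */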
3821 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3822 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3823 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3824 
3825 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3826 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3827 
3828 	return max_rssi_dbm;
3829 }
3830 
3831 /*
3832  * RSSI values are reported by the FW as positive values; negate them
3833  * to obtain dBm.  Account for missing antennas by replacing 0 values
3834  * with -256 dBm: practically zero power and an infeasible 8-bit value.
3835  */
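/*
 * Illustrative example: an energy field of 40 becomes -40 dBm, while a
 * 0 field (missing antenna) becomes -256 dBm and therefore never wins
 * the MAX comparisons below.
 */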
3836 static int
3837 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3838 {
3839 	int energy_a, energy_b, energy_c, max_energy;
3840 	uint32_t val;
3841 
3842 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3843 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3844 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3845 	energy_a = energy_a ? -energy_a : -256;
3846 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3847 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3848 	energy_b = energy_b ? -energy_b : -256;
3849 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3850 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3851 	energy_c = energy_c ? -energy_c : -256;
3852 	max_energy = MAX(energy_a, energy_b);
3853 	max_energy = MAX(max_energy, energy_c);
3854 
3855 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3856 	    energy_a, energy_b, energy_c, max_energy));
3857 
3858 	return max_energy;
3859 }
3860 
3861 static void
3862 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3863     struct iwm_rx_data *data)
3864 {
3865 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3866 
3867 	DPRINTFN(20, ("received PHY stats\n"));
3868 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3869 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3870 
3871 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3872 }
3873 
3874 /*
3875  * Retrieve the average noise (in dBm) among receivers.
3876  */
3877 static int
3878 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3879 {
3880 	int i, total, nbant, noise;
3881 
3882 	total = nbant = noise = 0;
3883 	for (i = 0; i < 3; i++) {
3884 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3885 		if (noise) {
3886 			total += noise;
3887 			nbant++;
3888 		}
3889 	}
3890 
3891 	/* There should be at least one antenna but check anyway. */
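	/*
	 * Illustrative example: beacon silence values of 45, 47 and 0 give
	 * total = 92 over nbant = 2, i.e. 92 / 2 - 107 = -61 dBm.
	 */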
3892 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3893 }
3894 
3895 static void
3896 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3897     struct iwm_rx_data *data)
3898 {
3899 	struct ieee80211com *ic = &sc->sc_ic;
3900 	struct ieee80211_frame *wh;
3901 	struct ieee80211_node *ni;
3902 	struct ieee80211_channel *c = NULL;
3903 	struct mbuf *m;
3904 	struct iwm_rx_phy_info *phy_info;
3905 	struct iwm_rx_mpdu_res_start *rx_res;
3906 	int device_timestamp;
3907 	uint32_t len;
3908 	uint32_t rx_pkt_status;
3909 	int rssi;
3910 	int s;
3911 
3912 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3913 	    BUS_DMASYNC_POSTREAD);
3914 
3915 	phy_info = &sc->sc_last_phy_info;
3916 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3917 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3918 	len = le16toh(rx_res->byte_count);
3919 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3920 	    sizeof(*rx_res) + len));
3921 
3922 	m = data->m;
3923 	m->m_data = pkt->data + sizeof(*rx_res);
3924 	m->m_pkthdr.len = m->m_len = len;
3925 
3926 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3927 		DPRINTF(("dsp size out of range [0,20]: %d\n",
3928 		    phy_info->cfg_phy_cnt));
3929 		return;
3930 	}
3931 
3932 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3933 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3934 		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3935 		return; /* drop */
3936 	}
3937 
3938 	device_timestamp = le32toh(phy_info->system_timestamp);
3939 
3940 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3941 		rssi = iwm_get_signal_strength(sc, phy_info);
3942 	} else {
3943 		rssi = iwm_calc_rssi(sc, phy_info);
3944 	}
3945 	rssi = -rssi;
3946 
3947 	if (ic->ic_state == IEEE80211_S_SCAN)
3948 		iwm_fix_channel(sc, m);
3949 
3950 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3951 		return;
3952 
3953 	m_set_rcvif(m, IC2IFP(ic));
3954 
3955 	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3956 		c = &ic->ic_channels[le32toh(phy_info->channel)];
3957 
3958 	s = splnet();
3959 
3960 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3961 	if (c)
3962 		ni->ni_chan = c;
3963 
3964 	if (__predict_false(sc->sc_drvbpf != NULL)) {
3965 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3966 
3967 		tap->wr_flags = 0;
3968 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3969 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3970 		tap->wr_chan_freq =
3971 		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
3972 		tap->wr_chan_flags =
3973 		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
3974 		tap->wr_dbm_antsignal = (int8_t)rssi;
3975 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3976 		tap->wr_tsft = phy_info->system_timestamp;
3977 		if (phy_info->phy_flags &
3978 		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3979 			uint8_t mcs = (phy_info->rate_n_flags &
3980 			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
3981 			      IWM_RATE_HT_MCS_NSS_MSK));
3982 			tap->wr_rate = (0x80 | mcs);
3983 		} else {
3984 			uint8_t rate = (phy_info->rate_n_flags &
3985 			    htole32(IWM_RATE_LEGACY_RATE_MSK));
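			/*
			 * Map PLCP rate codes to radiotap rates, which are
			 * in 0.5 Mb/s units; e.g. CCK code 110 is 11 Mb/s,
			 * reported as 22.
			 */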
3986 			switch (rate) {
3987 			/* CCK rates. */
3988 			case  10: tap->wr_rate =   2; break;
3989 			case  20: tap->wr_rate =   4; break;
3990 			case  55: tap->wr_rate =  11; break;
3991 			case 110: tap->wr_rate =  22; break;
3992 			/* OFDM rates. */
3993 			case 0xd: tap->wr_rate =  12; break;
3994 			case 0xf: tap->wr_rate =  18; break;
3995 			case 0x5: tap->wr_rate =  24; break;
3996 			case 0x7: tap->wr_rate =  36; break;
3997 			case 0x9: tap->wr_rate =  48; break;
3998 			case 0xb: tap->wr_rate =  72; break;
3999 			case 0x1: tap->wr_rate =  96; break;
4000 			case 0x3: tap->wr_rate = 108; break;
4001 			/* Unknown rate: should not happen. */
4002 			default:  tap->wr_rate =   0;
4003 			}
4004 		}
4005 
4006 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
4007 	}
4008 	ieee80211_input(ic, m, ni, rssi, device_timestamp);
4009 	ieee80211_free_node(ni);
4010 
4011 	splx(s);
4012 }
4013 
4014 static void
4015 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4016     struct iwm_node *in)
4017 {
4018 	struct ieee80211com *ic = &sc->sc_ic;
4019 	struct ifnet *ifp = IC2IFP(ic);
4020 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4021 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4022 	int failack = tx_resp->failure_frame;
4023 
4024 	KASSERT(tx_resp->frame_count == 1);
4025 
4026 	/* Update rate control statistics. */
4027 	in->in_amn.amn_txcnt++;
4028 	if (failack > 0) {
4029 		in->in_amn.amn_retrycnt++;
4030 	}
4031 
4032 	if (status != IWM_TX_STATUS_SUCCESS &&
4033 	    status != IWM_TX_STATUS_DIRECT_DONE)
4034 		ifp->if_oerrors++;
4035 	else
4036 		ifp->if_opackets++;
4037 }
4038 
4039 static void
4040 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4041     struct iwm_rx_data *data)
4042 {
4043 	struct ieee80211com *ic = &sc->sc_ic;
4044 	struct ifnet *ifp = IC2IFP(ic);
4045 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4046 	int idx = cmd_hdr->idx;
4047 	int qid = cmd_hdr->qid;
4048 	struct iwm_tx_ring *ring = &sc->txq[qid];
4049 	struct iwm_tx_data *txd = &ring->data[idx];
4050 	struct iwm_node *in = txd->in;
4051 	int s;
4052 
4053 	s = splnet();
4054 
4055 	if (txd->done) {
4056 		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
4057 		    DEVNAME(sc)));
4058 		splx(s);
4059 		return;
4060 	}
4061 
4062 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4063 	    BUS_DMASYNC_POSTREAD);
4064 
4065 	sc->sc_tx_timer = 0;
4066 
4067 	iwm_rx_tx_cmd_single(sc, pkt, in);
4068 
4069 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4070 	    BUS_DMASYNC_POSTWRITE);
4071 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4072 	m_freem(txd->m);
4073 
4074 	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
4075 	KASSERT(txd->done == 0);
4076 	txd->done = 1;
4077 	KASSERT(txd->in);
4078 
4079 	txd->m = NULL;
4080 	txd->in = NULL;
4081 	ieee80211_free_node(&in->in_ni);
4082 
4083 	if (--ring->queued < IWM_TX_RING_LOMARK) {
4084 		sc->qfullmsk &= ~(1 << qid);
4085 		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
4086 			ifp->if_flags &= ~IFF_OACTIVE;
4087 			KASSERT(KERNEL_LOCKED_P());
4088 			iwm_start(ifp);
4089 		}
4090 	}
4091 
4092 	splx(s);
4093 }
4094 
4095 static int
4096 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4097 {
4098 	struct iwm_binding_cmd cmd;
4099 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4100 	int i, err;
4101 	uint32_t status;
4102 
4103 	memset(&cmd, 0, sizeof(cmd));
4104 
4105 	cmd.id_and_color
4106 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4107 	cmd.action = htole32(action);
4108 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4109 
4110 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4111 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4112 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4113 
4114 	status = 0;
4115 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4116 	    sizeof(cmd), &cmd, &status);
4117 	if (err == 0 && status != 0)
4118 		err = EIO;
4119 
4120 	return err;
4121 }
4122 
4123 static void
4124 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4125     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4126 {
4127 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4128 
4129 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4130 	    ctxt->color));
4131 	cmd->action = htole32(action);
4132 	cmd->apply_time = htole32(apply_time);
4133 }
4134 
4135 static void
4136 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4137     struct ieee80211_channel *chan, uint8_t chains_static,
4138     uint8_t chains_dynamic)
4139 {
4140 	struct ieee80211com *ic = &sc->sc_ic;
4141 	uint8_t active_cnt, idle_cnt;
4142 
4143 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4144 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4145 
4146 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4147 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4148 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4149 
4150 	/* Set the RX chains. */
4151 	idle_cnt = chains_static;
4152 	active_cnt = chains_dynamic;
4153 
4154 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4155 	    IWM_PHY_RX_CHAIN_VALID_POS);
4156 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4157 	cmd->rxchain_info |= htole32(active_cnt <<
4158 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4159 
4160 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4161 }
4162 
4163 static int
4164 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4165     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4166     uint32_t apply_time)
4167 {
4168 	struct iwm_phy_context_cmd cmd;
4169 
4170 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4171 
4172 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4173 	    chains_static, chains_dynamic);
4174 
4175 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4176 	    sizeof(struct iwm_phy_context_cmd), &cmd);
4177 }
4178 
4179 static int
4180 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4181 {
4182 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4183 	struct iwm_tfd *desc;
4184 	struct iwm_tx_data *txdata;
4185 	struct iwm_device_cmd *cmd;
4186 	struct mbuf *m;
4187 	bus_addr_t paddr;
4188 	uint32_t addr_lo;
4189 	int err = 0, i, paylen, off, s;
4190 	int code;
4191 	int async, wantresp;
4192 	int group_id;
4193 	size_t hdrlen, datasz;
4194 	uint8_t *data;
4195 
4196 	code = hcmd->id;
4197 	async = hcmd->flags & IWM_CMD_ASYNC;
4198 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
4199 
4200 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
4201 		paylen += hcmd->len[i];
4202 	}
4203 
4204 	/* if the command wants an answer, busy sc_cmd_resp */
4205 	if (wantresp) {
4206 		KASSERT(!async);
4207 		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
4208 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
4209 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
4210 	}
4211 
4212 	/*
4213 	 * Is the hardware still available (e.g. after the wait above)?
4214 	 */
4215 	s = splnet();
4216 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
4217 		err = ENXIO;
4218 		goto out;
4219 	}
4220 
4221 	desc = &ring->desc[ring->cur];
4222 	txdata = &ring->data[ring->cur];
4223 
4224 	group_id = iwm_cmd_groupid(code);
4225 	if (group_id != 0) {
4226 		hdrlen = sizeof(cmd->hdr_wide);
4227 		datasz = sizeof(cmd->data_wide);
4228 	} else {
4229 		hdrlen = sizeof(cmd->hdr);
4230 		datasz = sizeof(cmd->data);
4231 	}
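	/*
	 * Commands in a non-zero group use the wide header, which carries
	 * an explicit group id and version; legacy (group 0) commands keep
	 * the short header and thus a larger payload area (datasz).
	 */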
4232 
4233 	if (paylen > datasz) {
4234 		/* Command is too large to fit in pre-allocated space. */
4235 		size_t totlen = hdrlen + paylen;
4236 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4237 			aprint_error_dev(sc->sc_dev,
4238 			    "firmware command too long (%zd bytes)\n", totlen);
4239 			err = EINVAL;
4240 			goto out;
4241 		}
4242 		m = m_gethdr(M_DONTWAIT, MT_DATA);
4243 		if (m == NULL) {
4244 			err = ENOMEM;
4245 			goto out;
4246 		}
4247 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
4248 		if (!(m->m_flags & M_EXT)) {
4249 			aprint_error_dev(sc->sc_dev,
4250 			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
4251 			m_freem(m);
4252 			err = ENOMEM;
4253 			goto out;
4254 		}
4255 		cmd = mtod(m, struct iwm_device_cmd *);
4256 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4257 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4258 		if (err) {
4259 			aprint_error_dev(sc->sc_dev,
4260 			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
4261 			m_freem(m);
4262 			goto out;
4263 		}
4264 		txdata->m = m;
4265 		paddr = txdata->map->dm_segs[0].ds_addr;
4266 	} else {
4267 		cmd = &ring->cmd[ring->cur];
4268 		paddr = txdata->cmd_paddr;
4269 	}
4270 
4271 	if (group_id != 0) {
4272 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4273 		cmd->hdr_wide.group_id = group_id;
4274 		cmd->hdr_wide.qid = ring->qid;
4275 		cmd->hdr_wide.idx = ring->cur;
4276 		cmd->hdr_wide.length = htole16(paylen);
4277 		cmd->hdr_wide.version = iwm_cmd_version(code);
4278 		data = cmd->data_wide;
4279 	} else {
4280 		cmd->hdr.code = code;
4281 		cmd->hdr.flags = 0;
4282 		cmd->hdr.qid = ring->qid;
4283 		cmd->hdr.idx = ring->cur;
4284 		data = cmd->data;
4285 	}
4286 
4287 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
4288 		if (hcmd->len[i] == 0)
4289 			continue;
4290 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4291 		off += hcmd->len[i];
4292 	}
4293 	KASSERT(off == paylen);
4294 
4295 	/* lo field is not aligned */
4296 	addr_lo = htole32((uint32_t)paddr);
4297 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
4298 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
4299 	    | ((hdrlen + paylen) << 4));
4300 	desc->num_tbs = 1;
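	/*
	 * Illustrative note: hi_n_len packs the upper DMA address bits
	 * (above bit 31) into bits 0-3 and the byte count into bits 4-15;
	 * e.g. a 320-byte command at a 32-bit address yields
	 * htole16(0 | (320 << 4)).
	 */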
4301 
4302 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
4303 	    code, hdrlen + paylen, async ? " (async)" : ""));
4304 
4305 	if (paylen > datasz) {
4306 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
4307 		    BUS_DMASYNC_PREWRITE);
4308 	} else {
4309 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4310 		    (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
4311 		    BUS_DMASYNC_PREWRITE);
4312 	}
4313 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4314 	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4315 	    BUS_DMASYNC_PREWRITE);
4316 
4317 	err = iwm_set_cmd_in_flight(sc);
4318 	if (err)
4319 		goto out;
4320 	ring->queued++;
4321 
4322 #if 0
4323 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4324 #endif
4325 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
4326 	    code, ring->qid, ring->cur));
4327 
4328 	/* Kick command ring. */
4329 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4330 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4331 
4332 	if (!async) {
4333 		int generation = sc->sc_generation;
4334 		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
4335 		if (err == 0) {
4336 			/* if hardware is no longer up, return error */
4337 			if (generation != sc->sc_generation) {
4338 				err = ENXIO;
4339 			} else {
4340 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4341 			}
4342 		}
4343 	}
4344  out:
4345 	if (wantresp && err) {
4346 		iwm_free_resp(sc, hcmd);
4347 	}
4348 	splx(s);
4349 
4350 	return err;
4351 }
4352 
4353 static int
4354 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4355     uint16_t len, const void *data)
4356 {
4357 	struct iwm_host_cmd cmd = {
4358 		.id = id,
4359 		.len = { len, },
4360 		.data = { data, },
4361 		.flags = flags,
4362 	};
4363 
4364 	return iwm_send_cmd(sc, &cmd);
4365 }
4366 
4367 static int
4368 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4369     uint32_t *status)
4370 {
4371 	struct iwm_rx_packet *pkt;
4372 	struct iwm_cmd_response *resp;
4373 	int err, resp_len;
4374 
4375 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4376 	cmd->flags |= IWM_CMD_WANT_SKB;
4377 
4378 	err = iwm_send_cmd(sc, cmd);
4379 	if (err)
4380 		return err;
4381 	pkt = cmd->resp_pkt;
4382 
4383 	/* Can happen if RFKILL is asserted */
4384 	if (!pkt) {
4385 		err = 0;
4386 		goto out_free_resp;
4387 	}
4388 
4389 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4390 		err = EIO;
4391 		goto out_free_resp;
4392 	}
4393 
4394 	resp_len = iwm_rx_packet_payload_len(pkt);
4395 	if (resp_len != sizeof(*resp)) {
4396 		err = EIO;
4397 		goto out_free_resp;
4398 	}
4399 
4400 	resp = (void *)pkt->data;
4401 	*status = le32toh(resp->status);
4402  out_free_resp:
4403 	iwm_free_resp(sc, cmd);
4404 	return err;
4405 }
4406 
4407 static int
4408 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4409     const void *data, uint32_t *status)
4410 {
4411 	struct iwm_host_cmd cmd = {
4412 		.id = id,
4413 		.len = { len, },
4414 		.data = { data, },
4415 	};
4416 
4417 	return iwm_send_cmd_status(sc, &cmd, status);
4418 }
4419 
4420 static void
4421 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4422 {
4423 	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4424 	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4425 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4426 	wakeup(&sc->sc_wantresp);
4427 }
4428 
4429 static void
4430 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4431 {
4432 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4433 	struct iwm_tx_data *data;
4434 	int s;
4435 
4436 	if (qid != IWM_CMD_QUEUE) {
4437 		return;	/* Not a command ack. */
4438 	}
4439 
4440 	s = splnet();
4441 
4442 	data = &ring->data[idx];
4443 
4444 	if (data->m != NULL) {
4445 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4446 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4447 		bus_dmamap_unload(sc->sc_dmat, data->map);
4448 		m_freem(data->m);
4449 		data->m = NULL;
4450 	}
4451 	wakeup(&ring->desc[idx]);
4452 
4453 	if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4454 		aprint_error_dev(sc->sc_dev,
4455 		    "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4456 		    idx, ring->queued, ring->cur);
4457 	}
4458 
4459 	KASSERT(ring->queued > 0);
4460 	if (--ring->queued == 0)
4461 		iwm_clear_cmd_in_flight(sc);
4462 
4463 	splx(s);
4464 }
4465 
4466 #if 0
4467 /*
4468  * necessary only for block ack mode
4469  */
4470 void
4471 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4472     uint16_t len)
4473 {
4474 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4475 	uint16_t w_val;
4476 
4477 	scd_bc_tbl = sc->sched_dma.vaddr;
4478 
4479 	len += 8; /* magic numbers came naturally from paris */
4480 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4481 		len = roundup(len, 4) / 4;
4482 
4483 	w_val = htole16(sta_id << 12 | len);
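	/*
	 * Illustrative note: the scheduler entry packs sta_id into bits
	 * 12-15 and the (possibly dword-scaled) length into bits 0-11.
	 */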
4484 
4485 	/* Update TX scheduler. */
4486 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4487 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4488 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
4489 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4490 
4491 	/* I really wonder what this is ?!? */
4492 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4493 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4494 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4495 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4496 		    (char *)(void *)sc->sched_dma.vaddr,
4497 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4498 	}
4499 }
4500 #endif
4501 
4502 /*
4503  * Fill in various bits for management frames, and leave them
4504  * unfilled for data frames (the firmware takes care of those).
4505  * Return the selected TX rate.
4506  */
4507 static const struct iwm_rate *
4508 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4509     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4510 {
4511 	struct ieee80211com *ic = &sc->sc_ic;
4512 	struct ieee80211_node *ni = &in->in_ni;
4513 	const struct iwm_rate *rinfo;
4514 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4515 	int ridx, rate_flags, i, ind;
4516 	int nrates = ni->ni_rates.rs_nrates;
4517 
4518 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4519 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4520 
4521 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4522 	    type != IEEE80211_FC0_TYPE_DATA) {
4523 		/* for non-data, use the lowest supported rate */
4524 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4525 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4526 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4527 #ifndef IEEE80211_NO_HT
4528 	} else if (ic->ic_fixed_mcs != -1) {
4529 		ridx = sc->sc_fixed_ridx;
4530 #endif
4531 	} else if (ic->ic_fixed_rate != -1) {
4532 		ridx = sc->sc_fixed_ridx;
4533 	} else {
4534 		/* for data frames, use RS table */
4535 		tx->initial_rate_index = 0;
4536 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4537 		DPRINTFN(12, ("start with txrate %d\n",
4538 		    tx->initial_rate_index));
4539 #ifndef IEEE80211_NO_HT
4540 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4541 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4542 			return &iwm_rates[ridx];
4543 		}
4544 #endif
4545 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4546 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4547 		for (i = 0; i < nrates; i++) {
4548 			if (iwm_rates[i].rate == (ni->ni_txrate &
4549 			    IEEE80211_RATE_VAL)) {
4550 				ridx = i;
4551 				break;
4552 			}
4553 		}
4554 		return &iwm_rates[ridx];
4555 	}
4556 
4557 	rinfo = &iwm_rates[ridx];
4558 	for (i = 0, ind = sc->sc_mgmt_last_antenna;
4559 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
4560 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4561 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4562 			sc->sc_mgmt_last_antenna = ind;
4563 			break;
4564 		}
4565 	}
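	/*
	 * The loop above round-robins sc_mgmt_last_antenna over the
	 * antennas the firmware reports as valid; e.g. with a valid mask
	 * of 0x3 (ANT_A|ANT_B), successive management frames alternate
	 * between the two chains.
	 */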
4566 	rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4567 	if (IWM_RIDX_IS_CCK(ridx))
4568 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4569 #ifndef IEEE80211_NO_HT
4570 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4571 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4572 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4573 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4574 	} else
4575 #endif
4576 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4577 
4578 	return rinfo;
4579 }
4580 
4581 #define TB0_SIZE 16
4582 static int
4583 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4584 {
4585 	struct ieee80211com *ic = &sc->sc_ic;
4586 	struct iwm_node *in = (struct iwm_node *)ni;
4587 	struct iwm_tx_ring *ring;
4588 	struct iwm_tx_data *data;
4589 	struct iwm_tfd *desc;
4590 	struct iwm_device_cmd *cmd;
4591 	struct iwm_tx_cmd *tx;
4592 	struct ieee80211_frame *wh;
4593 	struct ieee80211_key *k = NULL;
4594 	struct mbuf *m1;
4595 	const struct iwm_rate *rinfo;
4596 	uint32_t flags;
4597 	u_int hdrlen;
4598 	bus_dma_segment_t *seg;
4599 	uint8_t tid, type;
4600 	int i, totlen, err, pad;
4601 
4602 	wh = mtod(m, struct ieee80211_frame *);
4603 	hdrlen = ieee80211_anyhdrsize(wh);
4604 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4605 
4606 	tid = 0;
4607 
4608 	ring = &sc->txq[ac];
4609 	desc = &ring->desc[ring->cur];
4610 	memset(desc, 0, sizeof(*desc));
4611 	data = &ring->data[ring->cur];
4612 
4613 	cmd = &ring->cmd[ring->cur];
4614 	cmd->hdr.code = IWM_TX_CMD;
4615 	cmd->hdr.flags = 0;
4616 	cmd->hdr.qid = ring->qid;
4617 	cmd->hdr.idx = ring->cur;
4618 
4619 	tx = (void *)cmd->data;
4620 	memset(tx, 0, sizeof(*tx));
4621 
4622 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4623 
4624 	if (__predict_false(sc->sc_drvbpf != NULL)) {
4625 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4626 
4627 		tap->wt_flags = 0;
4628 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4629 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4630 #ifndef IEEE80211_NO_HT
4631 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4632 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4633 		    type == IEEE80211_FC0_TYPE_DATA &&
4634 		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
4635 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4636 		} else
4637 #endif
4638 			tap->wt_rate = rinfo->rate;
4639 		tap->wt_hwqueue = ac;
4640 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4641 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4642 
4643 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
4644 	}
4645 
4646 	/* Encrypt the frame if need be. */
4647 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4648 		k = ieee80211_crypto_encap(ic, ni, m);
4649 		if (k == NULL) {
4650 			m_freem(m);
4651 			return ENOBUFS;
4652 		}
4653 		/* Packet header may have moved, reset our local pointer. */
4654 		wh = mtod(m, struct ieee80211_frame *);
4655 	}
4656 	totlen = m->m_pkthdr.len;
4657 
4658 	flags = 0;
4659 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4660 		flags |= IWM_TX_CMD_FLG_ACK;
4661 	}
4662 
4663 	if (type == IEEE80211_FC0_TYPE_DATA &&
4664 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4665 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4666 	     (ic->ic_flags & IEEE80211_F_USEPROT)))
4667 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4668 
4669 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4670 	    type != IEEE80211_FC0_TYPE_DATA)
4671 		tx->sta_id = IWM_AUX_STA_ID;
4672 	else
4673 		tx->sta_id = IWM_STATION_ID;
4674 
4675 	if (type == IEEE80211_FC0_TYPE_MGT) {
4676 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4677 
4678 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4679 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4680 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4681 		else
4682 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4683 	} else {
4684 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4685 	}
4686 
4687 	if (hdrlen & 3) {
4688 		/* First segment length must be a multiple of 4. */
4689 		flags |= IWM_TX_CMD_FLG_MH_PAD;
4690 		pad = 4 - (hdrlen & 3);
4691 	} else
4692 		pad = 0;
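	/*
	 * Illustrative example: a 26-byte QoS data header has
	 * (hdrlen & 3) == 2, so pad = 2 and IWM_TX_CMD_FLG_MH_PAD tells
	 * the firmware to skip the two fill bytes.
	 */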
4693 
4694 	tx->driver_txop = 0;
4695 	tx->next_frame_len = 0;
4696 
4697 	tx->len = htole16(totlen);
4698 	tx->tid_tspec = tid;
4699 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4700 
4701 	/* Set physical address of "scratch area". */
4702 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4703 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4704 
4705 	/* Copy 802.11 header in TX command. */
4706 	memcpy(tx + 1, wh, hdrlen);
4707 
4708 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4709 
4710 	tx->sec_ctl = 0;
4711 	tx->tx_flags |= htole32(flags);
4712 
4713 	/* Trim 802.11 header. */
4714 	m_adj(m, hdrlen);
4715 
4716 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4717 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4718 	if (err) {
4719 		if (err != EFBIG) {
4720 			aprint_error_dev(sc->sc_dev,
4721 			    "can't map mbuf (error %d)\n", err);
4722 			m_freem(m);
4723 			return err;
4724 		}
4725 		/* Too many DMA segments, linearize mbuf. */
4726 		MGETHDR(m1, M_DONTWAIT, MT_DATA);
4727 		if (m1 == NULL) {
4728 			m_freem(m);
4729 			return ENOBUFS;
4730 		}
4731 		if (m->m_pkthdr.len > MHLEN) {
4732 			MCLGET(m1, M_DONTWAIT);
4733 			if (!(m1->m_flags & M_EXT)) {
4734 				m_freem(m);
4735 				m_freem(m1);
4736 				return ENOBUFS;
4737 			}
4738 		}
4739 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4740 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4741 		m_freem(m);
4742 		m = m1;
4743 
4744 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4745 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4746 		if (err) {
4747 			aprint_error_dev(sc->sc_dev,
4748 			    "can't map mbuf (error %d)\n", err);
4749 			m_freem(m);
4750 			return err;
4751 		}
4752 	}
4753 	data->m = m;
4754 	data->in = in;
4755 	data->done = 0;
4756 
4757 	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4758 	KASSERT(data->in != NULL);
4759 
4760 	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4761 	    "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4762 	    ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4763 	    (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4764 	    le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4765 	    le32toh(tx->rate_n_flags)));
4766 
4767 	/* Fill TX descriptor. */
4768 	desc->num_tbs = 2 + data->map->dm_nsegs;
4769 
4770 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4771 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4772 	    (TB0_SIZE << 4);
4773 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4774 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4775 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4776 	      + hdrlen + pad - TB0_SIZE) << 4);
4777 
4778 	/* Other DMA segments are for data payload. */
4779 	seg = data->map->dm_segs;
4780 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4781 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
4782 		desc->tbs[i+2].hi_n_len =
4783 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4784 		    | ((seg->ds_len) << 4);
4785 	}
4786 
4787 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4788 	    BUS_DMASYNC_PREWRITE);
4789 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4790 	    (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4791 	    BUS_DMASYNC_PREWRITE);
4792 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4793 	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4794 	    BUS_DMASYNC_PREWRITE);
4795 
4796 #if 0
4797 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4798 	    le16toh(tx->len));
4799 #endif
4800 
4801 	/* Kick TX ring. */
4802 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4803 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4804 
4805 	/* Mark TX ring as full if we reach a certain threshold. */
4806 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4807 		sc->qfullmsk |= 1 << ring->qid;
4808 	}
4809 
4810 	return 0;
4811 }
4812 
4813 #if 0
4814 /* not necessary? */
4815 static int
4816 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4817 {
4818 	struct iwm_tx_path_flush_cmd flush_cmd = {
4819 		.queues_ctl = htole32(tfd_msk),
4820 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4821 	};
4822 	int err;
4823 
4824 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4825 	    sizeof(flush_cmd), &flush_cmd);
4826 	if (err)
4827 		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4828 		    err);
4829 	return err;
4830 }
4831 #endif
4832 
4833 static void
4834 iwm_led_enable(struct iwm_softc *sc)
4835 {
4836 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4837 }
4838 
4839 static void
4840 iwm_led_disable(struct iwm_softc *sc)
4841 {
4842 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4843 }
4844 
4845 static int
4846 iwm_led_is_enabled(struct iwm_softc *sc)
4847 {
4848 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4849 }
4850 
4851 static void
4852 iwm_led_blink_timeout(void *arg)
4853 {
4854 	struct iwm_softc *sc = arg;
4855 
4856 	if (iwm_led_is_enabled(sc))
4857 		iwm_led_disable(sc);
4858 	else
4859 		iwm_led_enable(sc);
4860 
4861 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4862 }
4863 
4864 static void
4865 iwm_led_blink_start(struct iwm_softc *sc)
4866 {
4867 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4868 }
4869 
4870 static void
4871 iwm_led_blink_stop(struct iwm_softc *sc)
4872 {
4873 	callout_stop(&sc->sc_led_blink_to);
4874 	iwm_led_disable(sc);
4875 }
4876 
4877 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4878 
4879 static int
4880 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4881     struct iwm_beacon_filter_cmd *cmd)
4882 {
4883 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4884 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4885 }
4886 
4887 static void
4888 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4889     struct iwm_beacon_filter_cmd *cmd)
4890 {
4891 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4892 }
4893 
4894 static int
4895 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4896 {
4897 	struct iwm_beacon_filter_cmd cmd = {
4898 		IWM_BF_CMD_CONFIG_DEFAULTS,
4899 		.bf_enable_beacon_filter = htole32(1),
4900 		.ba_enable_beacon_abort = htole32(enable),
4901 	};
4902 
4903 	if (!sc->sc_bf.bf_enabled)
4904 		return 0;
4905 
4906 	sc->sc_bf.ba_enabled = enable;
4907 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4908 	return iwm_beacon_filter_send_cmd(sc, &cmd);
4909 }
4910 
4911 static void
4912 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4913     struct iwm_mac_power_cmd *cmd)
4914 {
4915 	struct ieee80211_node *ni = &in->in_ni;
4916 	int dtim_period, dtim_msec, keep_alive;
4917 
4918 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4919 	    in->in_color));
4920 	if (ni->ni_dtim_period)
4921 		dtim_period = ni->ni_dtim_period;
4922 	else
4923 		dtim_period = 1;
4924 
4925 	/*
4926 	 * Regardless of power management state, the driver must set the
4927 	 * keep-alive period; the FW uses it to send keep-alive NDPs
4928 	 * immediately after association. Ensure the keep-alive period
4929 	 * is at least 3 * DTIM.
4930 	 */
4931 	dtim_msec = dtim_period * ni->ni_intval;
4932 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4933 	keep_alive = roundup(keep_alive, 1000) / 1000;
4934 	cmd->keep_alive_seconds = htole16(keep_alive);
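	/*
	 * Illustrative example (treating ni_intval's TU as roughly ms):
	 * dtim_period = 1 and ni_intval = 100 give dtim_msec = 100, so
	 * keep_alive = MAX(300, 25000) = 25000 ms, rounded up and
	 * converted to 25 seconds.
	 */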
4935 
4936 #ifdef notyet
4937 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4938 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4939 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4940 #endif
4941 }
4942 
4943 static int
4944 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4945 {
4946 	int err;
4947 	int ba_enable;
4948 	struct iwm_mac_power_cmd cmd;
4949 
4950 	memset(&cmd, 0, sizeof(cmd));
4951 
4952 	iwm_power_build_cmd(sc, in, &cmd);
4953 
4954 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4955 	    sizeof(cmd), &cmd);
4956 	if (err)
4957 		return err;
4958 
4959 	ba_enable = !!(cmd.flags &
4960 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4961 	return iwm_update_beacon_abort(sc, in, ba_enable);
4962 }
4963 
4964 static int
4965 iwm_power_update_device(struct iwm_softc *sc)
4966 {
4967 	struct iwm_device_power_cmd cmd = {
4968 #ifdef notyet
4969 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4970 #else
4971 		.flags = 0,
4972 #endif
4973 	};
4974 
4975 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4976 		return 0;
4977 
4978 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4979 	DPRINTF(("Sending device power command with flags = 0x%X\n",
4980 	    cmd.flags));
4981 
4982 	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4983 }
4984 
4985 #ifdef notyet
4986 static int
4987 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4988 {
4989 	struct iwm_beacon_filter_cmd cmd = {
4990 		IWM_BF_CMD_CONFIG_DEFAULTS,
4991 		.bf_enable_beacon_filter = htole32(1),
4992 	};
4993 	int err;
4994 
4995 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4996 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4997 
4998 	if (err == 0)
4999 		sc->sc_bf.bf_enabled = 1;
5000 
5001 	return err;
5002 }
5003 #endif
5004 
5005 static int
5006 iwm_disable_beacon_filter(struct iwm_softc *sc)
5007 {
5008 	struct iwm_beacon_filter_cmd cmd;
5009 	int err;
5010 
5011 	memset(&cmd, 0, sizeof(cmd));
5012 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
5013 		return 0;
5014 
5015 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5016 	if (err == 0)
5017 		sc->sc_bf.bf_enabled = 0;
5018 
5019 	return err;
5020 }
5021 
5022 static int
5023 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5024 {
5025 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
5026 	int err;
5027 	uint32_t status;
5028 
5029 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5030 
5031 	add_sta_cmd.sta_id = IWM_STATION_ID;
5032 	add_sta_cmd.mac_id_n_color
5033 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5034 	if (!update) {
5035 		int ac;
5036 		for (ac = 0; ac < WME_NUM_AC; ac++) {
5037 			add_sta_cmd.tfd_queue_msk |=
5038 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
5039 		}
5040 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5041 	}
5042 	add_sta_cmd.add_modify = update ? 1 : 0;
5043 	add_sta_cmd.station_flags_msk
5044 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5045 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
5046 	if (update)
5047 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5048 
5049 #ifndef IEEE80211_NO_HT
5050 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		struct ieee80211com *ic = &sc->sc_ic;

5051 		add_sta_cmd.station_flags_msk
5052 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5053 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5054 
5055 		add_sta_cmd.station_flags
5056 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5057 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5058 		case IEEE80211_AMPDU_PARAM_SS_2:
5059 			add_sta_cmd.station_flags
5060 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5061 			break;
5062 		case IEEE80211_AMPDU_PARAM_SS_4:
5063 			add_sta_cmd.station_flags
5064 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5065 			break;
5066 		case IEEE80211_AMPDU_PARAM_SS_8:
5067 			add_sta_cmd.station_flags
5068 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5069 			break;
5070 		case IEEE80211_AMPDU_PARAM_SS_16:
5071 			add_sta_cmd.station_flags
5072 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5073 			break;
5074 		default:
5075 			break;
5076 		}
5077 	}
5078 #endif
5079 
5080 	status = IWM_ADD_STA_SUCCESS;
5081 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
5082 	    &add_sta_cmd, &status);
5083 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5084 		err = EIO;
5085 
5086 	return err;
5087 }
5088 
5089 static int
5090 iwm_add_aux_sta(struct iwm_softc *sc)
5091 {
5092 	struct iwm_add_sta_cmd_v7 cmd;
5093 	int err;
5094 	uint32_t status;
5095 
5096 	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
5097 	if (err)
5098 		return err;
5099 
5100 	memset(&cmd, 0, sizeof(cmd));
5101 	cmd.sta_id = IWM_AUX_STA_ID;
5102 	cmd.mac_id_n_color =
5103 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5104 	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
5105 	cmd.tid_disable_tx = htole16(0xffff);
5106 
5107 	status = IWM_ADD_STA_SUCCESS;
5108 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
5109 	    &status);
5110 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5111 		err = EIO;
5112 
5113 	return err;
5114 }
5115 
5116 #define IWM_PLCP_QUIET_THRESH 1
5117 #define IWM_ACTIVE_QUIET_TIME 10
5118 #define LONG_OUT_TIME_PERIOD 600
5119 #define SHORT_OUT_TIME_PERIOD 200
5120 #define SUSPEND_TIME_PERIOD 100
5121 
5122 static uint16_t
5123 iwm_scan_rx_chain(struct iwm_softc *sc)
5124 {
5125 	uint16_t rx_chain;
5126 	uint8_t rx_ant;
5127 
5128 	rx_ant = iwm_fw_valid_rx_ant(sc);
5129 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5130 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5131 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5132 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5133 	return htole16(rx_chain);
5134 }
5135 
5136 static uint32_t
5137 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5138 {
5139 	uint32_t tx_ant;
5140 	int i, ind;
5141 
5142 	for (i = 0, ind = sc->sc_scan_last_antenna;
5143 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
5144 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5145 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5146 			sc->sc_scan_last_antenna = ind;
5147 			break;
5148 		}
5149 	}
5150 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5151 
5152 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5153 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5154 				   tx_ant);
5155 	else
5156 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
5157 }
5158 
5159 #ifdef notyet
5160 /*
5161  * If req->n_ssids > 0, it means we should do an active scan.
5162  * In case of active scan w/o directed scan, we receive a zero-length SSID
5163  * just to notify that this scan is active and not passive.
5164  * In order to notify the FW of the number of SSIDs we wish to scan (including
5165  * the zero-length one), we need to set the corresponding bits in chan->type,
5166  * one for each SSID, and set the active bit (first). If the first SSID is
5167  * one for each SSID, and set the active bit (first). The first SSID is
5168  * already included in the probe template, so we need to set only
5169  */
5170 static uint16_t
5171 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
5172 {
5173 	if (flags & IEEE80211_CHAN_2GHZ)
5174 		return 30  + 3 * (n_ssids + 1);
5175 	return 20  + 2 * (n_ssids + 1);
5176 }
5177 
5178 static uint16_t
5179 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
5180 {
5181 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
5182 }
5183 #endif
5184 
5185 static uint8_t
5186 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5187     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
5188 {
5189 	struct ieee80211com *ic = &sc->sc_ic;
5190 	struct ieee80211_channel *c;
5191 	uint8_t nchan;
5192 
5193 	for (nchan = 0, c = &ic->ic_channels[1];
5194 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5195 	    nchan < sc->sc_capa_n_scan_channels;
5196 	    c++) {
5197 		if (c->ic_flags == 0)
5198 			continue;
5199 
5200 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5201 		chan->iter_count = htole16(1);
5202 		chan->iter_interval = htole32(0);
5203 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5204 		chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
5205 		if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
5206 			chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
5207 		chan++;
5208 		nchan++;
5209 	}
5210 
5211 	return nchan;
5212 }
5213 
5214 static uint8_t
5215 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5216     struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
5217 {
5218 	struct ieee80211com *ic = &sc->sc_ic;
5219 	struct ieee80211_channel *c;
5220 	uint8_t nchan;
5221 
5222 	for (nchan = 0, c = &ic->ic_channels[1];
5223 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5224 	    nchan < sc->sc_capa_n_scan_channels;
5225 	    c++) {
5226 		if (c->ic_flags == 0)
5227 			continue;
5228 
5229 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5230 		chan->iter_count = 1;
5231 		chan->iter_interval = htole16(0);
5232 		chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
5233 		chan++;
5234 		nchan++;
5235 	}
5236 
5237 	return nchan;
5238 }
5239 
5240 static int
5241 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
5242 {
5243 	struct ieee80211com *ic = &sc->sc_ic;
5244 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5245 	struct ieee80211_rateset *rs;
5246 	size_t remain = sizeof(preq->buf);
5247 	uint8_t *frm, *pos;
5248 
5249 	memset(preq, 0, sizeof(*preq));
5250 
5251 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5252 		return ENOBUFS;
5253 
5254 	/*
5255 	 * Build a probe request frame.  Most of the following code is a
5256 	 * copy & paste of what is done in net80211.
5257 	 */
5258 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5259 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5260 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5261 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5262 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5263 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5264 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5265 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5266 
5267 	frm = (uint8_t *)(wh + 1);
5268 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5269 
5270 	/* Tell the firmware where the MAC header is. */
5271 	preq->mac_header.offset = 0;
5272 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5273 	remain -= frm - (uint8_t *)wh;
5274 
5275 	/* Fill in 2GHz IEs and tell firmware where they are. */
5276 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5277 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5278 		if (remain < 4 + rs->rs_nrates)
5279 			return ENOBUFS;
5280 	} else if (remain < 2 + rs->rs_nrates)
5281 		return ENOBUFS;
5282 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5283 	pos = frm;
5284 	frm = ieee80211_add_rates(frm, rs);
5285 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5286 		frm = ieee80211_add_xrates(frm, rs);
5287 	preq->band_data[0].len = htole16(frm - pos);
5288 	remain -= frm - pos;
5289 
5290 	if (isset(sc->sc_enabled_capa,
5291 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5292 		if (remain < 3)
5293 			return ENOBUFS;
5294 		*frm++ = IEEE80211_ELEMID_DSPARMS;
5295 		*frm++ = 1;
5296 		*frm++ = 0;
5297 		remain -= 3;
5298 	}
5299 
5300 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5301 		/* Fill in 5GHz IEs. */
5302 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5303 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5304 			if (remain < 4 + rs->rs_nrates)
5305 				return ENOBUFS;
5306 		} else if (remain < 2 + rs->rs_nrates)
5307 			return ENOBUFS;
5308 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5309 		pos = frm;
5310 		frm = ieee80211_add_rates(frm, rs);
5311 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5312 			frm = ieee80211_add_xrates(frm, rs);
5313 		preq->band_data[1].len = htole16(frm - pos);
5314 		remain -= frm - pos;
5315 	}
5316 
5317 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
5318 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5319 	pos = frm;
5320 #ifndef IEEE80211_NO_HT
5321 	if (ic->ic_flags & IEEE80211_F_HTON) {
5322 		if (remain < 28)
5323 			return ENOBUFS;
5324 		frm = ieee80211_add_htcaps(frm, ic);
5325 		/* XXX add WME info? */
5326 	}
5327 #endif
5328 
5329 	preq->common_data.len = htole16(frm - pos);
5330 
5331 	return 0;
5332 }
5333 
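/*
 * Start a one-shot scan via the older LMAC scan offload API.  The
 * command payload is laid out as:
 *
 *	[ iwm_scan_req_lmac | channel cfg * n_scan_channels | probe req ]
 *
 * with the channel configs and probe request template appended to
 * req->data.
 */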
5334 static int
5335 iwm_lmac_scan(struct iwm_softc *sc)
5336 {
5337 	struct ieee80211com *ic = &sc->sc_ic;
5338 	struct iwm_host_cmd hcmd = {
5339 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5340 		.len = { 0, },
5341 		.data = { NULL, },
5342 		.flags = 0,
5343 	};
5344 	struct iwm_scan_req_lmac *req;
5345 	size_t req_len;
5346 	int err;
5347 
5348 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5349 
5350 	req_len = sizeof(struct iwm_scan_req_lmac) +
5351 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5352 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
5353 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5354 		return ENOMEM;
5355 	req = kmem_zalloc(req_len, KM_SLEEP);
5356 	hcmd.len[0] = (uint16_t)req_len;
5357 	hcmd.data[0] = (void *)req;
5358 
5359 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5360 	req->active_dwell = 10;
5361 	req->passive_dwell = 110;
5362 	req->fragmented_dwell = 44;
5363 	req->extended_dwell = 90;
5364 	req->max_out_time = 0;
5365 	req->suspend_time = 0;
5366 
5367 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5368 	req->rx_chain_select = iwm_scan_rx_chain(sc);
5369 	req->iter_num = htole32(1);
5370 	req->delay = 0;
5371 
5372 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5373 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5374 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5375 	if (ic->ic_des_esslen == 0)
5376 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5377 	else
5378 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5379 	if (isset(sc->sc_enabled_capa,
5380 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5381 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5382 
5383 	req->flags = htole32(IWM_PHY_BAND_24);
5384 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5385 		req->flags |= htole32(IWM_PHY_BAND_5);
5386 	req->filter_flags =
5387 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5388 
5389 	/* Tx flags 2 GHz. */
5390 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5391 	    IWM_TX_CMD_FLG_BT_DIS);
5392 	req->tx_cmd[0].rate_n_flags =
5393 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5394 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5395 
5396 	/* Tx flags 5 GHz. */
5397 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5398 	    IWM_TX_CMD_FLG_BT_DIS);
5399 	req->tx_cmd[1].rate_n_flags =
5400 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5401 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5402 
5403 	/* Check if we're doing an active directed scan. */
5404 	if (ic->ic_des_esslen != 0) {
5405 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5406 		req->direct_scan[0].len = ic->ic_des_esslen;
5407 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5408 		    ic->ic_des_esslen);
5409 	}
5410 
5411 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
5412 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
5413 	    ic->ic_des_esslen != 0);
5414 
5415 	err = iwm_fill_probe_req(sc,
5416 	    (struct iwm_scan_probe_req *)(req->data +
5417 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5418 	     sc->sc_capa_n_scan_channels)));
5419 	if (err) {
5420 		kmem_free(req, req_len);
5421 		return err;
5422 	}
5423 
5424 	/* Specify the scan plan: We'll do one iteration. */
5425 	req->schedule[0].iterations = 1;
5426 	req->schedule[0].full_scan_mul = 1;
5427 
5428 	/* Disable EBS. */
5429 	req->channel_opt[0].non_ebs_ratio = 1;
5430 	req->channel_opt[1].non_ebs_ratio = 1;
5431 
5432 	err = iwm_send_cmd(sc, &hcmd);
5433 	kmem_free(req, req_len);
5434 	return err;
5435 }
5436 
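/*
 * Send the one-time scan configuration required by the UMAC scan API:
 * antenna masks, legacy rates, dwell times, our MAC address, and the
 * list of channels the firmware may scan.
 */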
5437 static int
5438 iwm_config_umac_scan(struct iwm_softc *sc)
5439 {
5440 	struct ieee80211com *ic = &sc->sc_ic;
5441 	struct iwm_scan_config *scan_config;
5442 	int err, nchan;
5443 	size_t cmd_size;
5444 	struct ieee80211_channel *c;
5445 	struct iwm_host_cmd hcmd = {
5446 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
5447 		.flags = 0,
5448 	};
5449 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5450 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5451 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5452 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5453 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5454 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5455 	    IWM_SCAN_CONFIG_RATE_54M);
5456 
5457 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5458 
5459 	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
5460 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5461 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5462 	scan_config->legacy_rates = htole32(rates |
5463 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5464 
5465 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5466 	scan_config->dwell_active = 10;
5467 	scan_config->dwell_passive = 110;
5468 	scan_config->dwell_fragmented = 44;
5469 	scan_config->dwell_extended = 90;
5470 	scan_config->out_of_channel_time = htole32(0);
5471 	scan_config->suspend_time = htole32(0);
5472 
5473 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5474 
5475 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5476 	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5477 	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5478 	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5479 
5480 	for (c = &ic->ic_channels[1], nchan = 0;
5481 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5482 	    nchan < sc->sc_capa_n_scan_channels; c++) {
5483 		if (c->ic_flags == 0)
5484 			continue;
5485 		scan_config->channel_array[nchan++] =
5486 		    ieee80211_mhz2ieee(c->ic_freq, 0);
5487 	}
5488 
5489 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5490 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5491 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5492 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5493 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5494 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5495 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5496 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5497 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5498 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5499 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5500 
5501 	hcmd.data[0] = scan_config;
5502 	hcmd.len[0] = cmd_size;
5503 
5504 	err = iwm_send_cmd(sc, &hcmd);
5505 	kmem_free(scan_config, cmd_size);
5506 	return err;
5507 }
5508 
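/*
 * Start a one-shot scan via the newer UMAC scan API.  The layout
 * mirrors the LMAC case, except that the SSID list, probe request
 * template and scan schedule live in a tail structure following the
 * channel configs:
 *
 *	[ iwm_scan_req_umac | channel cfg * n_scan_channels | tail ]
 */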
5509 static int
5510 iwm_umac_scan(struct iwm_softc *sc)
5511 {
5512 	struct ieee80211com *ic = &sc->sc_ic;
5513 	struct iwm_host_cmd hcmd = {
5514 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5515 		.len = { 0, },
5516 		.data = { NULL, },
5517 		.flags = 0,
5518 	};
5519 	struct iwm_scan_req_umac *req;
5520 	struct iwm_scan_req_umac_tail *tail;
5521 	size_t req_len;
5522 	int err;
5523 
5524 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5525 
5526 	req_len = sizeof(struct iwm_scan_req_umac) +
5527 	    (sizeof(struct iwm_scan_channel_cfg_umac) *
5528 	    sc->sc_capa_n_scan_channels) +
5529 	    sizeof(struct iwm_scan_req_umac_tail);
5530 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5531 		return ENOMEM;
5532 	req = kmem_zalloc(req_len, KM_SLEEP);
5533 
5534 	hcmd.len[0] = (uint16_t)req_len;
5535 	hcmd.data[0] = (void *)req;
5536 
5537 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5538 	req->active_dwell = 10;
5539 	req->passive_dwell = 110;
5540 	req->fragmented_dwell = 44;
5541 	req->extended_dwell = 90;
5542 	req->max_out_time = 0;
5543 	req->suspend_time = 0;
5544 
5545 	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5546 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5547 
5548 	req->n_channels = iwm_umac_scan_fill_channels(sc,
5549 	    (struct iwm_scan_channel_cfg_umac *)req->data,
5550 	    ic->ic_des_esslen != 0);
5551 
5552 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5553 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5554 	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5555 
5556 	tail = (struct iwm_scan_req_umac_tail *)(req->data +
5557 		sizeof(struct iwm_scan_channel_cfg_umac) *
5558 			sc->sc_capa_n_scan_channels);
5559 
5560 	/* Check if we're doing an active directed scan. */
5561 	if (ic->ic_des_esslen != 0) {
5562 		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5563 		tail->direct_scan[0].len = ic->ic_des_esslen;
5564 		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5565 		    ic->ic_des_esslen);
5566 		req->general_flags |=
5567 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5568 	} else
5569 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5570 
5571 	if (isset(sc->sc_enabled_capa,
5572 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5573 		req->general_flags |=
5574 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5575 
5576 	err = iwm_fill_probe_req(sc, &tail->preq);
5577 	if (err) {
5578 		kmem_free(req, req_len);
5579 		return err;
5580 	}
5581 
5582 	/* Specify the scan plan: We'll do one iteration. */
5583 	tail->schedule[0].interval = 0;
5584 	tail->schedule[0].iter_count = 1;
5585 
5586 	err = iwm_send_cmd(sc, &hcmd);
5587 	kmem_free(req, req_len);
5588 	return err;
5589 }
5590 
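/*
 * Find the 802.11 rate corresponding to firmware rate index 'ridx' in
 * the given rate set.  Returns the raw rs_rates[] entry, so the
 * IEEE80211_RATE_BASIC bit is preserved, or 0 if the rate set does
 * not contain that rate.
 */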
5591 static uint8_t
5592 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5593 {
5594 	int i;
5595 	uint8_t rval;
5596 
5597 	for (i = 0; i < rs->rs_nrates; i++) {
5598 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5599 		if (rval == iwm_rates[ridx].rate)
5600 			return rs->rs_rates[i];
5601 	}
5602 	return 0;
5603 }
5604 
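/*
 * Compute the CCK and OFDM basic-rate bitmaps used by the firmware to
 * select control response (e.g. ACK) rates.  Start from the BSS basic
 * rate set and add the mandatory lower rates required by the standard;
 * the comments in the function body explain why.
 */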
5605 static void
5606 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5607     int *ofdm_rates)
5608 {
5609 	struct ieee80211_node *ni = &in->in_ni;
5610 	struct ieee80211_rateset *rs = &ni->ni_rates;
5611 	int lowest_present_ofdm = -1;
5612 	int lowest_present_cck = -1;
5613 	uint8_t cck = 0;
5614 	uint8_t ofdm = 0;
5615 	int i;
5616 
5617 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5618 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5619 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5620 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5621 				continue;
5622 			cck |= (1 << i);
5623 			if (lowest_present_cck == -1 || lowest_present_cck > i)
5624 				lowest_present_cck = i;
5625 		}
5626 	}
5627 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5628 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5629 			continue;
5630 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5631 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5632 			lowest_present_ofdm = i;
5633 	}
5634 
5635 	/*
5636 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
5637 	 * variables. This isn't sufficient though, as there might not
5638 	 * be all the right rates in the bitmap. E.g. if the only basic
5639 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5640 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5641 	 *
5642 	 *    [...] a STA responding to a received frame shall transmit
5643 	 *    its Control Response frame [...] at the highest rate in the
5644 	 *    BSSBasicRateSet parameter that is less than or equal to the
5645 	 *    rate of the immediately previous frame in the frame exchange
5646 	 *    sequence ([...]) and that is of the same modulation class
5647 	 *    ([...]) as the received frame. If no rate contained in the
5648 	 *    BSSBasicRateSet parameter meets these conditions, then the
5649 	 *    control frame sent in response to a received frame shall be
5650 	 *    transmitted at the highest mandatory rate of the PHY that is
5651 	 *    less than or equal to the rate of the received frame, and
5652 	 *    that is of the same modulation class as the received frame.
5653 	 *
5654 	 * As a consequence, we need to add all mandatory rates that are
5655 	 * lower than all of the basic rates to these bitmaps.
5656 	 */
5657 
5658 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5659 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5660 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5661 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5662 	/* 6M already there or needed so always add */
5663 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5664 
5665 	/*
5666 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5667 	 * Note, however:
5668 	 *  - if no CCK rates are basic, it must be ERP since there must
5669 	 *    be some basic rates at all, so they're OFDM => ERP PHY
5670 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
5671 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5672 	 *  - if 5.5M is basic, 1M and 2M are mandatory
5673 	 *  - if 2M is basic, 1M is mandatory
5674 	 *  - if 1M is basic, that's the only valid ACK rate.
5675 	 * As a consequence, it's not as complicated as it sounds, just add
5676 	 * any lower rates to the ACK rate bitmap.
5677 	 */
5678 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
5679 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5680 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
5681 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5682 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
5683 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5684 	/* 1M already there or needed so always add */
5685 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5686 
5687 	*cck_rates = cck;
5688 	*ofdm_rates = ofdm;
5689 }
5690 
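/*
 * Fill the parts of the MAC context command common to ADD and MODIFY
 * actions: identity and BSSID, ACK rate bitmaps, preamble/slot flags,
 * and per-AC EDCA parameters.  IWM_EXP2 converts the logarithmic
 * ECWmin/ECWmax values into actual contention window sizes.
 */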
5691 static void
5692 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5693     struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5694 {
5695 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5696 	struct ieee80211com *ic = &sc->sc_ic;
5697 	struct ieee80211_node *ni = ic->ic_bss;
5698 	int cck_ack_rates, ofdm_ack_rates;
5699 	int i;
5700 
5701 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5702 	    in->in_color));
5703 	cmd->action = htole32(action);
5704 
5705 	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5706 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
5707 
5708 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5709 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5710 
5711 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5712 	cmd->cck_rates = htole32(cck_ack_rates);
5713 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
5714 
5715 	cmd->cck_short_preamble
5716 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5717 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5718 	cmd->short_slot
5719 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5720 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
5721 
5722 	for (i = 0; i < WME_NUM_AC; i++) {
5723 		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5724 		int txf = iwm_ac_to_tx_fifo[i];
5725 
5726 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5727 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5728 		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5729 		cmd->ac[txf].fifos_mask = (1 << txf);
5730 		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5731 	}
5732 	if (ni->ni_flags & IEEE80211_NODE_QOS)
5733 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5734 
5735 #ifndef IEEE80211_NO_HT
5736 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5737 		enum ieee80211_htprot htprot =
5738 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5739 		switch (htprot) {
5740 		case IEEE80211_HTPROT_NONE:
5741 			break;
5742 		case IEEE80211_HTPROT_NONMEMBER:
5743 		case IEEE80211_HTPROT_NONHT_MIXED:
5744 			cmd->protection_flags |=
5745 			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			break;
5746 		case IEEE80211_HTPROT_20MHZ:
5747 			cmd->protection_flags |=
5748 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
5749 			    IWM_MAC_PROT_FLG_FAT_PROT);
5750 			break;
5751 		default:
5752 			break;
5753 		}
5754 
5755 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5756 	}
5757 #endif
5758 
5759 	if (ic->ic_flags & IEEE80211_F_USEPROT)
5760 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5761 
5762 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5763 #undef IWM_EXP2
5764 }
5765 
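/*
 * Fill the STA-specific part of the MAC context command with
 * association state and beacon/DTIM timing derived from the last
 * received beacon.  The *_reciprocal fields appear to be fixed-point
 * 1/x values which save the firmware a division.
 */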
5766 static void
5767 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5768     struct iwm_mac_data_sta *sta, int assoc)
5769 {
5770 	struct ieee80211_node *ni = &in->in_ni;
5771 	uint32_t dtim_off;
5772 	uint64_t tsf;
5773 
5774 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5775 	tsf = le64toh(ni->ni_tstamp.tsf);
5776 
5777 	sta->is_assoc = htole32(assoc);
5778 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5779 	sta->dtim_tsf = htole64(tsf + dtim_off);
5780 	sta->bi = htole32(ni->ni_intval);
5781 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5782 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5783 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5784 	sta->listen_interval = htole32(10);
5785 	sta->assoc_id = htole32(ni->ni_associd);
5786 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5787 }
5788 
5789 static int
5790 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5791     int assoc)
5792 {
5793 	struct ieee80211_node *ni = &in->in_ni;
5794 	struct iwm_mac_ctx_cmd cmd;
5795 
5796 	memset(&cmd, 0, sizeof(cmd));
5797 
5798 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5799 
5800 	/* Allow beacons to pass through as long as we are not associated,
5801 	 * or do not yet have DTIM period information. */
5802 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5803 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5804 	else
5805 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5806 
5807 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5808 }
5809 
5810 #define IWM_MISSED_BEACONS_THRESHOLD 8
5811 
5812 static void
5813 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5814 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5815 {
5816 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5817 	int s;
5818 
5819 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5820 	    le32toh(mb->mac_id),
5821 	    le32toh(mb->consec_missed_beacons),
5822 	    le32toh(mb->consec_missed_beacons_since_last_rx),
5823 	    le32toh(mb->num_recvd_beacons),
5824 	    le32toh(mb->num_expected_beacons)));
5825 
5826 	/*
5827 	 * TODO: the threshold should be adjusted based on latency conditions,
5828 	 * and/or in case of a CS flow on one of the other AP vifs.
5829 	 */
5830 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5831 	    IWM_MISSED_BEACONS_THRESHOLD) {
5832 		s = splnet();
5833 		ieee80211_beacon_miss(&sc->sc_ic);
5834 		splx(s);
5835 	}
5836 }
5837 
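/*
 * Divide the firmware's IWM_MAX_QUOTA scheduling fragments evenly
 * among all active bindings.  Since this driver supports only client
 * mode there is at most one active binding, which thus receives the
 * whole quota plus any remainder.
 */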
5838 static int
5839 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5840 {
5841 	struct iwm_time_quota_cmd cmd;
5842 	int i, idx, num_active_macs, quota, quota_rem;
5843 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5844 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
5845 	uint16_t id;
5846 
5847 	memset(&cmd, 0, sizeof(cmd));
5848 
5849 	/* currently, PHY ID == binding ID */
5850 	if (in) {
5851 		id = in->in_phyctxt->id;
5852 		KASSERT(id < IWM_MAX_BINDINGS);
5853 		colors[id] = in->in_phyctxt->color;
5854 
5855 		n_ifs[id] = 1;
5857 	}
5858 
5859 	/*
5860 	 * The FW's scheduling session consists of
5861 	 * IWM_MAX_QUOTA fragments. Divide these fragments
5862 	 * equally between all the bindings that require quota
5863 	 */
5864 	num_active_macs = 0;
5865 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5866 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5867 		num_active_macs += n_ifs[i];
5868 	}
5869 
5870 	quota = 0;
5871 	quota_rem = 0;
5872 	if (num_active_macs) {
5873 		quota = IWM_MAX_QUOTA / num_active_macs;
5874 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
5875 	}
5876 
5877 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5878 		if (colors[i] < 0)
5879 			continue;
5880 
5881 		cmd.quotas[idx].id_and_color =
5882 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5883 
5884 		if (n_ifs[i] <= 0) {
5885 			cmd.quotas[idx].quota = htole32(0);
5886 			cmd.quotas[idx].max_duration = htole32(0);
5887 		} else {
5888 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5889 			cmd.quotas[idx].max_duration = htole32(0);
5890 		}
5891 		idx++;
5892 	}
5893 
5894 	/* Give the remainder of the session to the first binding */
5895 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5896 
5897 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5898 }
5899 
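/*
 * Prepare the firmware for authentication: configure the smart FIFO,
 * point PHY context 0 at the AP's channel, create the MAC context and
 * binding, add the station entry, and finally "protect" the session
 * with a time event so the firmware stays on channel while the
 * handshake is in progress.
 */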
5900 static int
5901 iwm_auth(struct iwm_softc *sc)
5902 {
5903 	struct ieee80211com *ic = &sc->sc_ic;
5904 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5905 	uint32_t duration;
5906 	int err;
5907 
5908 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5909 	if (err)
5910 		return err;
5911 
5912 	err = iwm_allow_mcast(sc);
5913 	if (err)
5914 		return err;
5915 
5916 	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5917 	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5918 	    IWM_FW_CTXT_ACTION_MODIFY, 0);
5919 	if (err)
5920 		return err;
5921 	in->in_phyctxt = &sc->sc_phyctxt[0];
5922 
5923 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
5924 	if (err) {
5925 		aprint_error_dev(sc->sc_dev,
5926 		    "could not add MAC context (error %d)\n", err);
5927 		return err;
5928 	}
5929 
5930 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
5931 	if (err)
5932 		return err;
5933 
5934 	err = iwm_add_sta_cmd(sc, in, 0);
5935 	if (err)
5936 		return err;
5937 
5938 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
5939 	if (err) {
5940 		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5941 		return err;
5942 	}
5943 
5944 	/*
5945 	 * Prevent the FW from wandering off channel during association
5946 	 * by "protecting" the session with a time event.
5947 	 */
5948 	if (in->in_ni.ni_intval)
5949 		duration = in->in_ni.ni_intval * 2;
5950 	else
5951 		duration = IEEE80211_DUR_TU;
5952 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
5953 	DELAY(100);
5954 
5955 	return 0;
5956 }
5957 
5958 static int
5959 iwm_assoc(struct iwm_softc *sc)
5960 {
5961 	struct ieee80211com *ic = &sc->sc_ic;
5962 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5963 	int err;
5964 
5965 	err = iwm_add_sta_cmd(sc, in, 1);
5966 	if (err)
5967 		return err;
5968 
5969 	return 0;
5970 }
5971 
5972 static struct ieee80211_node *
5973 iwm_node_alloc(struct ieee80211_node_table *nt)
5974 {
5975 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5976 }
5977 
5978 static void
5979 iwm_calib_timeout(void *arg)
5980 {
5981 	struct iwm_softc *sc = arg;
5982 	struct ieee80211com *ic = &sc->sc_ic;
5983 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5984 #ifndef IEEE80211_NO_HT
5985 	struct ieee80211_node *ni = &in->in_ni;
5986 	int otxrate;
5987 #endif
5988 	int s;
5989 
5990 	s = splnet();
5991 	if ((ic->ic_fixed_rate == -1
5992 #ifndef IEEE80211_NO_HT
5993 	    || ic->ic_fixed_mcs == -1
5994 #endif
5995 	    ) &&
5996 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
5997 #ifndef IEEE80211_NO_HT
5998 		if (ni->ni_flags & IEEE80211_NODE_HT)
5999 			otxrate = ni->ni_txmcs;
6000 		else
6001 			otxrate = ni->ni_txrate;
6002 #endif
6003 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
6004 
6005 #ifndef IEEE80211_NO_HT
6006 		/*
6007 		 * If AMRR has chosen a new TX rate we must update
6008 		 * the firmware's LQ rate table from process context.
6009 		 */
6010 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6011 		    otxrate != ni->ni_txmcs)
6012 			softint_schedule(sc->setrates_task);
6013 		else if (otxrate != ni->ni_txrate)
6014 			softint_schedule(sc->setrates_task);
6015 #endif
6016 	}
6017 	splx(s);
6018 
6019 	callout_schedule(&sc->sc_calib_to, mstohz(500));
6020 }
6021 
6022 #ifndef IEEE80211_NO_HT
6023 static void
6024 iwm_setrates_task(void *arg)
6025 {
6026 	struct iwm_softc *sc = arg;
6027 	struct ieee80211com *ic = &sc->sc_ic;
6028 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6029 
6030 	/* Update rates table based on new TX rate determined by AMRR. */
6031 	iwm_setrates(in);
6032 }
6033 
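/*
 * Program the firmware's link quality (LQ) table for this node.  The
 * table holds rates in descending order starting at the node's current
 * TX rate; the firmware is expected to step down this table on
 * retransmissions.
 */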
6034 static int
6035 iwm_setrates(struct iwm_node *in)
6036 {
6037 	struct ieee80211_node *ni = &in->in_ni;
6038 	struct ieee80211com *ic = ni->ni_ic;
6039 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6040 	struct iwm_lq_cmd *lq = &in->in_lq;
6041 	struct ieee80211_rateset *rs = &ni->ni_rates;
6042 	int i, j, ridx, ridx_min, tab = 0;
6043 #ifndef IEEE80211_NO_HT
6044 	int sgi_ok;
6045 #endif
6046 	struct iwm_host_cmd cmd = {
6047 		.id = IWM_LQ_CMD,
6048 		.len = { sizeof(in->in_lq), },
6049 	};
6050 
6051 	memset(lq, 0, sizeof(*lq));
6052 	lq->sta_id = IWM_STATION_ID;
6053 
6054 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6055 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
6056 
6057 #ifndef IEEE80211_NO_HT
6058 	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
6059 	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
6060 #endif
6061 
6062 
6064 	 * Fill the LQ rate selection table with legacy and/or HT rates
6065 	 * in descending order, i.e. with the node's current TX rate first.
6066 	 * In cases where throughput of an HT rate corresponds to a legacy
6067 	 * rate it makes no sense to add both. We rely on the fact that
6068 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
6069 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
6070 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
6071 	 */
6072 	j = 0;
6073 	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6074 	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6075 	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
6076 		if (j >= __arraycount(lq->rs_table))
6077 			break;
6078 		tab = 0;
6079 #ifndef IEEE80211_NO_HT
6080 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6081 		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6082 			for (i = ni->ni_txmcs; i >= 0; i--) {
6083 				if (isclr(ni->ni_rxmcs, i))
6084 					continue;
6085 				if (ridx == iwm_mcs2ridx[i]) {
6086 					tab = iwm_rates[ridx].ht_plcp;
6087 					tab |= IWM_RATE_MCS_HT_MSK;
6088 					if (sgi_ok)
6089 						tab |= IWM_RATE_MCS_SGI_MSK;
6090 					break;
6091 				}
6092 			}
6093 		}
6094 #endif
6095 		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
6096 			for (i = ni->ni_txrate; i >= 0; i--) {
6097 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
6098 				    IEEE80211_RATE_VAL)) {
6099 					tab = iwm_rates[ridx].plcp;
6100 					break;
6101 				}
6102 			}
6103 		}
6104 
6105 		if (tab == 0)
6106 			continue;
6107 
6108 		tab |= 1 << IWM_RATE_MCS_ANT_POS;
6109 		if (IWM_RIDX_IS_CCK(ridx))
6110 			tab |= IWM_RATE_MCS_CCK_MSK;
6111 		DPRINTFN(2, ("station rate %d %x\n", i, tab));
6112 		lq->rs_table[j++] = htole32(tab);
6113 	}
6114 
6115 	/* Fill the rest with the lowest possible rate */
6116 	i = j > 0 ? j - 1 : 0;
6117 	while (j < __arraycount(lq->rs_table))
6118 		lq->rs_table[j++] = lq->rs_table[i];
6119 
6120 	lq->single_stream_ant_msk = IWM_ANT_A;
6121 	lq->dual_stream_ant_msk = IWM_ANT_AB;
6122 
6123 	lq->agg_time_limit = htole16(4000);	/* 4ms */
6124 	lq->agg_disable_start_th = 3;
6125 #ifdef notyet
6126 	lq->agg_frame_cnt_limit = 0x3f;
6127 #else
6128 	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
6129 #endif
6130 
6131 	cmd.data[0] = &in->in_lq;
6132 	return iwm_send_cmd(sc, &cmd);
6133 }
6134 #endif
6135 
6136 static int
6137 iwm_media_change(struct ifnet *ifp)
6138 {
6139 	struct iwm_softc *sc = ifp->if_softc;
6140 	struct ieee80211com *ic = &sc->sc_ic;
6141 	uint8_t rate, ridx;
6142 	int err;
6143 
6144 	err = ieee80211_media_change(ifp);
6145 	if (err != ENETRESET)
6146 		return err;
6147 
6148 #ifndef IEEE80211_NO_HT
6149 	if (ic->ic_fixed_mcs != -1)
6150 		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
6151 	else
6152 #endif
6153 	if (ic->ic_fixed_rate != -1) {
6154 		rate = ic->ic_sup_rates[ic->ic_curmode].
6155 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6156 		/* Map 802.11 rate to HW rate index. */
6157 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
6158 			if (iwm_rates[ridx].rate == rate)
6159 				break;
6160 		sc->sc_fixed_ridx = ridx;
6161 	}
6162 
6163 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6164 	    (IFF_UP | IFF_RUNNING)) {
6165 		iwm_stop(ifp, 0);
6166 		err = iwm_init(ifp);
6167 	}
6168 	return err;
6169 }
6170 
6171 static int
6172 iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6173 {
6174 	struct ifnet *ifp = IC2IFP(ic);
6175 	struct iwm_softc *sc = ifp->if_softc;
6176 	enum ieee80211_state ostate = ic->ic_state;
6177 	struct iwm_node *in;
6178 	int err;
6179 
6180 	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
6181 	    ieee80211_state_name[nstate]));
6182 
6183 	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
6184 		iwm_led_blink_stop(sc);
6185 
6186 	if (ostate == IEEE80211_S_RUN && nstate != ostate)
6187 		iwm_disable_beacon_filter(sc);
6188 
6189 	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
6190 	/* XXX Is there a way to switch states without a full reset? */
6191 	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
6192 		/*
6193 		 * Upon receiving a deauth frame from AP the net80211 stack
6194 		 * puts the driver into AUTH state. This will fail with this
6195 		 * driver, so force a transition to INIT and reinitialize.
6196 		 */
6197 		if (nstate != IEEE80211_S_INIT) {
6198 			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
6199 			/* Always pass arg as -1 since we can't Tx right now. */
6200 			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
6201 			iwm_stop(ifp, 0);
6202 			iwm_init(ifp);
6203 			return 0;
6204 		}
6205 
6206 		iwm_stop_device(sc);
6207 		iwm_init_hw(sc);
6208 	}
6209 
6210 	switch (nstate) {
6211 	case IEEE80211_S_INIT:
6212 		break;
6213 
6214 	case IEEE80211_S_SCAN:
6215 		if (ostate == nstate &&
6216 		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
6217 			return 0;
6218 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6219 			err = iwm_umac_scan(sc);
6220 		else
6221 			err = iwm_lmac_scan(sc);
6222 		if (err) {
6223 			DPRINTF(("%s: could not initiate scan: %d\n",
6224 			    DEVNAME(sc), err));
6225 			return err;
6226 		}
6227 		SET(sc->sc_flags, IWM_FLAG_SCANNING);
6228 		ic->ic_state = nstate;
6229 		iwm_led_blink_start(sc);
6230 		return 0;
6231 
6232 	case IEEE80211_S_AUTH:
6233 		err = iwm_auth(sc);
6234 		if (err) {
6235 			DPRINTF(("%s: could not move to auth state: %d\n",
6236 			    DEVNAME(sc), err));
6237 			return err;
6238 		}
6239 		break;
6240 
6241 	case IEEE80211_S_ASSOC:
6242 		err = iwm_assoc(sc);
6243 		if (err) {
6244 			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
6245 			    err));
6246 			return err;
6247 		}
6248 		break;
6249 
6250 	case IEEE80211_S_RUN:
6251 		in = (struct iwm_node *)ic->ic_bss;
6252 
6253 		/* We have now been assigned an associd by the AP. */
6254 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
6255 		if (err) {
6256 			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
6257 			return err;
6258 		}
6259 
6260 		err = iwm_power_update_device(sc);
6261 		if (err) {
6262 			aprint_error_dev(sc->sc_dev,
6263 		    "could not send power command (error %d)\n", err);
6264 			return err;
6265 		}
6266 #ifdef notyet
6267 		/*
6268 		 * Disabled for now. Default beacon filter settings
6269 		 * prevent net80211 from getting ERP and HT protection
6270 		 * updates from beacons.
6271 		 */
6272 		err = iwm_enable_beacon_filter(sc, in);
6273 		if (err) {
6274 			aprint_error_dev(sc->sc_dev,
6275 			    "could not enable beacon filter\n");
6276 			return err;
6277 		}
6278 #endif
6279 		err = iwm_power_mac_update_mode(sc, in);
6280 		if (err) {
6281 			aprint_error_dev(sc->sc_dev,
6282 			    "could not update MAC power (error %d)\n", err);
6283 			return err;
6284 		}
6285 
6286 		err = iwm_update_quotas(sc, in);
6287 		if (err) {
6288 			aprint_error_dev(sc->sc_dev,
6289 			    "could not update quotas (error %d)\n", err);
6290 			return err;
6291 		}
6292 
6293 		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
6294 
6295 		/* Start at lowest available bit-rate, AMRR will raise. */
6296 		in->in_ni.ni_txrate = 0;
6297 #ifndef IEEE80211_NO_HT
6298 		in->in_ni.ni_txmcs = 0;
6299 		iwm_setrates(in);
6300 #endif
6301 
6302 		callout_schedule(&sc->sc_calib_to, mstohz(500));
6303 		iwm_led_enable(sc);
6304 		break;
6305 
6306 	default:
6307 		break;
6308 	}
6309 
6310 	return sc->sc_newstate(ic, nstate, arg);
6311 }
6312 
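/*
 * State transitions are deferred to a workqueue because they issue
 * firmware commands which may sleep: iwm_newstate() queues a request
 * and iwm_newstate_cb() executes it, discarding stale requests whose
 * generation number no longer matches (e.g. the device was stopped
 * in the meantime).
 */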
6313 static void
6314 iwm_newstate_cb(struct work *wk, void *v)
6315 {
6316 	struct iwm_softc *sc = v;
6317 	struct ieee80211com *ic = &sc->sc_ic;
6318 	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
6319 	enum ieee80211_state nstate = iwmns->ns_nstate;
6320 	int generation = iwmns->ns_generation;
6321 	int arg = iwmns->ns_arg;
6322 	int s;
6323 
6324 	kmem_intr_free(iwmns, sizeof(*iwmns));
6325 
6326 	s = splnet();
6327 
6328 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
6329 	if (sc->sc_generation != generation) {
6330 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
6331 		if (nstate == IEEE80211_S_INIT) {
6332 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
6333 			    "calling sc_newstate()\n"));
6334 			(void) sc->sc_newstate(ic, nstate, arg);
6335 		}
6336 	} else
6337 		(void) iwm_do_newstate(ic, nstate, arg);
6338 
6339 	splx(s);
6340 }
6341 
6342 static int
6343 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6344 {
6345 	struct iwm_newstate_state *iwmns;
6346 	struct ifnet *ifp = IC2IFP(ic);
6347 	struct iwm_softc *sc = ifp->if_softc;
6348 
6349 	callout_stop(&sc->sc_calib_to);
6350 
6351 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6352 	if (!iwmns) {
6353 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6354 		return ENOMEM;
6355 	}
6356 
6357 	iwmns->ns_nstate = nstate;
6358 	iwmns->ns_arg = arg;
6359 	iwmns->ns_generation = sc->sc_generation;
6360 
6361 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6362 
6363 	return 0;
6364 }
6365 
6366 static void
6367 iwm_endscan(struct iwm_softc *sc)
6368 {
6369 	struct ieee80211com *ic = &sc->sc_ic;
6370 	int s;
6371 
6372 	DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6373 
6374 	s = splnet();
6375 	if (ic->ic_state == IEEE80211_S_SCAN)
6376 		ieee80211_end_scan(ic);
6377 	splx(s);
6378 }
6379 
6380 /*
6381  * Aging and idle timeouts for the different possible scenarios
6382  * in default configuration
6383  */
6384 static const uint32_t
6385 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6386 	{
6387 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6388 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6389 	},
6390 	{
6391 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6392 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6393 	},
6394 	{
6395 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6396 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6397 	},
6398 	{
6399 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
6400 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6401 	},
6402 	{
6403 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6404 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6405 	},
6406 };
6407 
6408 /*
6409  * Aging and idle timeouts for the different possible scenarios
6410  * in single BSS MAC configuration.
6411  */
6412 static const uint32_t
6413 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6414 	{
6415 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6416 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6417 	},
6418 	{
6419 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6420 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6421 	},
6422 	{
6423 		htole32(IWM_SF_MCAST_AGING_TIMER),
6424 		htole32(IWM_SF_MCAST_IDLE_TIMER)
6425 	},
6426 	{
6427 		htole32(IWM_SF_BA_AGING_TIMER),
6428 		htole32(IWM_SF_BA_IDLE_TIMER)
6429 	},
6430 	{
6431 		htole32(IWM_SF_TX_RE_AGING_TIMER),
6432 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
6433 	},
6434 };
6435 
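/*
 * Fill a smart FIFO (SF) configuration command.  The watermark is
 * derived from the peer's antenna and HT capabilities during
 * association and falls back to a default while unassociated.
 */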
6436 static void
6437 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6438     struct ieee80211_node *ni)
6439 {
6440 	int i, j, watermark;
6441 
6442 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6443 
6444 	/*
6445 	 * If we are in association flow - check antenna configuration
6446 	 * capabilities of the AP station, and choose the watermark accordingly.
6447 	 */
6448 	if (ni) {
6449 #ifndef IEEE80211_NO_HT
6450 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6451 #ifdef notyet
6452 			if (ni->ni_rxmcs[2] != 0)
6453 				watermark = IWM_SF_W_MARK_MIMO3;
6454 			else if (ni->ni_rxmcs[1] != 0)
6455 				watermark = IWM_SF_W_MARK_MIMO2;
6456 			else
6457 #endif
6458 				watermark = IWM_SF_W_MARK_SISO;
6459 		} else
6460 #endif
6461 			watermark = IWM_SF_W_MARK_LEGACY;
6462 	} else {
6463 		/* Default watermark value for unassociated mode. */
6464 		watermark = IWM_SF_W_MARK_MIMO2;
6465 	}
6466 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6467 
6468 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6469 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6470 			sf_cmd->long_delay_timeouts[i][j] =
6471 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6472 		}
6473 	}
6474 
6475 	if (ni) {
6476 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6477 		       sizeof(iwm_sf_full_timeout));
6478 	} else {
6479 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6480 		       sizeof(iwm_sf_full_timeout_def));
6481 	}
6482 }
6483 
6484 static int
6485 iwm_sf_config(struct iwm_softc *sc, int new_state)
6486 {
6487 	struct ieee80211com *ic = &sc->sc_ic;
6488 	struct iwm_sf_cfg_cmd sf_cmd = {
6489 		.state = htole32(IWM_SF_FULL_ON),
6490 	};
6491 
6492 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6493 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6494 
6495 	switch (new_state) {
6496 	case IWM_SF_UNINIT:
6497 	case IWM_SF_INIT_OFF:
6498 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
6499 		break;
6500 	case IWM_SF_FULL_ON:
6501 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6502 		break;
6503 	default:
6504 		return EINVAL;
6505 	}
6506 
6507 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6508 	    sizeof(sf_cmd), &sf_cmd);
6509 }
6510 
6511 static int
6512 iwm_send_bt_init_conf(struct iwm_softc *sc)
6513 {
6514 	struct iwm_bt_coex_cmd bt_cmd;
6515 
6516 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6517 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6518 
6519 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6520 }
6521 
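/*
 * LAR ("location aware regulatory") lets the firmware apply
 * country-specific regulatory settings via MCC update commands.
 */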
6522 static bool
6523 iwm_is_lar_supported(struct iwm_softc *sc)
6524 {
6525 	bool nvm_lar = sc->sc_nvm.lar_enabled;
6526 	bool tlv_lar = isset(sc->sc_enabled_capa,
6527 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6528 
6529 	if (iwm_lar_disable)
6530 		return false;
6531 
6532 	/*
6533 	 * Enable LAR only if it is supported by the FW (TLV) &&
6534 	 * enabled in the NVM
6535 	 */
6536 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6537 		return nvm_lar && tlv_lar;
6538 	else
6539 		return tlv_lar;
6540 }
6541 
6542 static int
6543 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6544 {
6545 	struct iwm_mcc_update_cmd mcc_cmd;
6546 	struct iwm_host_cmd hcmd = {
6547 		.id = IWM_MCC_UPDATE_CMD,
6548 		.flags = IWM_CMD_WANT_SKB,
6549 		.data = { &mcc_cmd },
6550 	};
6551 	int err;
6552 	int resp_v2 = isset(sc->sc_enabled_capa,
6553 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6554 
6555 	if (!iwm_is_lar_supported(sc)) {
6556 		DPRINTF(("%s: no LAR support\n", __func__));
6557 		return 0;
6558 	}
6559 
6560 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6561 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6562 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6563 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6564 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6565 	else
6566 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6567 
6568 	if (resp_v2)
6569 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6570 	else
6571 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6572 
6573 	err = iwm_send_cmd(sc, &hcmd);
6574 	if (err)
6575 		return err;
6576 
6577 	iwm_free_resp(sc, &hcmd);
6578 
6579 	return 0;
6580 }
6581 
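/*
 * Tell the firmware which thermal throttling TX backoff to apply;
 * a backoff of 0 disables throttling.
 */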
6582 static void
6583 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6584 {
6585 	struct iwm_host_cmd cmd = {
6586 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6587 		.len = { sizeof(uint32_t), },
6588 		.data = { &backoff, },
6589 	};
6590 
6591 	iwm_send_cmd(sc, &cmd);
6592 }
6593 
6594 static int
6595 iwm_init_hw(struct iwm_softc *sc)
6596 {
6597 	struct ieee80211com *ic = &sc->sc_ic;
6598 	int err, i, ac;
6599 
6600 	err = iwm_preinit(sc);
6601 	if (err)
6602 		return err;
6603 
6604 	err = iwm_start_hw(sc);
6605 	if (err) {
6606 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6607 		return err;
6608 	}
6609 
6610 	err = iwm_run_init_mvm_ucode(sc, 0);
6611 	if (err)
6612 		return err;
6613 
6614 	/* Should stop and start HW since INIT image just loaded. */
6615 	iwm_stop_device(sc);
6616 	err = iwm_start_hw(sc);
6617 	if (err) {
6618 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6619 		return err;
6620 	}
6621 
6622 	/* Restart, this time with the regular firmware */
6623 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6624 	if (err) {
6625 		aprint_error_dev(sc->sc_dev,
6626 		    "could not load firmware (error %d)\n", err);
6627 		goto err;
6628 	}
6629 
6630 	err = iwm_send_bt_init_conf(sc);
6631 	if (err) {
6632 		aprint_error_dev(sc->sc_dev,
6633 		    "could not init bt coex (error %d)\n", err);
6634 		goto err;
6635 	}
6636 
6637 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6638 	if (err) {
6639 		aprint_error_dev(sc->sc_dev,
6640 		    "could not init tx ant config (error %d)\n", err);
6641 		goto err;
6642 	}
6643 
6644 	/* Send phy db control command and then phy db calibration. */
6645 	err = iwm_send_phy_db_data(sc);
6646 	if (err) {
6647 		aprint_error_dev(sc->sc_dev,
6648 		    "could not init phy db (error %d)\n", err);
6649 		goto err;
6650 	}
6651 
6652 	err = iwm_send_phy_cfg_cmd(sc);
6653 	if (err) {
6654 		aprint_error_dev(sc->sc_dev,
6655 		    "could not send phy config (error %d)\n", err);
6656 		goto err;
6657 	}
6658 
6659 	/* Add auxiliary station for scanning */
6660 	err = iwm_add_aux_sta(sc);
6661 	if (err) {
6662 		aprint_error_dev(sc->sc_dev,
6663 		    "could not add aux station (error %d)\n", err);
6664 		goto err;
6665 	}
6666 
6667 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6668 		/*
6669 		 * The channel used here isn't relevant as it's
6670 		 * going to be overwritten in the other flows.
6671 		 * For now use the first channel we have.
6672 		 */
6673 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6674 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6675 		    IWM_FW_CTXT_ACTION_ADD, 0);
6676 		if (err) {
6677 			aprint_error_dev(sc->sc_dev,
6678 			    "could not add phy context %d (error %d)\n",
6679 			    i, err);
6680 			goto err;
6681 		}
6682 	}
6683 
6684 	/* Initialize tx backoffs to the minimum. */
6685 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6686 		iwm_tt_tx_backoff(sc, 0);
6687 
6688 	err = iwm_power_update_device(sc);
6689 	if (err) {
6690 		aprint_error_dev(sc->sc_dev,
6691 		    "could not send power command (error %d)\n", err);
6692 		goto err;
6693 	}
6694 
6695 	err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6696 	if (err) {
6697 		aprint_error_dev(sc->sc_dev,
6698 		    "could not init LAR (error %d)\n", err);
6699 		goto err;
6700 	}
6701 
6702 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6703 		err = iwm_config_umac_scan(sc);
6704 		if (err) {
6705 			aprint_error_dev(sc->sc_dev,
6706 			    "could not configure scan (error %d)\n", err);
6707 			goto err;
6708 		}
6709 	}
6710 
6711 	for (ac = 0; ac < WME_NUM_AC; ac++) {
6712 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6713 		    iwm_ac_to_tx_fifo[ac]);
6714 		if (err) {
6715 			aprint_error_dev(sc->sc_dev,
6716 			    "could not enable Tx queue %d (error %d)\n",
6717 			    ac, err);
6718 			goto err;
6719 		}
6720 	}
6721 
6722 	err = iwm_disable_beacon_filter(sc);
6723 	if (err) {
6724 		aprint_error_dev(sc->sc_dev,
6725 		    "could not disable beacon filter (error %d)\n", err);
6726 		goto err;
6727 	}
6728 
6729 	return 0;
6730 
6731  err:
6732 	iwm_stop_device(sc);
6733 	return err;
6734 }
6735 
6736 /* Allow multicast from our BSSID. */
6737 static int
6738 iwm_allow_mcast(struct iwm_softc *sc)
6739 {
6740 	struct ieee80211com *ic = &sc->sc_ic;
6741 	struct ieee80211_node *ni = ic->ic_bss;
6742 	struct iwm_mcast_filter_cmd *cmd;
6743 	size_t size;
6744 	int err;
6745 
6746 	size = roundup(sizeof(*cmd), 4);
6747 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6748 	if (cmd == NULL)
6749 		return ENOMEM;
6750 	cmd->filter_own = 1;
6751 	cmd->port_id = 0;
6752 	cmd->count = 0;
6753 	cmd->pass_all = 1;
6754 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6755 
6756 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6757 	kmem_intr_free(cmd, size);
6758 	return err;
6759 }
6760 
6761 static int
6762 iwm_init(struct ifnet *ifp)
6763 {
6764 	struct iwm_softc *sc = ifp->if_softc;
6765 	int err;
6766 
6767 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6768 		return 0;
6769 
6770 	sc->sc_generation++;
6771 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
6772 
6773 	err = iwm_init_hw(sc);
6774 	if (err) {
6775 		iwm_stop(ifp, 1);
6776 		return err;
6777 	}
6778 
6779 	ifp->if_flags &= ~IFF_OACTIVE;
6780 	ifp->if_flags |= IFF_RUNNING;
6781 
6782 	ieee80211_begin_scan(&sc->sc_ic, 0);
6783 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6784 
6785 	return 0;
6786 }
6787 
6788 static void
6789 iwm_start(struct ifnet *ifp)
6790 {
6791 	struct iwm_softc *sc = ifp->if_softc;
6792 	struct ieee80211com *ic = &sc->sc_ic;
6793 	struct ieee80211_node *ni;
6794 	struct ether_header *eh;
6795 	struct mbuf *m;
6796 	int ac;
6797 
6798 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6799 		return;
6800 
6801 	for (;;) {
6802 		/* why isn't this done per-queue? */
6803 		if (sc->qfullmsk != 0) {
6804 			ifp->if_flags |= IFF_OACTIVE;
6805 			break;
6806 		}
6807 
6808 		/* need to send management frames even if we're not RUNning */
6809 		IF_DEQUEUE(&ic->ic_mgtq, m);
6810 		if (m) {
6811 			ni = M_GETCTX(m, struct ieee80211_node *);
6812 			M_CLEARCTX(m);
6813 			ac = WME_AC_BE;
6814 			goto sendit;
6815 		}
6816 		if (ic->ic_state != IEEE80211_S_RUN) {
6817 			break;
6818 		}
6819 
6820 		IFQ_DEQUEUE(&ifp->if_snd, m);
6821 		if (m == NULL)
6822 			break;
6823 
6824 		if (m->m_len < sizeof (*eh) &&
6825 		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
6826 			ifp->if_oerrors++;
6827 			continue;
6828 		}
6829 
6830 		eh = mtod(m, struct ether_header *);
6831 		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6832 		if (ni == NULL) {
6833 			m_freem(m);
6834 			ifp->if_oerrors++;
6835 			continue;
6836 		}
6837 
6838 		/* classify mbuf so we can find which tx ring to use */
6839 		if (ieee80211_classify(ic, m, ni) != 0) {
6840 			m_freem(m);
6841 			ieee80211_free_node(ni);
6842 			ifp->if_oerrors++;
6843 			continue;
6844 		}
6845 
6846 		/* No QoS encapsulation for EAPOL frames. */
6847 		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6848 		    M_WME_GETAC(m) : WME_AC_BE;
6849 
6850 		bpf_mtap(ifp, m);
6851 
6852 		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6853 			ieee80211_free_node(ni);
6854 			ifp->if_oerrors++;
6855 			continue;
6856 		}
6857 
6858  sendit:
6859 		bpf_mtap3(ic->ic_rawbpf, m);
6860 
6861 		if (iwm_tx(sc, m, ni, ac) != 0) {
6862 			ieee80211_free_node(ni);
6863 			ifp->if_oerrors++;
6864 			continue;
6865 		}
6866 
6867 		if (ifp->if_flags & IFF_UP) {
6868 			sc->sc_tx_timer = 15;
6869 			ifp->if_timer = 1;
6870 		}
6871 	}
6872 }
6873 
6874 static void
6875 iwm_stop(struct ifnet *ifp, int disable)
6876 {
6877 	struct iwm_softc *sc = ifp->if_softc;
6878 	struct ieee80211com *ic = &sc->sc_ic;
6879 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6880 
6881 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6882 	sc->sc_flags |= IWM_FLAG_STOPPED;
6883 	sc->sc_generation++;
6884 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6885 
6886 	if (in)
6887 		in->in_phyctxt = NULL;
6888 
6889 	if (ic->ic_state != IEEE80211_S_INIT)
6890 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6891 
6892 	callout_stop(&sc->sc_calib_to);
6893 	iwm_led_blink_stop(sc);
6894 	ifp->if_timer = sc->sc_tx_timer = 0;
6895 	iwm_stop_device(sc);
6896 }
6897 
6898 static void
6899 iwm_watchdog(struct ifnet *ifp)
6900 {
6901 	struct iwm_softc *sc = ifp->if_softc;
6902 
6903 	ifp->if_timer = 0;
6904 	if (sc->sc_tx_timer > 0) {
6905 		if (--sc->sc_tx_timer == 0) {
6906 			aprint_error_dev(sc->sc_dev, "device timeout\n");
6907 #ifdef IWM_DEBUG
6908 			iwm_nic_error(sc);
6909 #endif
6910 			ifp->if_flags &= ~IFF_UP;
6911 			iwm_stop(ifp, 1);
6912 			ifp->if_oerrors++;
6913 			return;
6914 		}
6915 		ifp->if_timer = 1;
6916 	}
6917 
6918 	ieee80211_watchdog(&sc->sc_ic);
6919 }
6920 
6921 static int
6922 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6923 {
6924 	struct iwm_softc *sc = ifp->if_softc;
6925 	struct ieee80211com *ic = &sc->sc_ic;
6926 	const struct sockaddr *sa;
6927 	int s, err = 0;
6928 
6929 	s = splnet();
6930 
6931 	switch (cmd) {
6932 	case SIOCSIFADDR:
6933 		ifp->if_flags |= IFF_UP;
6934 		/* FALLTHROUGH */
6935 	case SIOCSIFFLAGS:
6936 		err = ifioctl_common(ifp, cmd, data);
6937 		if (err)
6938 			break;
6939 		if (ifp->if_flags & IFF_UP) {
6940 			if (!(ifp->if_flags & IFF_RUNNING)) {
6941 				err = iwm_init(ifp);
6942 				if (err)
6943 					ifp->if_flags &= ~IFF_UP;
6944 			}
6945 		} else {
6946 			if (ifp->if_flags & IFF_RUNNING)
6947 				iwm_stop(ifp, 1);
6948 		}
6949 		break;
6950 
6951 	case SIOCADDMULTI:
6952 	case SIOCDELMULTI:
6953 		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6954 			err = ENXIO;
6955 			break;
6956 		}
6957 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6958 		err = (cmd == SIOCADDMULTI) ?
6959 		    ether_addmulti(sa, &sc->sc_ec) :
6960 		    ether_delmulti(sa, &sc->sc_ec);
6961 		if (err == ENETRESET)
6962 			err = 0;
6963 		break;
6964 
6965 	default:
6966 		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6967 			err = ether_ioctl(ifp, cmd, data);
6968 			break;
6969 		}
6970 		err = ieee80211_ioctl(ic, cmd, data);
6971 		break;
6972 	}
6973 
6974 	if (err == ENETRESET) {
6975 		err = 0;
6976 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6977 		    (IFF_UP | IFF_RUNNING)) {
6978 			iwm_stop(ifp, 0);
6979 			err = iwm_init(ifp);
6980 		}
6981 	}
6982 
6983 	splx(s);
6984 	return err;
6985 }
6986 
6987 /*
6988  * Note: This structure is read from the device with IO accesses,
6989  * and the reading already does the endian conversion. As it is
6990  * read with uint32_t-sized accesses, any members with a different size
6991  * need to be ordered correctly though!
6992  */
6993 struct iwm_error_event_table {
6994 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6995 	uint32_t error_id;		/* type of error */
6996 	uint32_t trm_hw_status0;	/* TRM HW status */
6997 	uint32_t trm_hw_status1;	/* TRM HW status */
6998 	uint32_t blink2;		/* branch link */
6999 	uint32_t ilink1;		/* interrupt link */
7000 	uint32_t ilink2;		/* interrupt link */
7001 	uint32_t data1;		/* error-specific data */
7002 	uint32_t data2;		/* error-specific data */
7003 	uint32_t data3;		/* error-specific data */
7004 	uint32_t bcon_time;		/* beacon timer */
7005 	uint32_t tsf_low;		/* network timestamp function timer */
7006 	uint32_t tsf_hi;		/* network timestamp function timer */
7007 	uint32_t gp1;		/* GP1 timer register */
7008 	uint32_t gp2;		/* GP2 timer register */
7009 	uint32_t fw_rev_type;	/* firmware revision type */
7010 	uint32_t major;		/* uCode version major */
7011 	uint32_t minor;		/* uCode version minor */
7012 	uint32_t hw_ver;		/* HW Silicon version */
7013 	uint32_t brd_ver;		/* HW board version */
7014 	uint32_t log_pc;		/* log program counter */
7015 	uint32_t frame_ptr;		/* frame pointer */
7016 	uint32_t stack_ptr;		/* stack pointer */
7017 	uint32_t hcmd;		/* last host command header */
7018 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
7019 				 * rxtx_flag */
7020 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
7021 				 * host_flag */
7022 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
7023 				 * enc_flag */
7024 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
7025 				 * time_flag */
7026 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
7027 				 * wico interrupt */
7028 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
7029 	uint32_t wait_event;		/* wait event() caller address */
7030 	uint32_t l2p_control;	/* L2pControlField */
7031 	uint32_t l2p_duration;	/* L2pDurationField */
7032 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
7033 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
7034 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
7035 				 * (LMPM_PMG_SEL) */
7036 	uint32_t u_timestamp;	/* date and time of the firmware
7037 				 * compilation */
7038 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
7039 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
7040 
7041 /*
7042  * UMAC error struct - relevant starting from family 8000 chip.
7043  * Note: This structure is read from the device with IO accesses,
7044  * and the reading already does the endian conversion. As it is
7045  * read with u32-sized accesses, any members with a different size
7046  * need to be ordered correctly though!
7047  */
7048 struct iwm_umac_error_event_table {
7049 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
7050 	uint32_t error_id;	/* type of error */
7051 	uint32_t blink1;	/* branch link */
7052 	uint32_t blink2;	/* branch link */
7053 	uint32_t ilink1;	/* interrupt link */
7054 	uint32_t ilink2;	/* interrupt link */
7055 	uint32_t data1;		/* error-specific data */
7056 	uint32_t data2;		/* error-specific data */
7057 	uint32_t data3;		/* error-specific data */
7058 	uint32_t umac_major;
7059 	uint32_t umac_minor;
7060 	uint32_t frame_pointer;	/* core register 27 */
7061 	uint32_t stack_pointer;	/* core register 28 */
7062 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
7063 	uint32_t nic_isr_pref;	/* ISR status register */
7064 } __packed;
7065 
7066 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
7067 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
7068 
7069 #ifdef IWM_DEBUG
7070 static const struct {
7071 	const char *name;
7072 	uint8_t num;
7073 } advanced_lookup[] = {
7074 	{ "NMI_INTERRUPT_WDG", 0x34 },
7075 	{ "SYSASSERT", 0x35 },
7076 	{ "UCODE_VERSION_MISMATCH", 0x37 },
7077 	{ "BAD_COMMAND", 0x38 },
7078 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
7079 	{ "FATAL_ERROR", 0x3D },
7080 	{ "NMI_TRM_HW_ERR", 0x46 },
7081 	{ "NMI_INTERRUPT_TRM", 0x4C },
7082 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
7083 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
7084 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
7085 	{ "NMI_INTERRUPT_HOST", 0x66 },
7086 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
7087 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
7088 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
7089 	{ "ADVANCED_SYSASSERT", 0 },
7090 };
7091 
7092 static const char *
7093 iwm_desc_lookup(uint32_t num)
7094 {
7095 	int i;
7096 
7097 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
7098 		if (advanced_lookup[i].num == num)
7099 			return advanced_lookup[i].name;
7100 
7101 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7102 	return advanced_lookup[i].name;
7103 }
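/*
 * For example, iwm_desc_lookup(0x38) returns "BAD_COMMAND", while an
 * id missing from advanced_lookup[] (say, a hypothetical 0x99) falls
 * through to the final "ADVANCED_SYSASSERT" entry.
 */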
7104 
7105 /*
7106  * Support for dumping the error log seemed like a good idea ...
7107  * but it's mostly hex junk and the only sensible thing is the
7108  * hw/ucode revision (which we know anyway).  Since it's here,
7109  * I'll just leave it in, just in case e.g. the Intel guys want to
7110  * help us decipher some "ADVANCED_SYSASSERT" later.
7111  */
7112 static void
7113 iwm_nic_error(struct iwm_softc *sc)
7114 {
7115 	struct iwm_error_event_table t;
7116 	uint32_t base;
7117 
7118 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
7119 	base = sc->sc_uc.uc_error_event_table;
7120 	if (base < 0x800000) {
7121 		aprint_error_dev(sc->sc_dev,
7122 		    "Invalid error log pointer 0x%08x\n", base);
7123 		return;
7124 	}
7125 
7126 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7127 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7128 		return;
7129 	}
7130 
7131 	if (!t.valid) {
7132 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
7133 		return;
7134 	}
7135 
7136 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7137 		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
7138 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7139 		    sc->sc_flags, t.valid);
7140 	}
7141 
7142 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
7143 	    iwm_desc_lookup(t.error_id));
7144 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
7145 	    t.trm_hw_status0);
7146 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
7147 	    t.trm_hw_status1);
7148 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
7149 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
7150 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
7151 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
7152 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
7153 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
7154 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
7155 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
7156 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
7157 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
7158 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
7159 	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
7160 	    t.fw_rev_type);
7161 	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
7162 	    t.major);
7163 	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
7164 	    t.minor);
7165 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
7166 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
7167 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
7168 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
7169 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
7170 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
7171 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
7172 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
7173 	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
7174 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
7175 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
7176 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
7177 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
7178 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
7179 	    t.l2p_addr_match);
7180 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
7181 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
7182 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
7183 
7184 	if (sc->sc_uc.uc_umac_error_event_table)
7185 		iwm_nic_umac_error(sc);
7186 }
7187 
7188 static void
7189 iwm_nic_umac_error(struct iwm_softc *sc)
7190 {
7191 	struct iwm_umac_error_event_table t;
7192 	uint32_t base;
7193 
7194 	base = sc->sc_uc.uc_umac_error_event_table;
7195 
7196 	if (base < 0x800000) {
7197 		aprint_error_dev(sc->sc_dev,
7198 		    "Invalid error log pointer 0x%08x\n", base);
7199 		return;
7200 	}
7201 
7202 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7203 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7204 		return;
7205 	}
7206 
7207 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7208 		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
7209 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7210 		    sc->sc_flags, t.valid);
7211 	}
7212 
7213 	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
7214 		iwm_desc_lookup(t.error_id));
7215 	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
7216 	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
7217 	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
7218 	    t.ilink1);
7219 	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
7220 	    t.ilink2);
7221 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
7222 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
7223 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
7224 	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
7225 	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
7226 	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
7227 	    t.frame_pointer);
7228 	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
7229 	    t.stack_pointer);
7230 	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
7231 	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
7232 	    t.nic_isr_pref);
7233 }
7234 #endif
7235 
7236 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
7237 do {									\
7238 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7239 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
7240 	_var_ = (void *)((_pkt_)+1);					\
7241 } while (/*CONSTCOND*/0)
7242 
7243 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
7244 do {									\
7245 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7246 	    (_len_), BUS_DMASYNC_POSTREAD);				\
7247 	_ptr_ = (void *)((_pkt_)+1);					\
7248 } while (/*CONSTCOND*/0)
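/*
 * Typical use (a sketch): once iwm_notif_intr() below has mapped a
 * packet, the payload following the packet header is synced for
 * reading and a typed pointer to it is taken, e.g.
 *
 *	struct iwm_cmd_response *cresp;
 *	SYNC_RESP_STRUCT(cresp, pkt);
 *
 * Note that 'sc' and 'data' are picked up from the expansion site,
 * so these macros are only usable inside iwm_notif_intr().
 */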
7249 
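/*
 * ADVANCE_RXQ() walks sc->rxq.cur forward one slot, wrapping back to
 * 0 after IWM_RX_RING_COUNT - 1.
 */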
7250 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
7251 
7252 static void
7253 iwm_notif_intr(struct iwm_softc *sc)
7254 {
7255 	uint16_t hw;
7256 
7257 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7258 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7259 
7260 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7261 	while (sc->rxq.cur != hw) {
7262 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7263 		struct iwm_rx_packet *pkt;
7264 		struct iwm_cmd_response *cresp;
7265 		int orig_qid, qid, idx, code;
7266 
7267 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
7268 		    BUS_DMASYNC_POSTREAD);
7269 		pkt = mtod(data->m, struct iwm_rx_packet *);
7270 
7271 		orig_qid = pkt->hdr.qid;
7272 		qid = orig_qid & ~0x80;
7273 		idx = pkt->hdr.idx;
7274 
7275 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7276 
7277 		/*
7278 		 * randomly get these from the firmware, no idea why.
7279 		 * they at least seem harmless, so just ignore them for now
7280 		 */
7281 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
7282 		    || pkt->len_n_flags == htole32(0x55550000))) {
7283 			ADVANCE_RXQ(sc);
7284 			continue;
7285 		}
7286 
7287 		switch (code) {
7288 		case IWM_REPLY_RX_PHY_CMD:
7289 			iwm_rx_rx_phy_cmd(sc, pkt, data);
7290 			break;
7291 
7292 		case IWM_REPLY_RX_MPDU_CMD:
7293 			iwm_rx_rx_mpdu(sc, pkt, data);
7294 			break;
7295 
7296 		case IWM_TX_CMD:
7297 			iwm_rx_tx_cmd(sc, pkt, data);
7298 			break;
7299 
7300 		case IWM_MISSED_BEACONS_NOTIFICATION:
7301 			iwm_rx_missed_beacons_notif(sc, pkt, data);
7302 			break;
7303 
7304 		case IWM_MFUART_LOAD_NOTIFICATION:
7305 			break;
7306 
7307 		case IWM_ALIVE: {
7308 			struct iwm_alive_resp_v1 *resp1;
7309 			struct iwm_alive_resp_v2 *resp2;
7310 			struct iwm_alive_resp_v3 *resp3;
7311 
7312 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
7313 				SYNC_RESP_STRUCT(resp1, pkt);
7314 				sc->sc_uc.uc_error_event_table
7315 				    = le32toh(resp1->error_event_table_ptr);
7316 				sc->sc_uc.uc_log_event_table
7317 				    = le32toh(resp1->log_event_table_ptr);
7318 				sc->sched_base = le32toh(resp1->scd_base_ptr);
7319 				if (resp1->status == IWM_ALIVE_STATUS_OK)
7320 					sc->sc_uc.uc_ok = 1;
7321 				else
7322 					sc->sc_uc.uc_ok = 0;
7323 			}
7324 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
7325 				SYNC_RESP_STRUCT(resp2, pkt);
7326 				sc->sc_uc.uc_error_event_table
7327 				    = le32toh(resp2->error_event_table_ptr);
7328 				sc->sc_uc.uc_log_event_table
7329 				    = le32toh(resp2->log_event_table_ptr);
7330 				sc->sched_base = le32toh(resp2->scd_base_ptr);
7331 				sc->sc_uc.uc_umac_error_event_table
7332 				    = le32toh(resp2->error_info_addr);
7333 				if (resp2->status == IWM_ALIVE_STATUS_OK)
7334 					sc->sc_uc.uc_ok = 1;
7335 				else
7336 					sc->sc_uc.uc_ok = 0;
7337 			}
7338 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7339 				SYNC_RESP_STRUCT(resp3, pkt);
7340 				sc->sc_uc.uc_error_event_table
7341 				    = le32toh(resp3->error_event_table_ptr);
7342 				sc->sc_uc.uc_log_event_table
7343 				    = le32toh(resp3->log_event_table_ptr);
7344 				sc->sched_base = le32toh(resp3->scd_base_ptr);
7345 				sc->sc_uc.uc_umac_error_event_table
7346 				    = le32toh(resp3->error_info_addr);
7347 				if (resp3->status == IWM_ALIVE_STATUS_OK)
7348 					sc->sc_uc.uc_ok = 1;
7349 				else
7350 					sc->sc_uc.uc_ok = 0;
7351 			}
7352 
7353 			sc->sc_uc.uc_intr = 1;
7354 			wakeup(&sc->sc_uc);
7355 			break;
7356 		}
7357 
7358 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
7359 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
7360 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
7361 			uint16_t size = le16toh(phy_db_notif->length);
7362 			bus_dmamap_sync(sc->sc_dmat, data->map,
7363 			    sizeof(*pkt) + sizeof(*phy_db_notif),
7364 			    size, BUS_DMASYNC_POSTREAD);
7365 			iwm_phy_db_set_section(sc, phy_db_notif, size);
7366 			break;
7367 		}
7368 
7369 		case IWM_STATISTICS_NOTIFICATION: {
7370 			struct iwm_notif_statistics *stats;
7371 			SYNC_RESP_STRUCT(stats, pkt);
7372 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7373 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
7374 			break;
7375 		}
7376 
7377 		case IWM_NVM_ACCESS_CMD:
7378 		case IWM_MCC_UPDATE_CMD:
7379 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
7380 				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7381 				    sizeof(sc->sc_cmd_resp),
7382 				    BUS_DMASYNC_POSTREAD);
7383 				memcpy(sc->sc_cmd_resp,
7384 				    pkt, sizeof(sc->sc_cmd_resp));
7385 			}
7386 			break;
7387 
7388 		case IWM_MCC_CHUB_UPDATE_CMD: {
7389 			struct iwm_mcc_chub_notif *notif;
7390 			SYNC_RESP_STRUCT(notif, pkt);
7391 
7392 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
7393 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
7394 			sc->sc_fw_mcc[2] = '\0';
7395 			break;
7396 		}
7397 
7398 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
7399 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7400 		    IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7401 			struct iwm_dts_measurement_notif_v1 *notif1;
7402 			struct iwm_dts_measurement_notif_v2 *notif2;
7403 
7404 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7405 				SYNC_RESP_STRUCT(notif1, pkt);
7406 				DPRINTF(("%s: DTS temp=%d \n",
7407 				    DEVNAME(sc), notif1->temp));
7408 				break;
7409 			}
7410 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7411 				SYNC_RESP_STRUCT(notif2, pkt);
7412 				DPRINTF(("%s: DTS temp=%d \n",
7413 				    DEVNAME(sc), notif2->temp));
7414 				break;
7415 			}
7416 			break;
7417 		}
7418 
7419 		case IWM_PHY_CONFIGURATION_CMD:
7420 		case IWM_TX_ANT_CONFIGURATION_CMD:
7421 		case IWM_ADD_STA:
7422 		case IWM_MAC_CONTEXT_CMD:
7423 		case IWM_REPLY_SF_CFG_CMD:
7424 		case IWM_POWER_TABLE_CMD:
7425 		case IWM_PHY_CONTEXT_CMD:
7426 		case IWM_BINDING_CONTEXT_CMD:
7427 		case IWM_TIME_EVENT_CMD:
7428 		case IWM_SCAN_REQUEST_CMD:
7429 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7430 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7431 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7432 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7433 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
7434 		case IWM_REPLY_BEACON_FILTERING_CMD:
7435 		case IWM_MAC_PM_POWER_TABLE:
7436 		case IWM_TIME_QUOTA_CMD:
7437 		case IWM_REMOVE_STA:
7438 		case IWM_TXPATH_FLUSH:
7439 		case IWM_LQ_CMD:
7440 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_FW_PAGING_BLOCK_CMD):
7441 		case IWM_BT_CONFIG:
7442 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
7443 			SYNC_RESP_STRUCT(cresp, pkt);
7444 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
7445 				memcpy(sc->sc_cmd_resp,
7446 				    pkt, sizeof(*pkt) + sizeof(*cresp));
7447 			}
7448 			break;
7449 
7450 		/* ignore */
7451 		case IWM_PHY_DB_CMD:
7452 			break;
7453 
7454 		case IWM_INIT_COMPLETE_NOTIF:
7455 			sc->sc_init_complete = 1;
7456 			wakeup(&sc->sc_init_complete);
7457 			break;
7458 
7459 		case IWM_SCAN_OFFLOAD_COMPLETE: {
7460 			struct iwm_periodic_scan_complete *notif;
7461 			SYNC_RESP_STRUCT(notif, pkt);
7462 			break;
7463 		}
7464 
7465 		case IWM_SCAN_ITERATION_COMPLETE: {
7466 			struct iwm_lmac_scan_complete_notif *notif;
7467 			SYNC_RESP_STRUCT(notif, pkt);
7468 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7469 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7470 				iwm_endscan(sc);
7471 			}
7472 			break;
7473 		}
7474 
7475 		case IWM_SCAN_COMPLETE_UMAC: {
7476 			struct iwm_umac_scan_complete *notif;
7477 			SYNC_RESP_STRUCT(notif, pkt);
7478 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7479 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7480 				iwm_endscan(sc);
7481 			}
7482 			break;
7483 		}
7484 
7485 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7486 			struct iwm_umac_scan_iter_complete_notif *notif;
7487 			SYNC_RESP_STRUCT(notif, pkt);
7488 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7489 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7490 				iwm_endscan(sc);
7491 			}
7492 			break;
7493 		}
7494 
7495 		case IWM_REPLY_ERROR: {
7496 			struct iwm_error_resp *resp;
7497 			SYNC_RESP_STRUCT(resp, pkt);
7498 			aprint_error_dev(sc->sc_dev,
7499 			    "firmware error 0x%x, cmd 0x%x\n",
7500 			    le32toh(resp->error_type), resp->cmd_id);
7501 			break;
7502 		}
7503 
7504 		case IWM_TIME_EVENT_NOTIFICATION: {
7505 			struct iwm_time_event_notif *notif;
7506 			SYNC_RESP_STRUCT(notif, pkt);
7507 			break;
7508 		}
7509 
7510 		case IWM_DEBUG_LOG_MSG:
7511 			break;
7512 
7513 		case IWM_MCAST_FILTER_CMD:
7514 			break;
7515 
7516 		case IWM_SCD_QUEUE_CFG: {
7517 			struct iwm_scd_txq_cfg_rsp *rsp;
7518 			SYNC_RESP_STRUCT(rsp, pkt);
7519 			break;
7520 		}
7521 
7522 		default:
7523 			aprint_error_dev(sc->sc_dev,
7524 			    "unhandled firmware response 0x%x 0x%x/0x%x "
7525 			    "rx ring %d[%d]\n",
7526 			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
7527 			break;
7528 		}
7529 
7530 		/*
7531 		 * uCode sets bit 0x80 when it originates the notification,
7532 		 * i.e. when the notification is not a direct response to a
7533 		 * command sent by the driver.
7534 		 * For example, uCode issues IWM_REPLY_RX when it sends a
7535 		 * received frame to the driver.
7536 		 */
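		/*
		 * Concretely: hdr.qid == 0x85 would be a firmware-
		 * originated notification for queue 5, completing no
		 * command, while hdr.qid == 0x05 marks the response to
		 * the driver command at index 'idx' of TX queue 5.
		 */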
7537 		if (!(orig_qid & (1 << 7))) {
7538 			iwm_cmd_done(sc, qid, idx);
7539 		}
7540 
7541 		ADVANCE_RXQ(sc);
7542 	}
7543 
7544 	/*
7545 	 * Seems like the hardware gets upset unless we align the write by 8??
7546 	 */
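	/*
	 * E.g. hw == 13 is decremented to 12 and masked to 8, so the
	 * device always sees a write pointer that is a multiple of 8.
	 */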
7547 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7548 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7549 }
7550 
7551 static int
7552 iwm_intr(void *arg)
7553 {
7554 	struct iwm_softc *sc = arg;
7555 
7556 	/* Disable interrupts */
7557 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7558 
7559 	softint_schedule(sc->sc_soft_ih);
7560 	return 1;
7561 }
7562 
7563 static void
7564 iwm_softintr(void *arg)
7565 {
7566 	struct iwm_softc *sc = arg;
7567 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7568 	uint32_t r1, r2;
7569 	int isperiodic = 0, s;
7570 
7571 	if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
7572 		uint32_t *ict = sc->ict_dma.vaddr;
7573 		int tmp;
7574 
7575 		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7576 		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7577 		tmp = le32toh(ict[sc->ict_cur]);
7578 		if (tmp == 0)
7579 			goto out_ena;	/* Interrupt not for us. */
7580 
7581 		/*
7582 		 * ok, there was something.  keep plowing until we have all.
7583 		 */
7584 		r1 = r2 = 0;
7585 		while (tmp) {
7586 			r1 |= tmp;
7587 			ict[sc->ict_cur] = 0;	/* Acknowledge. */
7588 			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7589 			tmp = le32toh(ict[sc->ict_cur]);
7590 		}
7591 
7592 		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7593 		    0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);
7594 
7595 		/* this is where the fun begins.  don't ask */
7596 		if (r1 == 0xffffffff)
7597 			r1 = 0;
7598 
7599 		/* i am not expected to understand this */
7600 		if (r1 & 0xc0000)
7601 			r1 |= 0x8000;
7602 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
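		/*
		 * (Best-effort reading of the shuffle above: the ICT
		 * entries pack interrupt causes into two bytes, and the
		 * OR/shift sequence spreads them back into the bit
		 * layout that IWM_CSR_INT uses.)
		 */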
7603 	} else {
7604 		r1 = IWM_READ(sc, IWM_CSR_INT);
7605 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7606 			return;	/* Hardware gone! */
7607 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7608 	}
7609 	if (r1 == 0 && r2 == 0) {
7610 		goto out_ena;	/* Interrupt not for us. */
7611 	}
7612 
7613 	/* Acknowledge interrupts. */
7614 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7615 	if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
7616 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);
7617 
7618 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7619 #ifdef IWM_DEBUG
7620 		int i;
7621 
7622 		iwm_nic_error(sc);
7623 
7624 		/* Dump driver status (TX and RX rings) while we're here. */
7625 		DPRINTF(("driver status:\n"));
7626 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
7627 			struct iwm_tx_ring *ring = &sc->txq[i];
7628 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
7629 			    "queued=%-3d\n",
7630 			    i, ring->qid, ring->cur, ring->queued));
7631 		}
7632 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
7633 		DPRINTF(("  802.11 state %s\n",
7634 		    ieee80211_state_name[sc->sc_ic.ic_state]));
7635 #endif
7636 
7637 		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7638  fatal:
7639 		s = splnet();
7640 		ifp->if_flags &= ~IFF_UP;
7641 		iwm_stop(ifp, 1);
7642 		splx(s);
7643 		/* Don't restore interrupt mask */
7644 		return;
7645 
7647 
7648 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7649 		aprint_error_dev(sc->sc_dev,
7650 		    "hardware error, stopping device\n");
7651 		goto fatal;
7652 	}
7653 
7654 	/* firmware chunk loaded */
7655 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7656 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7657 		sc->sc_fw_chunk_done = 1;
7658 		wakeup(&sc->sc_fw);
7659 	}
7660 
7661 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7662 		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
7663 			goto fatal;
7664 	}
7665 
7666 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7667 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7668 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7669 			IWM_WRITE_1(sc,
7670 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7671 		isperiodic = 1;
7672 	}
7673 
7674 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7675 	    isperiodic) {
7676 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7677 
7678 		iwm_notif_intr(sc);
7679 
7680 		/* enable periodic interrupt, see above */
7681 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7682 		    !isperiodic)
7683 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7684 			    IWM_CSR_INT_PERIODIC_ENA);
7685 	}
7686 
7687 out_ena:
7688 	iwm_restore_interrupts(sc);
7689 }
7690 
7691 /*
7692  * Autoconf glue-sniffing
7693  */
7694 
7695 static const pci_product_id_t iwm_devices[] = {
7696 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7697 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7698 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7699 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7700 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7701 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7702 	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7703 	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7704 	PCI_PRODUCT_INTEL_WIFI_LINK_3168,
7705 	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7706 	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7707 	PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7708 	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
7709 	PCI_PRODUCT_INTEL_WIFI_LINK_8265,
7710 };
7711 
7712 static int
7713 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7714 {
7715 	struct pci_attach_args *pa = aux;
7716 
7717 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7718 		return 0;
7719 
7720 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7721 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7722 			return 1;
7723 
7724 	return 0;
7725 }
7726 
7727 static int
7728 iwm_preinit(struct iwm_softc *sc)
7729 {
7730 	struct ieee80211com *ic = &sc->sc_ic;
7731 	int err;
7732 
7733 	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
7734 		return 0;
7735 
7736 	err = iwm_start_hw(sc);
7737 	if (err) {
7738 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7739 		return err;
7740 	}
7741 
7742 	err = iwm_run_init_mvm_ucode(sc, 1);
7743 	iwm_stop_device(sc);
7744 	if (err)
7745 		return err;
7746 
7747 	sc->sc_flags |= IWM_FLAG_ATTACHED;
7748 
7749 	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7750 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7751 	    ether_sprintf(sc->sc_nvm.hw_addr));
7752 
7753 #ifndef IEEE80211_NO_HT
7754 	if (sc->sc_nvm.sku_cap_11n_enable)
7755 		iwm_setup_ht_rates(sc);
7756 #endif
7757 
7758 	/* not all hardware can do 5GHz band */
7759 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7760 		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7761 
7762 	ieee80211_ifattach(ic);
7763 
7764 	ic->ic_node_alloc = iwm_node_alloc;
7765 
7766 	/* Override 802.11 state transition machine. */
7767 	sc->sc_newstate = ic->ic_newstate;
7768 	ic->ic_newstate = iwm_newstate;
7769 	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
7770 	ieee80211_announce(ic);
7771 
7772 	iwm_radiotap_attach(sc);
7773 
7774 	return 0;
7775 }
7776 
7777 static void
7778 iwm_attach_hook(device_t dev)
7779 {
7780 	struct iwm_softc *sc = device_private(dev);
7781 
7782 	iwm_preinit(sc);
7783 }
7784 
7785 static void
7786 iwm_attach(device_t parent, device_t self, void *aux)
7787 {
7788 	struct iwm_softc *sc = device_private(self);
7789 	struct pci_attach_args *pa = aux;
7790 	struct ieee80211com *ic = &sc->sc_ic;
7791 	struct ifnet *ifp = &sc->sc_ec.ec_if;
7792 	pcireg_t reg, memtype;
7793 	char intrbuf[PCI_INTRSTR_LEN];
7794 	const char *intrstr;
7795 	int err;
7796 	int txq_i;
7797 	const struct sysctlnode *node;
7798 
7799 	sc->sc_dev = self;
7800 	sc->sc_pct = pa->pa_pc;
7801 	sc->sc_pcitag = pa->pa_tag;
7802 	sc->sc_dmat = pa->pa_dmat;
7803 	sc->sc_pciid = pa->pa_id;
7804 
7805 	pci_aprint_devinfo(pa, NULL);
7806 
7807 	if (workqueue_create(&sc->sc_nswq, "iwmns",
7808 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7809 		panic("%s: could not create workqueue: newstate",
7810 		    device_xname(self));
7811 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
7812 	if (sc->sc_soft_ih == NULL)
7813 		panic("%s: could not establish softint", device_xname(self));
7814 
7815 	/*
7816 	 * Get the offset of the PCI Express Capability Structure in PCI
7817 	 * Configuration Space.
7818 	 */
7819 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7820 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7821 	if (err == 0) {
7822 		aprint_error_dev(self,
7823 		    "PCIe capability structure not found!\n");
7824 		return;
7825 	}
7826 
7827 	/* Clear device-specific "PCI retry timeout" register (41h). */
7828 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7829 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7830 
7831 	/* Enable bus-mastering */
7832 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7833 	reg |= PCI_COMMAND_MASTER_ENABLE;
7834 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7835 
7836 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7837 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7838 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7839 	if (err) {
7840 		aprint_error_dev(self, "can't map mem space\n");
7841 		return;
7842 	}
7843 
7844 	/* Install interrupt handler. */
7845 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7846 	if (err) {
7847 		aprint_error_dev(self, "can't allocate interrupt\n");
7848 		return;
7849 	}
7850 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7851 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
7852 		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7853 	else
7854 		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7855 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7856 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7857 	    sizeof(intrbuf));
7858 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
7859 	    IPL_NET, iwm_intr, sc, device_xname(self));
7860 	if (sc->sc_ih == NULL) {
7861 		aprint_error_dev(self, "can't establish interrupt");
7862 		if (intrstr != NULL)
7863 			aprint_error(" at %s", intrstr);
7864 		aprint_error("\n");
7865 		return;
7866 	}
7867 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7868 
7869 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7870 
7871 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7872 	switch (PCI_PRODUCT(sc->sc_pciid)) {
7873 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7874 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7875 		sc->sc_fwname = "iwlwifi-3160-17.ucode";
7876 		sc->host_interrupt_operation_mode = 1;
7877 		sc->apmg_wake_up_wa = 1;
7878 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7879 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7880 		break;
7881 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7882 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7883 		sc->sc_fwname = "iwlwifi-7265D-22.ucode";
7884 		sc->host_interrupt_operation_mode = 0;
7885 		sc->apmg_wake_up_wa = 1;
7886 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7887 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7888 		break;
7889 	case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
7890 		sc->sc_fwname = "iwlwifi-3168-22.ucode";
7891 		sc->host_interrupt_operation_mode = 0;
7892 		sc->apmg_wake_up_wa = 1;
7893 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7894 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7895 		break;
7896 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7897 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7898 		sc->sc_fwname = "iwlwifi-7260-17.ucode";
7899 		sc->host_interrupt_operation_mode = 1;
7900 		sc->apmg_wake_up_wa = 1;
7901 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7902 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7903 		break;
7904 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7905 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7906 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7907 		    IWM_CSR_HW_REV_TYPE_7265D ?
7908 		    "iwlwifi-7265D-22.ucode" : "iwlwifi-7265-17.ucode";
7909 		sc->host_interrupt_operation_mode = 0;
7910 		sc->apmg_wake_up_wa = 1;
7911 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7912 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7913 		break;
7914 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7915 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7916 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
7917 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
7918 		sc->sc_fwname = "iwlwifi-8000C-22.ucode";
7919 		sc->host_interrupt_operation_mode = 0;
7920 		sc->apmg_wake_up_wa = 0;
7921 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7922 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7923 		break;
7924 	case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
7925 		sc->sc_fwname = "iwlwifi-8265-22.ucode";
7926 		sc->host_interrupt_operation_mode = 0;
7927 		sc->apmg_wake_up_wa = 0;
7928 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7929 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7930 		break;
7931 	default:
7932 		aprint_error_dev(self, "unknown product %#x\n",
7933 		    PCI_PRODUCT(sc->sc_pciid));
7934 		return;
7935 	}
7936 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7937 
7938 	/*
7939 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
7940 	 * changed, and the revision step now also includes bits 0-1 (no more
7941 	 * "dash" value). To keep hw_rev backwards compatible, we store it
7942 	 * in the old format.
7943 	 */
7944 
7945 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7946 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7947 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
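	/*
	 * Worked example (assuming IWM_CSR_HW_REV_STEP() extracts bits
	 * 2-3): shifting hw_rev left by two moves the new-format step
	 * from bits 0-1 into the old "dash" position, where it is
	 * extracted and put back into bits 2-3 of the stored value.
	 */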
7948 
7949 	if (iwm_prepare_card_hw(sc) != 0) {
7950 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7951 		return;
7952 	}
7953 
7954 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7955 		uint32_t hw_step;
7956 
7957 		/*
7958 		 * To recognize a C step, the driver must read the chip
7959 		 * version id located at the AUX bus MISC address.
7960 		 */
7961 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7962 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7963 		DELAY(2);
7964 
7965 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7966 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7967 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7968 				   25000);
7969 		if (!err) {
7970 			aprint_error_dev(sc->sc_dev,
7971 			    "failed to wake up the nic\n");
7972 			return;
7973 		}
7974 
7975 		if (iwm_nic_lock(sc)) {
7976 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7977 			hw_step |= IWM_ENABLE_WFPM;
7978 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7979 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7980 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7981 			if (hw_step == 0x3)
7982 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7983 				    (IWM_SILICON_C_STEP << 2);
7984 			iwm_nic_unlock(sc);
7985 		} else {
7986 			aprint_error_dev(sc->sc_dev,
7987 			    "failed to lock the nic\n");
7988 			return;
7989 		}
7990 	}
7991 
7992 	/*
7993 	 * Allocate DMA memory for firmware transfers.
7994 	 * Must be aligned on a 16-byte boundary.
7995 	 */
7996 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
7997 	    16);
7998 	if (err) {
7999 		aprint_error_dev(sc->sc_dev,
8000 		    "could not allocate memory for firmware\n");
8001 		return;
8002 	}
8003 
8004 	/* Allocate "Keep Warm" page, used internally by the card. */
8005 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
8006 	if (err) {
8007 		aprint_error_dev(sc->sc_dev,
8008 		    "could not allocate keep warm page\n");
8009 		goto fail1;
8010 	}
8011 
8012 	/* Allocate interrupt cause table (ICT). */
8013 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
8014 	    1 << IWM_ICT_PADDR_SHIFT);
8015 	if (err) {
8016 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
8017 		goto fail2;
8018 	}
8019 
8020 	/* TX scheduler rings must be aligned on a 1KB boundary. */
8021 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
8022 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
8023 	if (err) {
8024 		aprint_error_dev(sc->sc_dev,
8025 		    "could not allocate TX scheduler rings\n");
8026 		goto fail3;
8027 	}
8028 
8029 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
8030 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8031 		if (err) {
8032 			aprint_error_dev(sc->sc_dev,
8033 			    "could not allocate TX ring %d\n", txq_i);
8034 			goto fail4;
8035 		}
8036 	}
8037 
8038 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
8039 	if (err) {
8040 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
8041 		goto fail5;
8042 	}
8043 
8044 	/* Clear pending interrupts. */
8045 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
8046 
8047 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8048 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
8049 	    SYSCTL_DESCR("iwm per-controller controls"),
8050 	    NULL, 0, NULL, 0,
8051 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
8052 	    CTL_EOL)) != 0) {
8053 		aprint_normal_dev(sc->sc_dev,
8054 		    "couldn't create iwm per-controller sysctl node\n");
8055 	}
8056 	if (err == 0) {
8057 		int iwm_nodenum = node->sysctl_num;
8058 
8059 		/* Reload firmware sysctl node */
8060 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8061 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
8062 		    SYSCTL_DESCR("Reload firmware"),
8063 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
8064 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
8065 		    CTL_EOL)) != 0) {
8066 			aprint_normal_dev(sc->sc_dev,
8067 			    "couldn't create load_fw sysctl node\n");
8068 		}
8069 	}
8070 
8071 	/*
8072 	 * Attach interface
8073 	 */
8074 	ic->ic_ifp = ifp;
8075 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
8076 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
8077 	ic->ic_state = IEEE80211_S_INIT;
8078 
8079 	/* Set device capabilities. */
8080 	ic->ic_caps =
8081 	    IEEE80211_C_WEP |		/* WEP */
8082 	    IEEE80211_C_WPA |		/* 802.11i */
8083 #ifdef notyet
8084 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
8085 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
8086 #endif
8087 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
8088 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
8089 
8090 #ifndef IEEE80211_NO_HT
8091 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8092 	ic->ic_htxcaps = 0;
8093 	ic->ic_txbfcaps = 0;
8094 	ic->ic_aselcaps = 0;
8095 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8096 #endif
8097 
8098 	/* all hardware can do 2.4GHz band */
8099 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8100 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8101 
8102 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
8103 		sc->sc_phyctxt[i].id = i;
8104 	}
8105 
8106 	sc->sc_amrr.amrr_min_success_threshold =  1;
8107 	sc->sc_amrr.amrr_max_success_threshold = 15;
8108 
8109 	/* IBSS channel undefined for now. */
8110 	ic->ic_ibss_chan = &ic->ic_channels[1];
8111 
8112 #if 0
8113 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
8114 #endif
8115 
8116 	ifp->if_softc = sc;
8117 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8118 	ifp->if_init = iwm_init;
8119 	ifp->if_stop = iwm_stop;
8120 	ifp->if_ioctl = iwm_ioctl;
8121 	ifp->if_start = iwm_start;
8122 	ifp->if_watchdog = iwm_watchdog;
8123 	IFQ_SET_READY(&ifp->if_snd);
8124 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8125 
8126 	err = if_initialize(ifp);
8127 	if (err != 0) {
8128 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
8129 		    err);
8130 		goto fail6;
8131 	}
8132 #if 0
8133 	ieee80211_ifattach(ic);
8134 #else
8135 	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
8136 #endif
8137 	/* Use common softint-based if_input */
8138 	ifp->if_percpuq = if_percpuq_create(ifp);
8139 	if_register(ifp);
8140 
8141 	callout_init(&sc->sc_calib_to, 0);
8142 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
8143 	callout_init(&sc->sc_led_blink_to, 0);
8144 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
8145 #ifndef IEEE80211_NO_HT
8146 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
8147 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
8148 		panic("%s: could not create workqueue: setrates",
8149 		    device_xname(self));
8150 	if (workqueue_create(&sc->sc_bawq, "iwmba",
8151 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
8152 		panic("%s: could not create workqueue: blockack",
8153 		    device_xname(self));
8154 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
8155 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
8156 		panic("%s: could not create workqueue: htprot",
8157 		    device_xname(self));
8158 #endif
8159 
8160 	if (pmf_device_register(self, NULL, NULL))
8161 		pmf_class_network_register(self, ifp);
8162 	else
8163 		aprint_error_dev(self, "couldn't establish power handler\n");
8164 
8165 	/*
8166 	 * We can't do normal attach before the file system is mounted
8167 	 * because we cannot read the MAC address without loading the
8168 	 * firmware from disk.  So we postpone until mountroot is done.
8169 	 * Notably, this will require a full driver unload/load cycle
8170 	 * (or reboot) in case the firmware is not present when the
8171 	 * hook runs.
8172 	 */
8173 	config_mountroot(self, iwm_attach_hook);
8174 
8175 	return;
8176 
8177 fail6:	iwm_free_rx_ring(sc, &sc->rxq);
8178 fail5:	while (--txq_i >= 0)
8179 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
8180 fail4:	iwm_dma_contig_free(&sc->sched_dma);
8181 fail3:	if (sc->ict_dma.vaddr != NULL)
8182 		iwm_dma_contig_free(&sc->ict_dma);
8183 fail2:	iwm_dma_contig_free(&sc->kw_dma);
8184 fail1:	iwm_dma_contig_free(&sc->fw_dma);
8185 }
8186 
8187 void
8188 iwm_radiotap_attach(struct iwm_softc *sc)
8189 {
8190 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8191 
8192 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
8193 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
8194 	    &sc->sc_drvbpf);
8195 
8196 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8197 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8198 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
8199 
8200 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
8201 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8202 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
8203 }
8204 
8205 #if 0
8206 static void
8207 iwm_init_task(void *arg)
8208 {
8209 	struct iwm_softc *sc = arg;
8210 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8211 	int s;
8212 
8213 	rw_enter_write(&sc->ioctl_rwl);
8214 	s = splnet();
8215 
8216 	iwm_stop(ifp, 0);
8217 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
8218 		iwm_init(ifp);
8219 
8220 	splx(s);
8221 	rw_exit(&sc->ioctl_rwl);
8222 }
8223 
8224 static void
8225 iwm_wakeup(struct iwm_softc *sc)
8226 {
8227 	pcireg_t reg;
8228 
8229 	/* Clear device-specific "PCI retry timeout" register (41h). */
8230 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8231 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8232 
8233 	iwm_init_task(sc);
8234 }
8235 
8236 static int
8237 iwm_activate(device_t self, enum devact act)
8238 {
8239 	struct iwm_softc *sc = device_private(self);
8240 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8241 
8242 	switch (act) {
8243 	case DVACT_DEACTIVATE:
8244 		if (ifp->if_flags & IFF_RUNNING)
8245 			iwm_stop(ifp, 0);
8246 		return 0;
8247 	default:
8248 		return EOPNOTSUPP;
8249 	}
8250 }
8251 #endif
8252 
8253 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
8254 	NULL, NULL);
8255 
8256 static int
8257 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
8258 {
8259 	struct sysctlnode node;
8260 	struct iwm_softc *sc;
8261 	int err, t;
8262 
8263 	node = *rnode;
8264 	sc = node.sysctl_data;
8265 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
8266 	node.sysctl_data = &t;
8267 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
8268 	if (err || newp == NULL)
8269 		return err;
8270 
8271 	if (t == 0)
8272 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
8273 	return 0;
8274 }
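/*
 * The node created in iwm_attach() surfaces as hw.iwm.iwm0.fw_loaded
 * (a sketch of the path for the first device); writing zero clears
 * IWM_FLAG_FW_LOADED so the firmware is re-read on the next init:
 *
 *	sysctl -w hw.iwm.iwm0.fw_loaded=0
 */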
8275 
8276 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
8277 {
8278 	const struct sysctlnode *rnode;
8279 #ifdef IWM_DEBUG
8280 	const struct sysctlnode *cnode;
8281 #endif /* IWM_DEBUG */
8282 	int rc;
8283 
8284 	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
8285 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
8286 	    SYSCTL_DESCR("iwm global controls"),
8287 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
8288 		goto err;
8289 
8290 	iwm_sysctl_root_num = rnode->sysctl_num;
8291 
8292 #ifdef IWM_DEBUG
8293 	/* control debugging printfs */
8294 	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
8295 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
8296 	    "debug", SYSCTL_DESCR("Enable debugging output"),
8297 	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
8298 		goto err;
8299 #endif /* IWM_DEBUG */
8300 
8301 	return;
8302 
8303  err:
8304 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
8305 }
8306