xref: /netbsd-src/sys/dev/pci/if_iwm.c (revision 965ff70d6cc168e208e3ec6b725c8ce156e95fd0)
1 /*	$NetBSD: if_iwm.c,v 1.90 2024/11/10 11:44:36 mlelstv Exp $	*/
2 /*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
3 #define IEEE80211_NO_HT
4 /*
5  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
6  *   Author: Stefan Sperling <stsp@openbsd.org>
7  * Copyright (c) 2014 Fixup Software Ltd.
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016        Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <linuxwifi@intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
61  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
62  * Copyright(c) 2016        Intel Deutschland GmbH
63  * All rights reserved.
64  *
65  * Redistribution and use in source and binary forms, with or without
66  * modification, are permitted provided that the following conditions
67  * are met:
68  *
69  *  * Redistributions of source code must retain the above copyright
70  *    notice, this list of conditions and the following disclaimer.
71  *  * Redistributions in binary form must reproduce the above copyright
72  *    notice, this list of conditions and the following disclaimer in
73  *    the documentation and/or other materials provided with the
74  *    distribution.
75  *  * Neither the name Intel Corporation nor the names of its
76  *    contributors may be used to endorse or promote products derived
77  *    from this software without specific prior written permission.
78  *
79  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
80  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
81  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
82  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
83  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
84  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
85  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
86  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
87  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
88  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
89  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90  */
91 
92 /*-
93  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
94  *
95  * Permission to use, copy, modify, and distribute this software for any
96  * purpose with or without fee is hereby granted, provided that the above
97  * copyright notice and this permission notice appear in all copies.
98  *
99  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
100  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
101  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
102  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
103  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
104  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
105  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106  */
107 
108 #include <sys/cdefs.h>
109 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.90 2024/11/10 11:44:36 mlelstv Exp $");
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/kmem.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/socket.h>
119 #include <sys/sockio.h>
120 #include <sys/sysctl.h>
121 #include <sys/systm.h>
122 
123 #include <sys/cpu.h>
124 #include <sys/bus.h>
125 #include <sys/workqueue.h>
126 #include <machine/endian.h>
127 #include <sys/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 #include <dev/firmload.h>
133 
134 #include <net/bpf.h>
135 #include <net/if.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_ether.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/ip.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_radiotap.h>
146 
147 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
148 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
149 
150 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
151 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
152 
153 #ifdef IWM_DEBUG
154 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
155 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
156 int iwm_debug = 0;
157 #else
158 #define DPRINTF(x)	do { ; } while (0)
159 #define DPRINTFN(n, x)	do { ; } while (0)
160 #endif
161 
162 #include <dev/pci/if_iwmreg.h>
163 #include <dev/pci/if_iwmvar.h>
164 
/*
 * Channel numbers the NVM may enable on 7000-family devices.
 * The first IWM_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels;
 * the remainder are 5 GHz channels.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
173 
/*
 * Channel numbers the NVM may enable on 8000-family devices, which
 * support a larger 5 GHz channel set than the 7000 family.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

/* Number of leading 2.4 GHz entries in the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
184 
/*
 * Rate table indexed by driver rate index (ridx).  Each entry maps a
 * net80211 rate (in units of 500 kbit/s) to the PLCP signal value the
 * firmware expects, for both legacy and HT (SISO MCS) operation.
 * Entries with no HT equivalent use IWM_RATE_HT_SISO_MCS_INV_PLCP.
 */
static const struct iwm_rate {
	uint8_t rate;		/* rate in 500 kbit/s units */
	uint8_t plcp;		/* legacy PLCP signal value */
	uint8_t ht_plcp;	/* HT SISO MCS PLCP value, or INV */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
/* First CCK and first OFDM indices into iwm_rates[], and helpers. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
210 
#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif
224 
/* One NVM section read from the device, as raw bytes plus length. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* section contents (owned by reader) */
};
229 
/*
 * Deferred 802.11 state transition, queued on a workqueue so the
 * transition can sleep outside of net80211's locks.
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target state */
	int ns_arg;			/* argument passed to newstate */
	int ns_generation;		/* detects stale queued requests */
};
236 
237 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
238 static int	iwm_firmware_store_section(struct iwm_softc *,
239 		    enum iwm_ucode_type, uint8_t *, size_t);
240 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
241 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
242 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
243 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
244 #ifdef IWM_DEBUG
245 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
246 #endif
247 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
248 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
249 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
250 static int	iwm_nic_lock(struct iwm_softc *);
251 static void	iwm_nic_unlock(struct iwm_softc *);
252 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
253 		    uint32_t);
254 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
255 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
256 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
257 		    bus_size_t, bus_size_t);
258 static void	iwm_dma_contig_free(struct iwm_dma_info *);
259 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
260 static void	iwm_disable_rx_dma(struct iwm_softc *);
261 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
262 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
263 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
264 		    int);
265 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
266 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
267 static void	iwm_enable_rfkill_int(struct iwm_softc *);
268 static int	iwm_check_rfkill(struct iwm_softc *);
269 static void	iwm_enable_interrupts(struct iwm_softc *);
270 static void	iwm_restore_interrupts(struct iwm_softc *);
271 static void	iwm_disable_interrupts(struct iwm_softc *);
272 static void	iwm_ict_reset(struct iwm_softc *);
273 static int	iwm_set_hw_ready(struct iwm_softc *);
274 static int	iwm_prepare_card_hw(struct iwm_softc *);
275 static void	iwm_apm_config(struct iwm_softc *);
276 static int	iwm_apm_init(struct iwm_softc *);
277 static void	iwm_apm_stop(struct iwm_softc *);
278 static int	iwm_allow_mcast(struct iwm_softc *);
279 static int	iwm_start_hw(struct iwm_softc *);
280 static void	iwm_stop_device(struct iwm_softc *);
281 static void	iwm_nic_config(struct iwm_softc *);
282 static int	iwm_nic_rx_init(struct iwm_softc *);
283 static int	iwm_nic_tx_init(struct iwm_softc *);
284 static int	iwm_nic_init(struct iwm_softc *);
285 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
286 static int	iwm_post_alive(struct iwm_softc *);
287 static struct iwm_phy_db_entry *
288 		iwm_phy_db_get_section(struct iwm_softc *,
289 		    enum iwm_phy_db_section_type, uint16_t);
290 static int	iwm_phy_db_set_section(struct iwm_softc *,
291 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
292 static int	iwm_is_valid_channel(uint16_t);
293 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
294 static uint16_t iwm_channel_id_to_papd(uint16_t);
295 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
296 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
297 		    uint8_t **, uint16_t *, uint16_t);
298 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
299 		    void *);
300 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
301 		    enum iwm_phy_db_section_type, uint8_t);
302 static int	iwm_send_phy_db_data(struct iwm_softc *);
303 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
304 		    struct iwm_time_event_cmd_v1 *);
305 static int	iwm_send_time_event_cmd(struct iwm_softc *,
306 		    const struct iwm_time_event_cmd_v2 *);
307 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
308 		    uint32_t, uint32_t);
309 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
310 		    uint16_t, uint8_t *, uint16_t *);
311 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
312 		    uint16_t *, size_t);
313 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
314 		    const uint8_t *, size_t);
315 #ifndef IEEE80211_NO_HT
316 static void	iwm_setup_ht_rates(struct iwm_softc *);
317 static void	iwm_htprot_task(void *);
318 static void	iwm_update_htprot(struct ieee80211com *,
319 		    struct ieee80211_node *);
320 static int	iwm_ampdu_rx_start(struct ieee80211com *,
321 		    struct ieee80211_node *, uint8_t);
322 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
323 		    struct ieee80211_node *, uint8_t);
324 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
325 		    uint8_t, uint16_t, int);
326 #ifdef notyet
327 static int	iwm_ampdu_tx_start(struct ieee80211com *,
328 		    struct ieee80211_node *, uint8_t);
329 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
330 		    struct ieee80211_node *, uint8_t);
331 #endif
332 static void	iwm_ba_task(void *);
333 #endif
334 
335 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
336 		    const uint16_t *, const uint16_t *, const uint16_t *,
337 		    const uint16_t *, const uint16_t *);
338 static void	iwm_set_hw_address_8000(struct iwm_softc *,
339 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
340 static int	iwm_parse_nvm_sections(struct iwm_softc *,
341 		    struct iwm_nvm_section *);
342 static int	iwm_nvm_init(struct iwm_softc *);
343 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
344 		    const uint8_t *, uint32_t);
345 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
346 		    const uint8_t *, uint32_t);
347 static int	iwm_load_cpu_sections_7000(struct iwm_softc *,
348 		    struct iwm_fw_sects *, int , int *);
349 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
350 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
351 		    struct iwm_fw_sects *, int , int *);
352 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
353 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
354 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
355 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
356 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
357 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
358 		    enum iwm_ucode_type);
359 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
360 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
361 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
362 static int	iwm_get_signal_strength(struct iwm_softc *,
363 		    struct iwm_rx_phy_info *);
364 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
365 		    struct iwm_rx_packet *, struct iwm_rx_data *);
366 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
367 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
368 		    struct iwm_rx_data *);
369 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,		    struct iwm_node *);
370 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
371 		    struct iwm_rx_data *);
372 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
373 		    uint32_t);
374 #if 0
375 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
376 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
377 #endif
378 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
379 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
380 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
381 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
382 		    uint8_t, uint8_t);
383 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
384 		    uint8_t, uint8_t, uint32_t, uint32_t);
385 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
386 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
387 		    uint16_t, const void *);
388 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
389 		    uint32_t *);
390 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
391 		    const void *, uint32_t *);
392 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
393 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
394 #if 0
395 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
396 		    uint16_t);
397 #endif
398 static const struct iwm_rate *
399 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
400 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
401 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
402 		    struct ieee80211_node *, int);
403 static void	iwm_led_enable(struct iwm_softc *);
404 static void	iwm_led_disable(struct iwm_softc *);
405 static int	iwm_led_is_enabled(struct iwm_softc *);
406 static void	iwm_led_blink_timeout(void *);
407 static void	iwm_led_blink_start(struct iwm_softc *);
408 static void	iwm_led_blink_stop(struct iwm_softc *);
409 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
410 		    struct iwm_beacon_filter_cmd *);
411 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
412 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
413 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
414 		    int);
415 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
416 		    struct iwm_mac_power_cmd *);
417 static int	iwm_power_mac_update_mode(struct iwm_softc *,
418 		    struct iwm_node *);
419 static int	iwm_power_update_device(struct iwm_softc *);
420 #ifdef notyet
421 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
422 #endif
423 static int	iwm_disable_beacon_filter(struct iwm_softc *);
424 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
425 static int	iwm_add_aux_sta(struct iwm_softc *);
426 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
427 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
428 #ifdef notyet
429 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
430 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
431 #endif
432 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
433 		    struct iwm_scan_channel_cfg_lmac *, int);
434 static int	iwm_fill_probe_req(struct iwm_softc *,
435 		    struct iwm_scan_probe_req *);
436 static int	iwm_lmac_scan(struct iwm_softc *);
437 static int	iwm_config_umac_scan(struct iwm_softc *);
438 static int	iwm_umac_scan(struct iwm_softc *);
439 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
440 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
441 		    int *);
442 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
443 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
444 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
445 		    struct iwm_mac_data_sta *, int);
446 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
447 		    uint32_t, int);
448 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
449 static int	iwm_auth(struct iwm_softc *);
450 static int	iwm_assoc(struct iwm_softc *);
451 static void	iwm_calib_timeout(void *);
452 #ifndef IEEE80211_NO_HT
453 static void	iwm_setrates_task(void *);
454 static int	iwm_setrates(struct iwm_node *);
455 #endif
456 static int	iwm_media_change(struct ifnet *);
457 static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
458 		    int);
459 static void	iwm_newstate_cb(struct work *, void *);
460 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
461 static void	iwm_endscan(struct iwm_softc *);
462 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
463 		    struct ieee80211_node *);
464 static int	iwm_sf_config(struct iwm_softc *, int);
465 static int	iwm_send_bt_init_conf(struct iwm_softc *);
466 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
467 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
468 static int	iwm_init_hw(struct iwm_softc *);
469 static int	iwm_init(struct ifnet *);
470 static void	iwm_start(struct ifnet *);
471 static void	iwm_stop(struct ifnet *, int);
472 static void	iwm_watchdog(struct ifnet *);
473 static int	iwm_ioctl(struct ifnet *, u_long, void *);
474 #ifdef IWM_DEBUG
475 static const char *iwm_desc_lookup(uint32_t);
476 static void	iwm_nic_error(struct iwm_softc *);
477 static void	iwm_nic_umac_error(struct iwm_softc *);
478 #endif
479 static void	iwm_notif_intr(struct iwm_softc *);
480 static int	iwm_intr(void *);
481 static void	iwm_softintr(void *);
482 static int	iwm_preinit(struct iwm_softc *);
483 static void	iwm_attach_hook(device_t);
484 static void	iwm_attach(device_t, device_t, void *);
485 static int	iwm_config_complete(struct iwm_softc *);
486 #if 0
487 static void	iwm_init_task(void *);
488 static int	iwm_activate(device_t, enum devact);
489 static void	iwm_wakeup(struct iwm_softc *);
490 #endif
491 static void	iwm_radiotap_attach(struct iwm_softc *);
492 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
493 
static int iwm_sysctl_root_num;		/* sysctl node number for hw.iwm */
static int iwm_lar_disable;		/* sysctl: disable location-aware regulatory */

/* Default mobile country code ("ZZ" = world-wide regulatory domain). */
#ifndef	IWM_DEFAULT_MCC
#define	IWM_DEFAULT_MCC	"ZZ"
#endif
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;
501 
502 static int
503 iwm_firmload(struct iwm_softc *sc)
504 {
505 	struct iwm_fw_info *fw = &sc->sc_fw;
506 	firmware_handle_t fwh;
507 	int err;
508 
509 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
510 		return 0;
511 
512 	/* Open firmware image. */
513 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
514 	if (err) {
515 		aprint_error_dev(sc->sc_dev,
516 		    "could not get firmware handle %s\n", sc->sc_fwname);
517 		return err;
518 	}
519 
520 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
521 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
522 		fw->fw_rawdata = NULL;
523 	}
524 
525 	fw->fw_rawsize = firmware_get_size(fwh);
526 	/*
527 	 * Well, this is how the Linux driver checks it ....
528 	 */
529 	if (fw->fw_rawsize < sizeof(uint32_t)) {
530 		aprint_error_dev(sc->sc_dev,
531 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
532 		err = EINVAL;
533 		goto out;
534 	}
535 
536 	/* Read the firmware. */
537 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
538 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
539 	if (err) {
540 		aprint_error_dev(sc->sc_dev,
541 		    "could not read firmware %s\n", sc->sc_fwname);
542 		goto out;
543 	}
544 
545 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
546  out:
547 	/* caller will release memory, if necessary */
548 
549 	firmware_close(fwh);
550 	return err;
551 }
552 
553 /*
554  * just maintaining status quo.
555  */
556 static void
557 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
558 {
559 	struct ieee80211com *ic = &sc->sc_ic;
560 	struct ieee80211_frame *wh;
561 	uint8_t subtype;
562 
563 	wh = mtod(m, struct ieee80211_frame *);
564 
565 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
566 		return;
567 
568 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
569 
570 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
571 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
572 		return;
573 
574 	int chan = le32toh(sc->sc_last_phy_info.channel);
575 	if (chan < __arraycount(ic->ic_channels))
576 		ic->ic_curchan = &ic->ic_channels[chan];
577 }
578 
579 static int
580 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
581 {
582 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
583 
584 	if (dlen < sizeof(*l) ||
585 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
586 		return EINVAL;
587 
588 	/* we don't actually store anything for now, always use s/w crypto */
589 
590 	return 0;
591 }
592 
593 static int
594 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
595     uint8_t *data, size_t dlen)
596 {
597 	struct iwm_fw_sects *fws;
598 	struct iwm_fw_onesect *fwone;
599 
600 	if (type >= IWM_UCODE_TYPE_MAX)
601 		return EINVAL;
602 	if (dlen < sizeof(uint32_t))
603 		return EINVAL;
604 
605 	fws = &sc->sc_fw.fw_sects[type];
606 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
607 		return EINVAL;
608 
609 	fwone = &fws->fw_sect[fws->fw_count];
610 
611 	/* first 32bit are device load offset */
612 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
613 
614 	/* rest is data */
615 	fwone->fws_data = data + sizeof(uint32_t);
616 	fwone->fws_len = dlen - sizeof(uint32_t);
617 
618 	/* for freeing the buffer during driver unload */
619 	fwone->fws_alloc = data;
620 	fwone->fws_allocsize = dlen;
621 
622 	fws->fw_count++;
623 	fws->fw_totlen += fwone->fws_len;
624 
625 	return 0;
626 }
627 
/* Wire format of an IWM_UCODE_TLV_DEF_CALIB TLV in the firmware file. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;	/* default calibration triggers */
} __packed;
632 
633 static int
634 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
635 {
636 	const struct iwm_tlv_calib_data *def_calib = data;
637 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
638 
639 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
640 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
641 		    DEVNAME(sc), ucode_type));
642 		return EINVAL;
643 	}
644 
645 	sc->sc_default_calib[ucode_type].flow_trigger =
646 	    def_calib->calib.flow_trigger;
647 	sc->sc_default_calib[ucode_type].event_trigger =
648 	    def_calib->calib.event_trigger;
649 
650 	return 0;
651 }
652 
653 static int
654 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
655 {
656 	struct iwm_fw_info *fw = &sc->sc_fw;
657 	struct iwm_tlv_ucode_header *uhdr;
658 	struct iwm_ucode_tlv tlv;
659 	enum iwm_ucode_tlv_type tlv_type;
660 	uint8_t *data;
661 	int err, status;
662 	size_t len;
663 
664 	if (ucode_type != IWM_UCODE_TYPE_INIT &&
665 	    fw->fw_status == IWM_FW_STATUS_DONE)
666 		return 0;
667 
668 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
669 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
670 	} else {
671 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
672 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
673 	}
674 	status = fw->fw_status;
675 
676 	if (status == IWM_FW_STATUS_DONE)
677 		return 0;
678 
679 	err = iwm_firmload(sc);
680 	if (err) {
681 		aprint_error_dev(sc->sc_dev,
682 		    "could not read firmware %s (error %d)\n",
683 		    sc->sc_fwname, err);
684 		goto out;
685 	}
686 
687 	sc->sc_capaflags = 0;
688 	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
689 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
690 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
691 
692 	uhdr = (void *)fw->fw_rawdata;
693 	if (*(uint32_t *)fw->fw_rawdata != 0
694 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
695 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
696 		    sc->sc_fwname);
697 		err = EINVAL;
698 		goto out;
699 	}
700 
701 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
702 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
703 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
704 	    IWM_UCODE_API(le32toh(uhdr->ver)));
705 	data = uhdr->data;
706 	len = fw->fw_rawsize - sizeof(*uhdr);
707 
708 	while (len >= sizeof(tlv)) {
709 		size_t tlv_len;
710 		void *tlv_data;
711 
712 		memcpy(&tlv, data, sizeof(tlv));
713 		tlv_len = le32toh(tlv.length);
714 		tlv_type = le32toh(tlv.type);
715 
716 		len -= sizeof(tlv);
717 		data += sizeof(tlv);
718 		tlv_data = data;
719 
720 		if (len < tlv_len) {
721 			aprint_error_dev(sc->sc_dev,
722 			    "firmware too short: %zu bytes\n", len);
723 			err = EINVAL;
724 			goto parse_out;
725 		}
726 
727 		switch (tlv_type) {
728 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
729 			if (tlv_len < sizeof(uint32_t)) {
730 				err = EINVAL;
731 				goto parse_out;
732 			}
733 			sc->sc_capa_max_probe_len
734 			    = le32toh(*(uint32_t *)tlv_data);
735 			/* limit it to something sensible */
736 			if (sc->sc_capa_max_probe_len >
737 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
738 				err = EINVAL;
739 				goto parse_out;
740 			}
741 			break;
742 		case IWM_UCODE_TLV_PAN:
743 			if (tlv_len) {
744 				err = EINVAL;
745 				goto parse_out;
746 			}
747 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
748 			break;
749 		case IWM_UCODE_TLV_FLAGS:
750 			if (tlv_len < sizeof(uint32_t)) {
751 				err = EINVAL;
752 				goto parse_out;
753 			}
754 			if (tlv_len % sizeof(uint32_t)) {
755 				err = EINVAL;
756 				goto parse_out;
757 			}
758 			/*
759 			 * Apparently there can be many flags, but Linux driver
760 			 * parses only the first one, and so do we.
761 			 *
762 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
763 			 * Intentional or a bug?  Observations from
764 			 * current firmware file:
765 			 *  1) TLV_PAN is parsed first
766 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
767 			 * ==> this resets TLV_PAN to itself... hnnnk
768 			 */
769 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
770 			break;
771 		case IWM_UCODE_TLV_CSCHEME:
772 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
773 			if (err)
774 				goto parse_out;
775 			break;
776 		case IWM_UCODE_TLV_NUM_OF_CPU: {
777 			uint32_t num_cpu;
778 			if (tlv_len != sizeof(uint32_t)) {
779 				err = EINVAL;
780 				goto parse_out;
781 			}
782 			num_cpu = le32toh(*(uint32_t *)tlv_data);
783 			if (num_cpu == 2) {
784 				fw->fw_sects[IWM_UCODE_TYPE_REGULAR].is_dual_cpus =
785 				    true;
786 				fw->fw_sects[IWM_UCODE_TYPE_INIT].is_dual_cpus =
787 				    true;
788 				fw->fw_sects[IWM_UCODE_TYPE_WOW].is_dual_cpus =
789 				    true;
790 			} else if (num_cpu < 1 || num_cpu > 2) {
791 				err = EINVAL;
792 				goto parse_out;
793 			}
794 			break;
795 		}
796 		case IWM_UCODE_TLV_SEC_RT:
797 			err = iwm_firmware_store_section(sc,
798 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
799 			if (err)
800 				goto parse_out;
801 			break;
802 		case IWM_UCODE_TLV_SEC_INIT:
803 			err = iwm_firmware_store_section(sc,
804 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
805 			if (err)
806 				goto parse_out;
807 			break;
808 		case IWM_UCODE_TLV_SEC_WOWLAN:
809 			err = iwm_firmware_store_section(sc,
810 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
811 			if (err)
812 				goto parse_out;
813 			break;
814 		case IWM_UCODE_TLV_DEF_CALIB:
815 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
816 				err = EINVAL;
817 				goto parse_out;
818 			}
819 			err = iwm_set_default_calib(sc, tlv_data);
820 			if (err)
821 				goto parse_out;
822 			break;
823 		case IWM_UCODE_TLV_PHY_SKU:
824 			if (tlv_len != sizeof(uint32_t)) {
825 				err = EINVAL;
826 				goto parse_out;
827 			}
828 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
829 			break;
830 
831 		case IWM_UCODE_TLV_API_CHANGES_SET: {
832 			struct iwm_ucode_api *api;
833 			uint32_t idx, bits;
834 			int i;
835 			if (tlv_len != sizeof(*api)) {
836 				err = EINVAL;
837 				goto parse_out;
838 			}
839 			api = (struct iwm_ucode_api *)tlv_data;
840 			idx = le32toh(api->api_index);
841 			bits = le32toh(api->api_flags);
842 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
843 				err = EINVAL;
844 				goto parse_out;
845 			}
846 			for (i = 0; i < 32; i++) {
847 				if (!ISSET(bits, __BIT(i)))
848 					continue;
849 				setbit(sc->sc_ucode_api, i + (32 * idx));
850 			}
851 			break;
852 		}
853 
854 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
855 			struct iwm_ucode_capa *capa;
856 			uint32_t idx, bits;
857 			int i;
858 			if (tlv_len != sizeof(*capa)) {
859 				err = EINVAL;
860 				goto parse_out;
861 			}
862 			capa = (struct iwm_ucode_capa *)tlv_data;
863 			idx = le32toh(capa->api_index);
864 			bits = le32toh(capa->api_capa);
865 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
866 				err = EINVAL;
867 				goto parse_out;
868 			}
869 			for (i = 0; i < 32; i++) {
870 				if (!ISSET(bits, __BIT(i)))
871 					continue;
872 				setbit(sc->sc_enabled_capa, i + (32 * idx));
873 			}
874 			break;
875 		}
876 
877 		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
878 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
879 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
880 		case IWM_UCODE_TLV_FW_MEM_SEG:
881 			/* ignore, not used by current driver */
882 			break;
883 
884 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
885 			err = iwm_firmware_store_section(sc,
886 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
887 			    tlv_len);
888 			if (err)
889 				goto parse_out;
890 			break;
891 
892 		case IWM_UCODE_TLV_PAGING: {
893 			uint32_t paging_mem_size;
894 			if (tlv_len != sizeof(paging_mem_size)) {
895 				err = EINVAL;
896 				goto parse_out;
897 			}
898 			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
899 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
900 				err = EINVAL;
901 				goto parse_out;
902 			}
903 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
904 				err = EINVAL;
905 				goto parse_out;
906 			}
907 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
908 			    paging_mem_size;
909 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
910 			    paging_mem_size;
911 			break;
912 		}
913 
914 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
915 			if (tlv_len != sizeof(uint32_t)) {
916 				err = EINVAL;
917 				goto parse_out;
918 			}
919 			sc->sc_capa_n_scan_channels =
920 			  le32toh(*(uint32_t *)tlv_data);
921 			break;
922 
923 		case IWM_UCODE_TLV_FW_VERSION:
924 			if (tlv_len != sizeof(uint32_t) * 3) {
925 				err = EINVAL;
926 				goto parse_out;
927 			}
928 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
929 			    "%d.%d.%d",
930 			    le32toh(((uint32_t *)tlv_data)[0]),
931 			    le32toh(((uint32_t *)tlv_data)[1]),
932 			    le32toh(((uint32_t *)tlv_data)[2]));
933 			break;
934 
935 		default:
936 			DPRINTF(("%s: unknown firmware section %d, abort\n",
937 			    DEVNAME(sc), tlv_type));
938 			err = EINVAL;
939 			goto parse_out;
940 		}
941 
942 		len -= roundup(tlv_len, 4);
943 		data += roundup(tlv_len, 4);
944 	}
945 
946 	KASSERT(err == 0);
947 
948  parse_out:
949 	if (err) {
950 		aprint_error_dev(sc->sc_dev,
951 		    "firmware parse error, section type %d\n", tlv_type);
952 	}
953 
954 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
955 		aprint_error_dev(sc->sc_dev,
956 		    "device uses unsupported power ops\n");
957 		err = ENOTSUP;
958 	}
959 
960  out:
961 	if (err)
962 		fw->fw_status = IWM_FW_STATUS_NONE;
963 	else
964 		fw->fw_status = IWM_FW_STATUS_DONE;
965 	wakeup(&sc->sc_fw);
966 
967 	if (err && fw->fw_rawdata != NULL) {
968 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
969 		fw->fw_rawdata = NULL;
970 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
971 		/* don't touch fw->fw_status */
972 		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
973 	}
974 	return err;
975 }
976 
977 static uint32_t
978 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
979 {
980 	IWM_WRITE(sc,
981 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
982 	IWM_BARRIER_READ_WRITE(sc);
983 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
984 }
985 
986 static void
987 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
988 {
989 	IWM_WRITE(sc,
990 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
991 	IWM_BARRIER_WRITE(sc);
992 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
993 }
994 
995 #ifdef IWM_DEBUG
996 static int
997 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
998 {
999 	int offs;
1000 	uint32_t *vals = buf;
1001 
1002 	if (iwm_nic_lock(sc)) {
1003 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1004 		for (offs = 0; offs < dwords; offs++)
1005 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1006 		iwm_nic_unlock(sc);
1007 		return 0;
1008 	}
1009 	return EBUSY;
1010 }
1011 #endif
1012 
1013 static int
1014 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1015 {
1016 	int offs;
1017 	const uint32_t *vals = buf;
1018 
1019 	if (iwm_nic_lock(sc)) {
1020 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1021 		/* WADDR auto-increments */
1022 		for (offs = 0; offs < dwords; offs++) {
1023 			uint32_t val = vals ? vals[offs] : 0;
1024 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1025 		}
1026 		iwm_nic_unlock(sc);
1027 		return 0;
1028 	}
1029 	return EBUSY;
1030 }
1031 
1032 static int
1033 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1034 {
1035 	return iwm_write_mem(sc, addr, &val, 1);
1036 }
1037 
1038 static int
1039 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1040     int timo)
1041 {
1042 	for (;;) {
1043 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1044 			return 1;
1045 		}
1046 		if (timo < 10) {
1047 			return 0;
1048 		}
1049 		timo -= 10;
1050 		DELAY(10);
1051 	}
1052 }
1053 
1054 static int
1055 iwm_nic_lock(struct iwm_softc *sc)
1056 {
1057 	int rv = 0;
1058 
1059 	if (sc->sc_cmd_hold_nic_awake)
1060 		return 1;
1061 
1062 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1063 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1064 
1065 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
1066 		DELAY(2);
1067 
1068 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1069 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1070 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1071 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
1072 		rv = 1;
1073 	} else {
1074 		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
1075 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
1076 	}
1077 
1078 	return rv;
1079 }
1080 
1081 static void
1082 iwm_nic_unlock(struct iwm_softc *sc)
1083 {
1084 
1085 	if (sc->sc_cmd_hold_nic_awake)
1086 		return;
1087 
1088 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1089 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1090 }
1091 
1092 static void
1093 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1094     uint32_t mask)
1095 {
1096 	uint32_t val;
1097 
1098 	/* XXX: no error path? */
1099 	if (iwm_nic_lock(sc)) {
1100 		val = iwm_read_prph(sc, reg) & mask;
1101 		val |= bits;
1102 		iwm_write_prph(sc, reg, val);
1103 		iwm_nic_unlock(sc);
1104 	}
1105 }
1106 
1107 static void
1108 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1109 {
1110 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1111 }
1112 
1113 static void
1114 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1115 {
1116 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1117 }
1118 
/*
 * Allocate a physically contiguous, aligned DMA region of `size' bytes,
 * map it into kernel virtual space, zero it, and record the virtual and
 * bus addresses in `dma'.  Returns 0 or a bus_dma(9) error code; on
 * failure any partially created state is released via
 * iwm_dma_contig_free().
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* One segment only, so the region is physically contiguous. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Zero the region and flush it out so the device sees zeroes. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
1158 
1159 static void
1160 iwm_dma_contig_free(struct iwm_dma_info *dma)
1161 {
1162 	if (dma->map != NULL) {
1163 		if (dma->vaddr != NULL) {
1164 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1165 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1166 			bus_dmamap_unload(dma->tag, dma->map);
1167 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1168 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1169 			dma->vaddr = NULL;
1170 		}
1171 		bus_dmamap_destroy(dma->tag, dma->map);
1172 		dma->map = NULL;
1173 	}
1174 }
1175 
/*
 * Allocate the RX descriptor ring, the RX status area, and a DMA map
 * plus an initial receive buffer for every slot.  Returns 0 or an
 * error; on failure everything already allocated is released through
 * iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		/* BUS_DMA_ALLOCNOW: reserve resources up front so later
		 * RX buffer loads cannot fail for lack of them. */
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an initial receive buffer to this slot. */
		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1226 
1227 static void
1228 iwm_disable_rx_dma(struct iwm_softc *sc)
1229 {
1230 	int ntries;
1231 
1232 	if (iwm_nic_lock(sc)) {
1233 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1234 		for (ntries = 0; ntries < 1000; ntries++) {
1235 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1236 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1237 				break;
1238 			DELAY(10);
1239 		}
1240 		iwm_nic_unlock(sc);
1241 	}
1242 }
1243 
/*
 * Reset the RX ring to its initial state: rewind the ring index and
 * clear the status area so stale write pointers are not reused.
 * Buffers and maps stay loaded; only indices/status are reset.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1252 
1253 static void
1254 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1255 {
1256 	int i;
1257 
1258 	iwm_dma_contig_free(&ring->desc_dma);
1259 	iwm_dma_contig_free(&ring->stat_dma);
1260 
1261 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1262 		struct iwm_rx_data *data = &ring->data[i];
1263 
1264 		if (data->m != NULL) {
1265 			bus_size_t sz = data->m->m_pkthdr.len;
1266 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1267 			    sz, BUS_DMASYNC_POSTREAD);
1268 			bus_dmamap_unload(sc->sc_dmat, data->map);
1269 			m_freem(data->m);
1270 			data->m = NULL;
1271 		}
1272 		if (data->map != NULL) {
1273 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1274 			data->map = NULL;
1275 		}
1276 	}
1277 }
1278 
/*
 * Allocate a TX ring: the TFD descriptor array for every queue, and --
 * for queues up to and including the command queue -- the per-slot
 * device command buffers and DMA maps.  Returns 0 or an error; partial
 * allocations are released via iwm_free_tx_ring() on failure.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute each slot's command and scratch bus addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		/* Bus address of the scratch field inside this slot's
		 * iwm_tx_cmd (which follows the command header). */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: we must have walked exactly over the cmd buffer. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1348 
/*
 * Counterpart of iwm_set_cmd_in_flight(): once all host commands have
 * completed, drop the MAC access request that kept the NIC awake.
 * Only relevant on NICs with the APMG wake-up workaround.
 */
static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	/* Should only be called while the wake request is held. */
	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
1366 
/*
 * Keep the NIC awake while a host command is in flight (APMG wake-up
 * workaround NICs only).  Asserts MAC_ACCESS_REQ and polls for the
 * clocks to come up; on timeout the request is withdrawn and EIO is
 * returned.  Released later by iwm_clear_cmd_in_flight().
 */
static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		/* Same handshake as iwm_nic_lock(): wait for clocks. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			/* Timed out: undo the access request. */
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}
/*
 * Reset a TX ring to its initial empty state: free any queued mbufs,
 * zero the descriptors, rewind the indices, and clear this queue's bit
 * in the queue-full mask.  For the command queue, also release the
 * NIC-awake hold if one is pending.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_size_t sz = data->m->m_pkthdr.len;
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    sz, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
		iwm_clear_cmd_in_flight(sc);
}
1428 
1429 static void
1430 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1431 {
1432 	int i;
1433 
1434 	iwm_dma_contig_free(&ring->desc_dma);
1435 	iwm_dma_contig_free(&ring->cmd_dma);
1436 
1437 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1438 		struct iwm_tx_data *data = &ring->data[i];
1439 
1440 		if (data->m != NULL) {
1441 			bus_size_t sz = data->m->m_pkthdr.len;
1442 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1443 			    sz, BUS_DMASYNC_POSTWRITE);
1444 			bus_dmamap_unload(sc->sc_dmat, data->map);
1445 			m_freem(data->m);
1446 			data->m = NULL;
1447 		}
1448 		if (data->map != NULL) {
1449 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1450 			data->map = NULL;
1451 		}
1452 	}
1453 }
1454 
/*
 * Mask all interrupts except RF-kill, so the driver still hears about
 * radio switch changes while otherwise quiesced.  The mask is cached
 * in sc_intmask for iwm_restore_interrupts().
 */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1461 
1462 static int
1463 iwm_check_rfkill(struct iwm_softc *sc)
1464 {
1465 	uint32_t v;
1466 	int s;
1467 	int rv;
1468 
1469 	s = splnet();
1470 
1471 	/*
1472 	 * "documentation" is not really helpful here:
1473 	 *  27:	HW_RF_KILL_SW
1474 	 *	Indicates state of (platform's) hardware RF-Kill switch
1475 	 *
1476 	 * But apparently when it's off, it's on ...
1477 	 */
1478 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1479 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1480 	if (rv) {
1481 		sc->sc_flags |= IWM_FLAG_RFKILL;
1482 	} else {
1483 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1484 	}
1485 
1486 	splx(s);
1487 	return rv;
1488 }
1489 
/*
 * Enable the driver's full interrupt set (IWM_CSR_INI_SET_MASK) and
 * cache it in sc_intmask for iwm_restore_interrupts().
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1496 
/* Re-program the interrupt mask from the cached sc_intmask value. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1502 
/*
 * Mask all device interrupts and acknowledge anything pending in both
 * the main and FH interrupt status registers, under splnet so the
 * interrupt handler cannot race the update.  sc_intmask is left
 * untouched so iwm_restore_interrupts() can undo this.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1516 
/*
 * (Re)initialize the Interrupt Cause Table: clear the table memory,
 * point the device at it, and switch both hardware and driver into
 * ICT interrupt mode.  Interrupts are disabled around the update and
 * re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Zero the table and push it to the device before enabling. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, sc->ict_dma.size,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending, then unmask. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1540 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Assert NIC_READY and poll (briefly) for the hardware to reflect it.
 * Returns nonzero when the NIC reported ready; in that case also tell
 * the device the OS is alive via the mailbox register.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT
1561 
1562 static int
1563 iwm_prepare_card_hw(struct iwm_softc *sc)
1564 {
1565 	int t = 0;
1566 
1567 	if (iwm_set_hw_ready(sc))
1568 		return 0;
1569 
1570 	DELAY(100);
1571 
1572 	/* If HW is not ready, prepare the conditions to check again */
1573 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1574 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1575 
1576 	do {
1577 		if (iwm_set_hw_ready(sc))
1578 			return 0;
1579 		DELAY(200);
1580 		t += 200;
1581 	} while (t < 150000);
1582 
1583 	return ETIMEDOUT;
1584 }
1585 
1586 static void
1587 iwm_apm_config(struct iwm_softc *sc)
1588 {
1589 	pcireg_t reg;
1590 
1591 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1592 	    sc->sc_cap_off + PCIE_LCSR);
1593 	if (reg & PCIE_LCSR_ASPM_L1) {
1594 		/* Um the Linux driver prints "Disabling L0S for this one ... */
1595 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1596 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1597 	} else {
1598 		/* ... and "Enabling" here */
1599 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1600 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1601 	}
1602 }
1603 
1604 /*
1605  * Start up NIC's basic functionality after it has been reset
1606  * e.g. after platform boot or shutdown.
1607  * NOTE:  This does not load uCode nor start the embedded processor
1608  */
1609 static int
1610 iwm_apm_init(struct iwm_softc *sc)
1611 {
1612 	int err = 0;
1613 
1614 	/* Disable L0S exit timer (platform NMI workaround) */
1615 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1616 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1617 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1618 	}
1619 
1620 	/*
1621 	 * Disable L0s without affecting L1;
1622 	 *  don't wait for ICH L0s (ICH bug W/A)
1623 	 */
1624 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1625 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1626 
1627 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1628 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1629 
1630 	/*
1631 	 * Enable HAP INTA (interrupt from management bus) to
1632 	 * wake device's PCI Express link L1a -> L0s
1633 	 */
1634 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1635 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1636 
1637 	iwm_apm_config(sc);
1638 
1639 #if 0 /* not for 7k/8k */
1640 	/* Configure analog phase-lock-loop before activating to D0A */
1641 	if (trans->cfg->base_params->pll_cfg_val)
1642 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1643 		    trans->cfg->base_params->pll_cfg_val);
1644 #endif
1645 
1646 	/*
1647 	 * Set "initialization complete" bit to move adapter from
1648 	 * D0U* --> D0A* (powered-up active) state.
1649 	 */
1650 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1651 
1652 	/*
1653 	 * Wait for clock stabilization; once stabilized, access to
1654 	 * device-internal resources is supported, e.g. iwm_write_prph()
1655 	 * and accesses to uCode SRAM.
1656 	 */
1657 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1658 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1659 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1660 		aprint_error_dev(sc->sc_dev,
1661 		    "timeout waiting for clock stabilization\n");
1662 		err = ETIMEDOUT;
1663 		goto out;
1664 	}
1665 
1666 	if (sc->host_interrupt_operation_mode) {
1667 		/*
1668 		 * This is a bit of an abuse - This is needed for 7260 / 3160
1669 		 * only check host_interrupt_operation_mode even if this is
1670 		 * not related to host_interrupt_operation_mode.
1671 		 *
1672 		 * Enable the oscillator to count wake up time for L1 exit. This
1673 		 * consumes slightly more power (100uA) - but allows to be sure
1674 		 * that we wake up from L1 on time.
1675 		 *
1676 		 * This looks weird: read twice the same register, discard the
1677 		 * value, set a bit, and yet again, read that same register
1678 		 * just to discard the value. But that's the way the hardware
1679 		 * seems to like it.
1680 		 */
1681 		iwm_read_prph(sc, IWM_OSC_CLK);
1682 		iwm_read_prph(sc, IWM_OSC_CLK);
1683 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1684 		iwm_read_prph(sc, IWM_OSC_CLK);
1685 		iwm_read_prph(sc, IWM_OSC_CLK);
1686 	}
1687 
1688 	/*
1689 	 * Enable DMA clock and wait for it to stabilize.
1690 	 *
1691 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1692 	 * do not disable clocks.  This preserves any hardware bits already
1693 	 * set by default in "CLK_CTRL_REG" after reset.
1694 	 */
1695 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1696 		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1697 		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1698 		DELAY(20);
1699 
1700 		/* Disable L1-Active */
1701 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1702 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1703 
1704 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1705 		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1706 		    IWM_APMG_RTC_INT_STT_RFKILL);
1707 	}
1708  out:
1709 	if (err)
1710 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1711 	return err;
1712 }
1713 
1714 static void
1715 iwm_apm_stop(struct iwm_softc *sc)
1716 {
1717 	/* stop device's busmaster DMA activity */
1718 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1719 
1720 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1721 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1722 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1723 		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
1724 	DPRINTF(("iwm apm stop\n"));
1725 }
1726 
1727 static int
1728 iwm_start_hw(struct iwm_softc *sc)
1729 {
1730 	int err;
1731 
1732 	err = iwm_prepare_card_hw(sc);
1733 	if (err)
1734 		return err;
1735 
1736 	/* Reset the entire device */
1737 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1738 	DELAY(10);
1739 
1740 	err = iwm_apm_init(sc);
1741 	if (err)
1742 		return err;
1743 
1744 	iwm_enable_rfkill_int(sc);
1745 	iwm_check_rfkill(sc);
1746 
1747 	return 0;
1748 }
1749 
/*
 * Fully quiesce the device: mask interrupts, halt the TX scheduler and
 * all DMA channels, reset every ring, power down busmaster clocks
 * (7000 family), stop the APM, and finally soft-reset the on-board
 * processor.  RF-kill interrupts are re-armed at the end so switch
 * changes are still noticed while stopped.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to ~4ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			DELAY(5);
			iwm_nic_unlock(sc);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1816 
/*
 * Program the HW_IF_CONFIG register from the hardware revision and the
 * radio configuration that the firmware's PHY_SKU TLV provided
 * (sc_fw_phy_config): MAC step/dash plus radio type/step/dash.
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio config fields from the firmware PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC step/dash come from the hardware revision register. */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1856 
/*
 * Program the RX DMA engine: clear the status area, stop the channel,
 * point the hardware at the descriptor ring and status area, and
 * enable RX with the driver's channel-0 configuration.  Returns 0, or
 * EBUSY if the NIC lock could not be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Clear the status area so the device starts from clean state. */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Quiesce the channel and zero all RX pointers before enabling. */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1908 
/*
 * Program the TX DMA engine: stop the scheduler, install the "keep
 * warm" page address and the per-queue TX descriptor ring addresses,
 * then let the scheduler auto-activate.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1939 
/*
 * Bring the NIC up: APM init, power-source selection on 7000-family
 * devices, NIC configuration, then RX and TX DMA engine setup.
 *
 * Returns 0 on success or an errno from the RX/TX init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int err;

	iwm_apm_init(sc);
	/* 7000 family: select VMAIN as the power source. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
	}

	iwm_nic_config(sc);

	err = iwm_nic_rx_init(sc);
	if (err)
		return err;

	err = iwm_nic_tx_init(sc);
	if (err)
		return err;

	DPRINTF(("shadow registers enabled\n"));
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1967 
/*
 * Map access categories to firmware TX FIFO numbers; the names suggest
 * the index is an EDCA AC (VO, VI, BE, BK order) — confirm at callers.
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
1974 
/*
 * Enable TX queue 'qid' and attach it to FIFO 'fifo'.  The command
 * queue is configured directly via scheduler registers; other queues
 * are configured by the firmware via an IWM_SCD_QUEUE_CFG command.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or an
 * errno from the queue-config command.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Deactivate the queue before reconfiguring it. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* No aggregation on the command queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc))
			return EBUSY;
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT
		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc))
			return EBUSY;
		/* Activate the queue and bind it to its FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int err;

		iwm_nic_unlock(sc);

		/* Let the firmware configure non-command queues. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
		    &cmd);
		if (err)
			return err;

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the raw queue number into
	 * IWM_SCD_EN_CTRL; newer iwm ports write (1 << qid) here —
	 * confirm against upstream whether a bit mask is intended.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));

	return 0;
}
2051 
/*
 * Post-firmware-"alive" initialization: adopt the scheduler SRAM base
 * reported by the firmware, reset the ICT table, clear the scheduler
 * context in SRAM, enable the command queue, activate the scheduler
 * and the TX DMA channels.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or an
 * errno from a sub-step.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware may relocate the scheduler area; trust its address. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		sc->sched_base = base;
	}

	iwm_nic_unlock(sc);

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	iwm_nic_unlock(sc);

	return 0;
}
2121 
2122 static struct iwm_phy_db_entry *
2123 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2124     uint16_t chg_id)
2125 {
2126 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2127 
2128 	if (type >= IWM_PHY_DB_MAX)
2129 		return NULL;
2130 
2131 	switch (type) {
2132 	case IWM_PHY_DB_CFG:
2133 		return &phy_db->cfg;
2134 	case IWM_PHY_DB_CALIB_NCH:
2135 		return &phy_db->calib_nch;
2136 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2137 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2138 			return NULL;
2139 		return &phy_db->calib_ch_group_papd[chg_id];
2140 	case IWM_PHY_DB_CALIB_CHG_TXP:
2141 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2142 			return NULL;
2143 		return &phy_db->calib_ch_group_txp[chg_id];
2144 	default:
2145 		return NULL;
2146 	}
2147 	return NULL;
2148 }
2149 
2150 static int
2151 iwm_phy_db_set_section(struct iwm_softc *sc,
2152     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2153 {
2154 	struct iwm_phy_db_entry *entry;
2155 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2156 	uint16_t chg_id = 0;
2157 
2158 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2159 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2160 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2161 
2162 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2163 	if (!entry)
2164 		return EINVAL;
2165 
2166 	if (entry->data)
2167 		kmem_intr_free(entry->data, entry->size);
2168 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2169 	if (!entry->data) {
2170 		entry->size = 0;
2171 		return ENOMEM;
2172 	}
2173 	memcpy(entry->data, phy_db_notif->data, size);
2174 	entry->size = size;
2175 
2176 	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2177 	    __func__, __LINE__, type, size, entry->data));
2178 
2179 	return 0;
2180 }
2181 
/*
 * Return 1 if ch_id is a channel number this driver knows about:
 * 2GHz channels 1-14, and the 5GHz groups 36-64 and 100-140 (multiples
 * of four) and 145-165 (numbers congruent to 1 mod 4; note this also
 * admits the non-existent channel 145, as in other iwm ports).
 *
 * Fix: the original accepted ch_id 0 because the 2GHz test was the
 * unbounded "ch_id <= 14"; channel 0 does not exist, and rejecting it
 * makes the downstream index/group lookups return their 0xff
 * "invalid" marker instead of a bogus group.
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	if ((1 <= ch_id && ch_id <= 14) ||
	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
		return 1;
	return 0;
}
2192 
/*
 * Map a channel number to its index in the firmware's channel table
 * (2GHz channels first, then the 5GHz groups).  Returns 0xff for
 * channels iwm_is_valid_channel() rejects.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t index;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		index = ch_id - 1;
	else if (ch_id <= 64)
		index = (ch_id + 20) / 4;
	else if (ch_id <= 140)
		index = (ch_id - 12) / 4;
	else
		index = (ch_id - 13) / 4;

	return index;
}
2207 
2208 
/*
 * Map a channel number to its PAPD calibration channel group.
 * Returns 0xff for invalid channels.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		group = 0;
	else if (ch_id >= 36 && ch_id <= 64)
		group = 1;
	else if (ch_id >= 100 && ch_id <= 140)
		group = 2;
	else
		group = 3;

	return group;
}
2223 
2224 static uint16_t
2225 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2226 {
2227 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2228 	struct iwm_phy_db_chg_txp *txp_chg;
2229 	int i;
2230 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2231 
2232 	if (ch_index == 0xff)
2233 		return 0xff;
2234 
2235 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2236 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2237 		if (!txp_chg)
2238 			return 0xff;
2239 		/*
2240 		 * Looking for the first channel group the max channel
2241 		 * of which is higher than the requested channel.
2242 		 */
2243 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2244 			return i;
2245 	}
2246 	return 0xff;
2247 }
2248 
2249 static int
2250 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2251     uint16_t *size, uint16_t ch_id)
2252 {
2253 	struct iwm_phy_db_entry *entry;
2254 	uint16_t ch_group_id = 0;
2255 
2256 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2257 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2258 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2259 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2260 
2261 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2262 	if (!entry)
2263 		return EINVAL;
2264 
2265 	*data = entry->data;
2266 	*size = entry->size;
2267 
2268 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2269 		       __func__, __LINE__, type, *size));
2270 
2271 	return 0;
2272 }
2273 
2274 static int
2275 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2276     void *data)
2277 {
2278 	struct iwm_phy_db_cmd phy_db_cmd;
2279 	struct iwm_host_cmd cmd = {
2280 		.id = IWM_PHY_DB_CMD,
2281 		.flags = IWM_CMD_ASYNC,
2282 	};
2283 
2284 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2285 	    type, length));
2286 
2287 	phy_db_cmd.type = le16toh(type);
2288 	phy_db_cmd.length = le16toh(length);
2289 
2290 	cmd.data[0] = &phy_db_cmd;
2291 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2292 	cmd.data[1] = data;
2293 	cmd.len[1] = length;
2294 
2295 	return iwm_send_cmd(sc, &cmd);
2296 }
2297 
2298 static int
2299 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2300     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2301 {
2302 	uint16_t i;
2303 	int err;
2304 	struct iwm_phy_db_entry *entry;
2305 
2306 	/* Send all the channel-specific groups to operational fw */
2307 	for (i = 0; i < max_ch_groups; i++) {
2308 		entry = iwm_phy_db_get_section(sc, type, i);
2309 		if (!entry)
2310 			return EINVAL;
2311 
2312 		if (!entry->size)
2313 			continue;
2314 
2315 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2316 		if (err) {
2317 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2318 			    "err %d\n", DEVNAME(sc), type, i, err));
2319 			return err;
2320 		}
2321 
2322 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2323 		    DEVNAME(sc), type, i));
2324 
2325 		DELAY(1000);
2326 	}
2327 
2328 	return 0;
2329 }
2330 
2331 static int
2332 iwm_send_phy_db_data(struct iwm_softc *sc)
2333 {
2334 	uint8_t *data = NULL;
2335 	uint16_t size = 0;
2336 	int err;
2337 
2338 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2339 	if (err)
2340 		return err;
2341 
2342 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2343 	if (err)
2344 		return err;
2345 
2346 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2347 	    &data, &size, 0);
2348 	if (err)
2349 		return err;
2350 
2351 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2352 	if (err)
2353 		return err;
2354 
2355 	err = iwm_phy_db_send_all_channel_groups(sc,
2356 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2357 	if (err)
2358 		return err;
2359 
2360 	err = iwm_phy_db_send_all_channel_groups(sc,
2361 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2362 	if (err)
2363 		return err;
2364 
2365 	return 0;
2366 }
2367 
2368 /*
2369  * For the high priority TE use a time event type that has similar priority to
2370  * the FW's action scan priority.
2371  */
2372 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2373 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2374 
2375 /* used to convert from time event API v2 to v1 */
2376 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2377 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
2378 static inline uint16_t
2379 iwm_te_v2_get_notify(uint16_t policy)
2380 {
2381 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2382 }
2383 
/*
 * Extract the dependency-policy bits from a v2 time-event policy word.
 * NOTE(review): the shift uses IWM_TE_V2_PLACEMENT_POS rather than a
 * dedicated dependency-position constant — matches other iwm ports,
 * but worth confirming against the firmware API headers.
 */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
2390 
2391 static inline uint16_t
2392 iwm_te_v2_get_absence(uint16_t policy)
2393 {
2394 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2395 }
2396 
/*
 * Convert a time event command from the v2 firmware API layout to the
 * v1 layout used by older firmware.  Most fields copy through; the
 * repeat count and the packed v2 policy bits need translation.
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 uses its own "endless repeat" magic value. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* Unpack the v2 policy bit-field into the separate v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2420 
2421 static int
2422 iwm_send_time_event_cmd(struct iwm_softc *sc,
2423     const struct iwm_time_event_cmd_v2 *cmd)
2424 {
2425 	struct iwm_time_event_cmd_v1 cmd_v1;
2426 
2427 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2428 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2429 		    cmd);
2430 
2431 	iwm_te_v2_to_v1(cmd, &cmd_v1);
2432 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2433 	    &cmd_v1);
2434 }
2435 
/*
 * Ask the firmware to protect a session (e.g. during association) by
 * scheduling an aggressive-association time event of the given
 * duration.  Errors from the command are ignored — presumably
 * best-effort; TODO confirm whether failure should be reported.
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Start immediately (no scheduled apply time). */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	iwm_send_time_event_cmd(sc, &time_cmd);
}
2464 
2465 /*
2466  * NVM read access and content parsing.  We do not support
2467  * external NVM or writing NVM.
2468  */
2469 
2470 /* list of NVM sections we are allowed/need to read */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	/* 8000-family devices presumably use this separate HW section. */
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
2481 
2482 /* Default NVM size to read */
2483 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2484 #define IWM_MAX_NVM_SECTION_SIZE_7000	(16 * 512 * sizeof(uint16_t)) /*16 KB*/
2485 #define IWM_MAX_NVM_SECTION_SIZE_8000	(32 * 512 * sizeof(uint16_t)) /*32 KB*/
2486 
2487 #define IWM_NVM_WRITE_OPCODE 1
2488 #define IWM_NVM_READ_OPCODE 0
2489 
/*
 * Read one chunk of an NVM section via IWM_NVM_ACCESS_CMD and copy the
 * returned bytes into 'data'.  The number of bytes the firmware
 * actually returned is stored in *len; returning fewer bytes than
 * requested signals end-of-section to the caller.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
    uint16_t length, uint8_t *data, uint16_t *len)
{
	/*
	 * NOTE(review): the caller-supplied offset is overridden with 0
	 * here, so every chunk is requested from (and copied to) the
	 * start — presumably a workaround for firmware mishandling
	 * non-zero offsets; TODO confirm intent (upstream iwm drivers
	 * pass the offset through).
	 */
	offset = 0;
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
		.data = { &nvm_access_cmd, },
	};
	int err, offset_read;
	size_t bytes_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	err = iwm_send_cmd(sc, &cmd);
	if (err) {
		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
		    DEVNAME(sc), err));
		return err;
	}

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		err = EIO;
		goto exit;
	}

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;

	err = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	/* Non-zero firmware status means the read failed. */
	if (err) {
		err = EINVAL;
		goto exit;
	}

	/* Sanity-check the firmware's reply against our request. */
	if (offset_read != offset) {
		err = EINVAL;
		goto exit;
	}
	if (bytes_read > length) {
		err = EINVAL;
		goto exit;
	}

	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return err;
}
2555 
2556 /*
2557  * Reads an NVM section completely.
2558  * NICs prior to 7000 family doesn't have a real NVM, but just read
2559  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2560  * by uCode, we need to manually check in this case that we don't
2561  * overflow and try to read more than the EEPROM size.
2562  */
static int
iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
    uint16_t *len, size_t max_len)
{
	uint16_t chunklen, seglen;
	int err;

	/* seglen starts equal to chunklen so the loop runs at least once. */
	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read NVM chunks until exhausted (reading less than requested) */
	while (seglen == chunklen && *len < max_len) {
		/*
		 * *len is passed as the offset of the next chunk; note
		 * that iwm_nvm_read_chunk() currently forces the offset
		 * to 0 internally (see the note there).
		 */
		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
		    &seglen);
		if (err) {
			DPRINTF(("%s: Cannot read NVM from section %d "
			    "offset %d, length %d\n",
			    DEVNAME(sc), section, *len, chunklen));
			return err;
		}
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
2589 
2590 static uint8_t
2591 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2592 {
2593 	uint8_t tx_ant;
2594 
2595 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2596 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2597 
2598 	if (sc->sc_nvm.valid_tx_ant)
2599 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2600 
2601 	return tx_ant;
2602 }
2603 
2604 static uint8_t
2605 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2606 {
2607 	uint8_t rx_ant;
2608 
2609 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2610 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2611 
2612 	if (sc->sc_nvm.valid_rx_ant)
2613 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2614 
2615 	return rx_ant;
2616 }
2617 
/*
 * Populate net80211's channel table from the NVM channel list and its
 * per-channel flags.  Channels below index IWM_NUM_2GHZ_CHANNELS are
 * treated as 2.4GHz; the rest as 5GHz.  Channels marked invalid in the
 * NVM (or 5GHz channels when the SKU disables the 5GHz band) are
 * skipped.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, size_t nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		aprint_debug_dev(sc->sc_dev,
		    "Ch. %d: %svalid %cibss %s %cradar %cdfs"
		    " %cwide %c40MHz %c80MHz %c160MHz\n",
		    nvm_channels[ch_idx],
		    ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
		    ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
		    ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');

		/* SKU without 5GHz support: force 5GHz channels invalid. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    nvm_channels[ch_idx], ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
			continue;
		}

		/* The channel number doubles as the hardware value. */
		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels the NVM forbids active scanning on. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

#ifndef IEEE80211_NO_HT
		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
#endif
	}
}
2684 
2685 #ifndef IEEE80211_NO_HT
/*
 * Advertise supported HT MCS rates.  Currently only single-stream
 * MCS 0-7 are enabled; the multi-stream cases are under "notyet".
 * (This whole function is compiled out while IEEE80211_NO_HT is
 * defined at the top of this file.)
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
2706 
2707 #define IWM_MAX_RX_BA_SESSIONS 16
2708 
/*
 * Start or stop an RX block-ack (aggregation) session for the given
 * TID by sending an ADD_STA modify command, then report the outcome
 * to net80211 (accept/refuse) and track the active session count.
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	/* Refuse new sessions beyond the firmware's limit. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update session accounting and notify net80211 at splnet. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);
	splx(s);
}
2755 
/*
 * Deferred task: re-send the MAC context command so the firmware picks
 * up changed HT protection settings.
 */
static void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		aprint_error_dev(sc->sc_dev,
		    "could not change HT protection: error %d\n", err);
}
2770 
2771 /*
2772  * This function is called by upper layer when HT protection settings in
2773  * beacons have changed.
2774  */
static void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer the firmware command to task context. */
	task_add(systq, &sc->htprot_task);
}
2783 
/*
 * Deferred task: perform the block-ack start/stop recorded in
 * sc->ba_start/ba_tid/ba_ssn by iwm_ampdu_rx_start/_stop.
 */
static void
iwm_ba_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;

	if (sc->ba_start)
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
	else
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
}
2796 
2797 /*
2798  * This function is called by upper layer when an ADDBA request is received
2799  * from another STA and before the ADDBA response is sent.
2800  */
static int
iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return ENOSPC;

	/* Record the request and hand it to the deferred task. */
	sc->ba_start = 1;
	sc->ba_tid = tid;
	sc->ba_ssn = htole16(ba->ba_winstart);
	task_add(systq, &sc->ba_task);

	/*
	 * EBUSY defers the answer; the task calls
	 * ieee80211_addba_req_accept/refuse once the firmware responds.
	 */
	return EBUSY;
}
2818 
2819 /*
2820  * This function is called by upper layer on teardown of an HT-immediate
2821  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2822  */
static void
iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	/* Record the teardown and hand it to the deferred task. */
	sc->ba_start = 0;
	sc->ba_tid = tid;
	task_add(systq, &sc->ba_task);
}
2833 #endif
2834 
2835 static void
2836 iwm_free_fw_paging(struct iwm_softc *sc)
2837 {
2838 	int i;
2839 
2840 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
2841 		return;
2842 
2843 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
2844 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
2845 	}
2846 
2847 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
2848 }
2849 
/*
 * Copy the firmware's paging image (CSS block plus paging blocks) out
 * of the firmware sections into the pre-allocated paging DMA blocks.
 * Returns 0 on success or EINVAL if the expected paging sections are
 * missing.
 */
static int
iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	int sec_idx, idx;
	uint32_t offset = 0;

	/*
	 * find where is the paging image start point:
	 * if CPU2 exist and it's in paging format, then the image looks like:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
	 * non paged to CPU2 paging sec
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
		if (fws->fw_sect[sec_idx].fws_devoff ==
		    IWM_PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for CSS and one for Paging data)
	 */
	if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
		aprint_verbose_dev(sc->sc_dev,
		    "Paging: Missing CSS and/or paging sections\n");
		iwm_free_fw_paging(sc);
		return EINVAL;
	}

	/* copy the CSS block to the dram */
	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
	    sec_idx));

	/*
	 * NOTE(review): the copies below trust fw_paging_size and the
	 * section lengths to be consistent; no bounds check against the
	 * actual section size is performed here — TODO confirm the
	 * firmware parser guarantees this.
	 */
	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
	    fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);

	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));

	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * loop index start from 1 since that CSS block already copied to dram
	 * and CSS index is 0.
	 * loop stop at num_of_paging_blk since that last block is not full.
	 */
	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
		       (const char *)fws->fw_sect[sec_idx].fws_data + offset,
		       sc->fw_paging_db[idx].fw_paging_size);

		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));

		offset += sc->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (sc->num_of_pages_in_last_blk > 0) {
		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
		    (const char *)fws->fw_sect[sec_idx].fws_data + offset,
		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);

		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
	}

	return 0;
}
2927 
/*
 * Allocate the DMA blocks that back firmware paging: one 4KB CSS
 * block plus num_of_paging_blk 32KB blocks.  If the blocks already
 * exist (device reset), just resync them for reuse.
 * Returns 0 on success or ENOMEM (after freeing partial allocations).
 */
static int
iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	int blk_idx = 0;
	int error, num_of_pages;
	bus_dmamap_t dmap;

	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
		int i;
		/* Device got reset, and we setup firmware paging again */
		for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
			dmap = sc->fw_paging_db[i].fw_paging_block.map;
			bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		}
		return 0;
	}

	/* ensure IWM_BLOCK_2_EXP_SIZE is power of 2 of IWM_PAGING_BLOCK_SIZE */
	CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);

	num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
	sc->num_of_paging_blk =
	    howmany(num_of_pages, IWM_NUM_OF_PAGE_PER_GROUP);
	/* The final block holds whatever pages remain after full groups. */
	sc->num_of_pages_in_last_blk = num_of_pages -
	    IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);

	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
	    "each block holds 8 pages, last block holds %d pages\n",
	    DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));

	/* allocate block of 4Kbytes for paging CSS */
	error = iwm_dma_contig_alloc(sc->sc_dmat,
	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
	    4096);
	if (error) {
		/* free all the previous pages since we failed */
		iwm_free_fw_paging(sc);
		return ENOMEM;
	}

	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;

	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
	    DEVNAME(sc)));

	/*
	 * allocate blocks in dram.
	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
	 */
	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
		/* XXX Use iwm_dma_contig_alloc for allocating */
		error = iwm_dma_contig_alloc(sc->sc_dmat,
		    &sc->fw_paging_db[blk_idx].fw_paging_block,
		    IWM_PAGING_BLOCK_SIZE, 4096);
		if (error) {
			/* free all the previous pages since we failed */
			iwm_free_fw_paging(sc);
			return ENOMEM;
		}

		sc->fw_paging_db[blk_idx].fw_paging_size =
		    IWM_PAGING_BLOCK_SIZE;

		DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
		    "paging.\n", DEVNAME(sc)));
	}

	return 0;
}
2999 
/*
 * Set up firmware paging: allocate the paging DMA blocks (if not
 * already present) and copy the firmware's paging image into them.
 */
static int
iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	int error = iwm_alloc_fw_paging_mem(sc, fws);

	if (error != 0)
		return error;

	return iwm_fill_paging_mem(sc, fws);
}
3011 
/*
 * Report whether the device uses the new (9000-series) TX API.
 * None of the devices handled by this driver do, so this is
 * unconditionally false for now.
 */
static bool
iwm_has_new_tx_api(struct iwm_softc *sc)
{
	/* XXX no supported device implements the new TX API yet */
	return false;
}
3018 
/*
 * Send IWM_FW_PAGING_BLOCK_CMD to the firmware, telling it where in
 * host DRAM the CSS block and the paging blocks live, in case CPU2
 * carries a paging image.  Must run after iwm_save_fw_paging() has
 * populated sc->fw_paging_db[].
 */
static int
iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	struct iwm_fw_paging_cmd fw_paging_cmd = {
		.flags = htole32(IWM_PAGING_CMD_IS_SECURED |
		                 IWM_PAGING_CMD_IS_ENABLED |
		                 (sc->num_of_pages_in_last_blk <<
		                  IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
		.block_num = htole32(sc->num_of_paging_blk),
	};
	size_t size = sizeof(fw_paging_cmd);
	int blk_idx;
	bus_dmamap_t dmap;

	/*
	 * Without the new TX API the command carries 32-bit block
	 * addresses, so shrink the command by the difference between the
	 * 64-bit and 32-bit address arrays before sending.
	 */
	if (!iwm_has_new_tx_api(sc))
		size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
		    IWM_NUM_OF_FW_PAGING_BLOCKS;

	/* loop for all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
		bus_addr_t dev_phy_addr =
		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
		if (iwm_has_new_tx_api(sc)) {
			fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
			    htole64(dev_phy_addr);
		} else {
			/* Legacy format takes 4KB page numbers, not bytes. */
			dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
			fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
			    htole32(dev_phy_addr);
		}
		/* Flush the block contents so the device sees them. */
		dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}

	return iwm_send_cmd_pdu(sc,
	    iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_ALWAYS_LONG_GROUP, 0),
	    0, size, &fw_paging_cmd);
}
3060 
/*
 * Determine the MAC address of an 8000-family device and store it in
 * data->hw_addr.  Preference order:
 *  1. The MAC-address-override (MAO) NVM section, unless it holds the
 *     reserved placeholder address or an invalid (broadcast, all-zero,
 *     or multicast) address.
 *  2. The OTP address read from the WFMP_MAC_ADDR PRPH registers.
 * If neither source yields an address, hw_addr is zeroed and an error
 * is printed.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	/* Placeholder some NVMs carry, meaning "use the OTP MAC instead". */
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The registers hold the address byte-reversed; undo that. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
3119 
/*
 * Parse the raw NVM sections into sc->sc_nvm: radio configuration,
 * SKU capabilities, MAC address, LAR flag and the channel map.
 * The 7000 family keeps most fields in the SW section; the 8000 family
 * spreads them across the PHY_SKU, MAC_OVERRIDE and REGULATORY
 * sections.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: 16-bit radio config word in the SW section. */
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000: 32-bit radio config word in the PHY_SKU section. */
		uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
		sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: MAC is stored 16-bit-word swapped in the HW section. */
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* LAR config word moved between NVM versions. */
		uint16_t lar_offset, lar_config;
		lar_offset = data->nvm_version < 0xE39 ?
		    IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
		lar_config = le16_to_cpup(regulatory + lar_offset);
                data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
3193 
3194 static int
3195 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3196 {
3197 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3198 	const uint16_t *regulatory = NULL;
3199 
3200 	/* Checking for required sections */
3201 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3202 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3203 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3204 			return ENOENT;
3205 		}
3206 
3207 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3208 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3209 		/* SW and REGULATORY sections are mandatory */
3210 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3211 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3212 			return ENOENT;
3213 		}
3214 		/* MAC_OVERRIDE or at least HW section must exist */
3215 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3216 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3217 			return ENOENT;
3218 		}
3219 
3220 		/* PHY_SKU section is mandatory in B0 */
3221 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3222 			return ENOENT;
3223 		}
3224 
3225 		regulatory = (const uint16_t *)
3226 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3227 		hw = (const uint16_t *)
3228 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3229 		mac_override =
3230 			(const uint16_t *)
3231 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3232 		phy_sku = (const uint16_t *)
3233 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3234 	} else {
3235 		panic("unknown device family %d\n", sc->sc_device_family);
3236 	}
3237 
3238 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3239 	calib = (const uint16_t *)
3240 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3241 
3242 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3243 	    phy_sku, regulatory);
3244 }
3245 
3246 static int
3247 iwm_nvm_init(struct iwm_softc *sc)
3248 {
3249 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3250 	int i, section, err;
3251 	uint16_t len;
3252 	uint8_t *buf;
3253 	const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
3254 	    IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
3255 
3256 	/* Read From FW NVM */
3257 	DPRINTF(("Read NVM\n"));
3258 
3259 	memset(nvm_sections, 0, sizeof(nvm_sections));
3260 
3261 	buf = kmem_alloc(bufsz, KM_SLEEP);
3262 
3263 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
3264 		section = iwm_nvm_to_read[i];
3265 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
3266 
3267 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3268 		if (err) {
3269 			err = 0;
3270 			continue;
3271 		}
3272 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3273 		memcpy(nvm_sections[section].data, buf, len);
3274 		nvm_sections[section].length = len;
3275 	}
3276 	kmem_free(buf, bufsz);
3277 	if (err == 0)
3278 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3279 
3280 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3281 		if (nvm_sections[i].data != NULL)
3282 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3283 	}
3284 
3285 	return err;
3286 }
3287 
3288 static int
3289 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3290     const uint8_t *section, uint32_t byte_cnt)
3291 {
3292 	int err = EINVAL;
3293 	uint32_t chunk_sz, offset;
3294 
3295 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3296 
3297 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3298 		uint32_t addr, len;
3299 		const uint8_t *data;
3300 		bool is_extended = false;
3301 
3302 		addr = dst_addr + offset;
3303 		len = MIN(chunk_sz, byte_cnt - offset);
3304 		data = section + offset;
3305 
3306 		if (addr >= IWM_FW_MEM_EXTENDED_START &&
3307 		    addr <= IWM_FW_MEM_EXTENDED_END)
3308 			is_extended = true;
3309 
3310 		if (is_extended)
3311 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3312 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3313 
3314 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3315 
3316 		if (is_extended)
3317 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3318 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3319 
3320 		if (err)
3321 			break;
3322 	}
3323 
3324 	return err;
3325 }
3326 
/*
 * DMA one firmware chunk into device SRAM at dst_addr using the
 * dedicated service channel of the flow handler, then sleep until the
 * interrupt handler reports completion (sc_fw_chunk_done).
 * Returns 0 on success, EBUSY if the NIC could not be locked, or the
 * tsleep() error (e.g. EWOULDBLOCK after the 5s timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
	    BUS_DMASYNC_PREWRITE);

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel, program source/destination, then restart it. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
		if (err)
			break;
	}
	if (!sc->sc_fw_chunk_done) {
		DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
		    DEVNAME(sc), dst_addr, byte_cnt));
	}

	return err;
}
3378 
3379 static int
3380 iwm_load_cpu_sections_7000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3381     int cpu, int *first_ucode_section)
3382 {
3383 	int i, err = 0;
3384 	uint32_t last_read_idx = 0;
3385 	void *data;
3386 	uint32_t dlen;
3387 	uint32_t offset;
3388 
3389 	if (cpu == 1) {
3390 		*first_ucode_section = 0;
3391 	} else {
3392 		(*first_ucode_section)++;
3393 	}
3394 
3395 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3396 		last_read_idx = i;
3397 		data = fws->fw_sect[i].fws_data;
3398 		dlen = fws->fw_sect[i].fws_len;
3399 		offset = fws->fw_sect[i].fws_devoff;
3400 
3401 		/*
3402 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
3403 		 * CPU1 to CPU2.
3404 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
3405 		 * CPU2 non paged to CPU2 paging sec.
3406 		 */
3407 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3408 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3409 			break;
3410 
3411 		if (dlen > sc->sc_fwdmasegsz) {
3412 			err = EFBIG;
3413 		} else
3414 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3415 		if (err) {
3416 			DPRINTF(("%s: could not load firmware chunk %d "
3417 			    "(error %d)\n", DEVNAME(sc), i, err));
3418 			return err;
3419 		}
3420 	}
3421 
3422 	*first_ucode_section = last_read_idx;
3423 
3424 	return 0;
3425 }
3426 
3427 static int
3428 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3429 {
3430 	struct iwm_fw_sects *fws;
3431 	int err = 0;
3432 	int first_ucode_section;
3433 
3434 	fws = &sc->sc_fw.fw_sects[ucode_type];
3435 
3436 	DPRINTF(("%s: working with %s CPU\n", DEVNAME(sc),
3437 	    fws->is_dual_cpus ? "dual" : "single"));
3438 
3439 	/* load to FW the binary Secured sections of CPU1 */
3440 	err = iwm_load_cpu_sections_7000(sc, fws, 1, &first_ucode_section);
3441 	if (err)
3442 		return err;
3443 
3444 	if (fws->is_dual_cpus) {
3445 		/* set CPU2 header address */
3446 		if (iwm_nic_lock(sc)) {
3447 			iwm_write_prph(sc,
3448 			    IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
3449 			    IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
3450 			iwm_nic_unlock(sc);
3451 		}
3452 
3453 		/* load to FW the binary sections of CPU2 */
3454 		err = iwm_load_cpu_sections_7000(sc, fws, 2,
3455 		    &first_ucode_section);
3456 		if (err)
3457 			return err;
3458 	}
3459 
3460 	/* release CPU reset */
3461 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3462 
3463 	return 0;
3464 }
3465 
/*
 * Load the firmware sections belonging to one CPU (8000 family).
 * Like the 7000 variant, but after each section the ucode must be
 * notified of the loaded section number via IWM_FH_UCODE_LOAD_STATUS
 * (CPU2 status bits live in the upper 16 bits, hence shift_param),
 * and the register is set to all-ones for the CPU when its image is
 * complete.  On return *first_ucode_section holds the last index
 * examined so the next CPU's call can continue from there.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		/* CPU2 bits are shifted; resume after CPU1's separator. */
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			DPRINTF(("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err));
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Mark this CPU's image as completely loaded. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
3538 
3539 static int
3540 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3541 {
3542 	struct iwm_fw_sects *fws;
3543 	int err = 0;
3544 	int first_ucode_section;
3545 
3546 	fws = &sc->sc_fw.fw_sects[ucode_type];
3547 
3548 	/* configure the ucode to be ready to get the secured image */
3549 	/* release CPU reset */
3550 	if (iwm_nic_lock(sc)) {
3551 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3552 		    IWM_RELEASE_CPU_RESET_BIT);
3553 		iwm_nic_unlock(sc);
3554 	}
3555 
3556 	/* load to FW the binary Secured sections of CPU1 */
3557 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3558 	if (err)
3559 		return err;
3560 
3561 	/* load to FW the binary sections of CPU2 */
3562 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3563 }
3564 
/*
 * Load the requested ucode image into the device (family-specific
 * path) and wait up to ~1s (10 x 100ms) for the "alive" interrupt
 * (sc_uc.uc_intr) from the firmware.  On failure the CPU status
 * registers are dumped for 8000-family devices.
 * Returns 0 on success or a tsleep()/load error.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err, w;

	sc->sc_uc.uc_intr = 0;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		err = iwm_load_firmware_8000(sc, ucode_type);
	else
		err = iwm_load_firmware_7000(sc, ucode_type);
	if (err)
		return err;

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
	if (err || !sc->sc_uc.uc_ok) {
		aprint_error_dev(sc->sc_dev,
		    "could not load firmware (error %d, ok %d)\n",
		    err, sc->sc_uc.uc_ok);
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
			/* Dump per-CPU status to aid debugging. */
			aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
			aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
	}

	return err;
}
3596 
/*
 * Bring up the NIC hardware, clear the rfkill handshake bits, enable
 * host interrupts and load the requested ucode image.
 * Returns 0 on success or an error from iwm_nic_init() /
 * iwm_load_firmware().
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack any pending interrupts before (re)initializing. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
3626 
3627 static int
3628 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3629 {
3630 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3631 		.valid = htole32(valid_tx_ant),
3632 	};
3633 
3634 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3635 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
3636 }
3637 
3638 static int
3639 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3640 {
3641 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3642 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3643 
3644 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3645 	phy_cfg_cmd.calib_control.event_trigger =
3646 	    sc->sc_default_calib[ucode_type].event_trigger;
3647 	phy_cfg_cmd.calib_control.flow_trigger =
3648 	    sc->sc_default_calib[ucode_type].flow_trigger;
3649 
3650 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3651 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3652 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3653 }
3654 
3655 static int
3656 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3657 {
3658 	struct iwm_fw_sects *fws;
3659 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3660 	int err;
3661 
3662 	err = iwm_read_firmware(sc, ucode_type);
3663 	if (err)
3664 		return err;
3665 
3666 	sc->sc_uc_current = ucode_type;
3667 	err = iwm_start_fw(sc, ucode_type);
3668 	if (err) {
3669 		sc->sc_uc_current = old_type;
3670 		return err;
3671 	}
3672 
3673 	err = iwm_post_alive(sc);
3674 	if (err)
3675 		return err;
3676 
3677 	fws = &sc->sc_fw.fw_sects[ucode_type];
3678 	if (fws->paging_mem_size) {
3679 		err = iwm_save_fw_paging(sc, fws);
3680 		if (err)
3681 			return err;
3682 
3683 		err = iwm_send_paging_cmd(sc, fws);
3684 		if (err) {
3685 			iwm_free_fw_paging(sc);
3686 			return err;
3687 		}
3688 	}
3689 
3690 	return 0;
3691 }
3692 
/*
 * Run the INIT ucode image.  With justnvm set, only load the firmware,
 * read the NVM and fill in the MAC address.  Otherwise also configure
 * BT coexistence, smart FIFO, TX antennas and the PHY, then wait (up
 * to 2s per sleep) for the init-complete notification from the
 * firmware.
 * Returns 0 on success, EPERM when rfkill is active, or an error from
 * any of the steps.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
		return err;
	}

	if (justnvm) {
		/* Only the NVM contents (incl. MAC address) are wanted. */
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
3755 
/*
 * Allocate and DMA-map a receive mbuf of 'size' bytes and install it
 * in RX ring slot 'idx', updating the hardware RX descriptor.
 * If the slot already held a buffer (refill of an in-use slot) a
 * mapping failure is fatal, since the old buffer has already been
 * unloaded and the descriptor would be left dangling.
 * Returns 0 on success, ENOBUFS on mbuf/cluster exhaustion, or a
 * bus_dmamap_load_mbuf() error.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Attach external storage large enough for the requested size. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, size, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (data->m != NULL) {
		/* Replacing a live buffer: a load failure is unrecoverable. */
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3804 
3805 #define IWM_RSSI_OFFSET 50
3806 static int
3807 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3808 {
3809 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3810 	uint32_t agc_a, agc_b;
3811 	uint32_t val;
3812 
3813 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3814 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3815 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3816 
3817 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3818 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3819 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3820 
3821 	/*
3822 	 * dBm = rssi dB - agc dB - constant.
3823 	 * Higher AGC (higher radio gain) means lower signal.
3824 	 */
3825 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3826 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3827 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3828 
3829 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3830 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3831 
3832 	return max_rssi_dbm;
3833 }
3834 
3835 /*
3836  * RSSI values are reported by the FW as positive values - need to negate
3837  * to obtain their dBM.  Account for missing antennas by replacing 0
3838  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3839  */
3840 static int
3841 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3842 {
3843 	int energy_a, energy_b, energy_c, max_energy;
3844 	uint32_t val;
3845 
3846 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3847 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3848 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3849 	energy_a = energy_a ? -energy_a : -256;
3850 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3851 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3852 	energy_b = energy_b ? -energy_b : -256;
3853 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3854 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3855 	energy_c = energy_c ? -energy_c : -256;
3856 	max_energy = MAX(energy_a, energy_b);
3857 	max_energy = MAX(max_energy, energy_c);
3858 
3859 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3860 	    energy_a, energy_b, energy_c, max_energy));
3861 
3862 	return max_energy;
3863 }
3864 
3865 static void
3866 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3867     struct iwm_rx_data *data)
3868 {
3869 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3870 
3871 	DPRINTFN(20, ("received PHY stats\n"));
3872 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3873 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3874 
3875 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3876 }
3877 
3878 /*
3879  * Retrieve the average noise (in dBm) among receivers.
3880  */
3881 static int
3882 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3883 {
3884 	int i, total, nbant, noise;
3885 
3886 	total = nbant = noise = 0;
3887 	for (i = 0; i < 3; i++) {
3888 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3889 		if (noise) {
3890 			total += noise;
3891 			nbant++;
3892 		}
3893 	}
3894 
3895 	/* There should be at least one antenna but check anyway. */
3896 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3897 }
3898 
/*
 * Handle an RX MPDU notification: validate the frame, attach RX metadata
 * from the most recently received PHY info (sc_last_phy_info), feed the
 * radiotap tap if active, and hand the frame to the net80211 stack.
 * Drops the frame silently on CRC/FIFO errors or bogus PHY data.
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;
	int s;

	/* Make the whole RX buffer visible to the CPU before parsing it. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info was delivered earlier by iwm_rx_rx_phy_cmd(). */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The 32-bit RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Point the mbuf at the 802.11 frame inside the RX buffer. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports energy directly; older needs conversion. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/* Replenish the RX slot; on failure we must not consume this mbuf. */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	/* Map the firmware-reported channel number to an ic channel. */
	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	s = splnet();

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			/* HT frame: report as an MCS index (0x80 flag). */
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
			      IWM_RATE_HT_MCS_NSS_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Legacy frame: translate PLCP code to 500kbps units. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m, BPF_D_IN);
	}
	/* Hand the frame to net80211; it takes ownership of the mbuf. */
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);

	splx(s);
}
4017 
4018 static void
4019 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4020     struct iwm_node *in)
4021 {
4022 	struct ieee80211com *ic = &sc->sc_ic;
4023 	struct ifnet *ifp = IC2IFP(ic);
4024 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4025 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4026 	int failack = tx_resp->failure_frame;
4027 
4028 	KASSERT(tx_resp->frame_count == 1);
4029 
4030 	/* Update rate control statistics. */
4031 	in->in_amn.amn_txcnt++;
4032 	if (failack > 0) {
4033 		in->in_amn.amn_retrycnt++;
4034 	}
4035 
4036 	if (status != IWM_TX_STATUS_SUCCESS &&
4037 	    status != IWM_TX_STATUS_DIRECT_DONE)
4038 		if_statinc(ifp, if_oerrors);
4039 	else
4040 		if_statinc(ifp, if_opackets);
4041 }
4042 
/*
 * Handle a TX completion notification: account the result, release the
 * transmitted mbuf and its DMA map, drop the node reference taken at
 * TX time, and restart output if the ring drained below the low mark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	int s;

	s = splnet();

	/* Guard against duplicate completions for the same slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		splx(s);
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Tear down the DMA mapping and free the transmitted frame. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Release the node reference held since iwm_tx(). */
	ieee80211_free_node(&in->in_ni);

	/* Unthrottle output once the ring has drained enough. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			KASSERT(KERNEL_LOCKED_P());
			iwm_start(ifp);
		}
	}

	splx(s);
}
4098 
4099 static int
4100 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4101 {
4102 	struct iwm_binding_cmd cmd;
4103 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4104 	int i, err;
4105 	uint32_t status;
4106 
4107 	memset(&cmd, 0, sizeof(cmd));
4108 
4109 	cmd.id_and_color
4110 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4111 	cmd.action = htole32(action);
4112 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4113 
4114 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4115 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4116 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4117 
4118 	status = 0;
4119 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4120 	    sizeof(cmd), &cmd, &status);
4121 	if (err == 0 && status != 0)
4122 		err = EIO;
4123 
4124 	return err;
4125 }
4126 
4127 static void
4128 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4129     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4130 {
4131 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4132 
4133 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4134 	    ctxt->color));
4135 	cmd->action = htole32(action);
4136 	cmd->apply_time = htole32(apply_time);
4137 }
4138 
4139 static void
4140 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4141     struct ieee80211_channel *chan, uint8_t chains_static,
4142     uint8_t chains_dynamic)
4143 {
4144 	struct ieee80211com *ic = &sc->sc_ic;
4145 	uint8_t active_cnt, idle_cnt;
4146 
4147 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4148 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4149 
4150 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4151 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4152 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4153 
4154 	/* Set rx the chains */
4155 	idle_cnt = chains_static;
4156 	active_cnt = chains_dynamic;
4157 
4158 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4159 	    IWM_PHY_RX_CHAIN_VALID_POS);
4160 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4161 	cmd->rxchain_info |= htole32(active_cnt <<
4162 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4163 
4164 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4165 }
4166 
4167 static int
4168 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4169     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4170     uint32_t apply_time)
4171 {
4172 	struct iwm_phy_context_cmd cmd;
4173 
4174 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4175 
4176 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4177 	    chains_static, chains_dynamic);
4178 
4179 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4180 	    sizeof(struct iwm_phy_context_cmd), &cmd);
4181 }
4182 
/*
 * Send a host command to the firmware via the command queue.
 *
 * The command payload is gathered from hcmd->data[]/hcmd->len[].  Small
 * payloads are copied into the ring's pre-allocated command buffer;
 * payloads larger than that buffer are staged in a freshly allocated
 * mbuf and DMA-mapped separately.  If IWM_CMD_WANT_SKB is set, the
 * caller wants the response packet: this routine serializes such
 * commands on sc_wantresp and the caller must later release the slot
 * with iwm_free_resp().  Synchronous commands (without IWM_CMD_ASYNC)
 * sleep up to 2 seconds for the completion delivered by iwm_cmd_done().
 *
 * Returns 0 on success, or an errno (ENXIO if the device went away,
 * EINVAL/ENOMEM on staging failures, EWOULDBLOCK on timeout, ...).
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload length across all scatter fragments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		/* Record which queue slot the response belongs to. */
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Wide (grouped) commands use a larger header, smaller payload. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			aprint_error_dev(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		/*
		 * NOTE(review): from here on, error paths leave the mbuf
		 * attached to txdata with its map loaded; presumably it is
		 * reclaimed on command completion or ring reset — verify.
		 */
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Gather all payload fragments into the command buffer. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, hdrlen + paylen, async ? " (async)" : ""));

	/* Flush the command (wherever it was staged) and its descriptor. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
	    BUS_DMASYNC_PREWRITE);

	err = iwm_set_cmd_in_flight(sc);
	if (err)
		goto out;
	ring->queued++;

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* Wait for completion; iwm_cmd_done() wakes us on 'desc'. */
		int generation = sc->sc_generation;
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response slot we reserved above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
4356 
4357 static int
4358 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4359     uint16_t len, const void *data)
4360 {
4361 	struct iwm_host_cmd cmd = {
4362 		.id = id,
4363 		.len = { len, },
4364 		.data = { data, },
4365 		.flags = flags,
4366 	};
4367 
4368 	return iwm_send_cmd(sc, &cmd);
4369 }
4370 
4371 static int
4372 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4373     uint32_t *status)
4374 {
4375 	struct iwm_rx_packet *pkt;
4376 	struct iwm_cmd_response *resp;
4377 	int err, resp_len;
4378 
4379 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4380 	cmd->flags |= IWM_CMD_WANT_SKB;
4381 
4382 	err = iwm_send_cmd(sc, cmd);
4383 	if (err)
4384 		return err;
4385 	pkt = cmd->resp_pkt;
4386 
4387 	/* Can happen if RFKILL is asserted */
4388 	if (!pkt) {
4389 		err = 0;
4390 		goto out_free_resp;
4391 	}
4392 
4393 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4394 		err = EIO;
4395 		goto out_free_resp;
4396 	}
4397 
4398 	resp_len = iwm_rx_packet_payload_len(pkt);
4399 	if (resp_len != sizeof(*resp)) {
4400 		err = EIO;
4401 		goto out_free_resp;
4402 	}
4403 
4404 	resp = (void *)pkt->data;
4405 	*status = le32toh(resp->status);
4406  out_free_resp:
4407 	iwm_free_resp(sc, cmd);
4408 	return err;
4409 }
4410 
4411 static int
4412 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4413     const void *data, uint32_t *status)
4414 {
4415 	struct iwm_host_cmd cmd = {
4416 		.id = id,
4417 		.len = { len, },
4418 		.data = { data, },
4419 	};
4420 
4421 	return iwm_send_cmd_status(sc, &cmd, status);
4422 }
4423 
/*
 * Release the response slot reserved by iwm_send_cmd() for an
 * IWM_CMD_WANT_SKB command, and wake any thread waiting to issue
 * another synchronous command.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
4432 
/*
 * Handle a command-completion interrupt for the command queue: free the
 * staging mbuf of an oversized command (if any), wake the thread
 * sleeping in iwm_send_cmd(), and maintain the queued count.  Non-
 * command-queue completions are ignored here.
 */
static void
iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tx_data *data;
	int s;

	if (qid != IWM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	s = splnet();

	data = &ring->data[idx];

	/* Oversized commands were staged in a separate DMA-mapped mbuf. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* iwm_send_cmd() sleeps on the descriptor address. */
	wakeup(&ring->desc[idx]);

	/* Sanity check: completions should arrive strictly in order. */
	if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    idx, ring->queued, ring->cur);
	}

	if (ring->queued == 0) {
		splx(s);
		device_printf(sc->sc_dev, "cmd_done with empty ring\n");
		return;
	}

	/* Drop the firmware wakeup reference once no commands remain. */
	if (--ring->queued == 0)
		iwm_clear_cmd_in_flight(sc);

	splx(s);
}
4474 
#if 0
/*
 * Update the TX scheduler's byte-count table for a queue slot.
 * Necessary only for block ack mode.  Currently compiled out.
 *
 * Fix: the bus_dmamap_sync() offset computations referenced an
 * undeclared identifier 'w'; they must use the address of the
 * tfd_offset slot that was just written, relative to the start of
 * the scheduler table, so this compiles (and syncs the right bytes)
 * if ever enabled.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* The firmware keeps a shadow copy of the first entries. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)
		    &scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
4510 
/*
 * Fill in various bits for management frames, and leave them
 * unfilled for data frames (firmware takes care of that).
 * Return the selected TX rate.
 *
 * Non-data and multicast frames get the lowest supported rate for the
 * band and a management retry limit; data frames either use a fixed
 * rate (if configured) or defer rate selection to the firmware's rate
 * scaling table, in which case the antenna/flag setup below is skipped.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags, i, ind;
	int nrates = ni->ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
#ifndef IEEE80211_NO_HT
	} else if (ic->ic_fixed_mcs != -1) {
		ridx = sc->sc_fixed_ridx;
#endif
	} else if (ic->ic_fixed_rate != -1) {
		ridx = sc->sc_fixed_ridx;
	} else {
		/* for data frames, use RS table */
		tx->initial_rate_index = 0;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n",
		    tx->initial_rate_index));
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
			ridx = iwm_mcs2ridx[ni->ni_txmcs];
			return &iwm_rates[ridx];
		}
#endif
		/* Map the current net80211 TX rate to our rate table. */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		for (i = 0; i < nrates; i++) {
			if (iwm_rates[i].rate == (ni->ni_txrate &
			    IEEE80211_RATE_VAL)) {
				ridx = i;
				break;
			}
		}
		/* Firmware chooses the actual rate; just report ours. */
		return &iwm_rates[ridx];
	}

	rinfo = &iwm_rates[ridx];
	/* Round-robin to the next valid TX antenna for management frames. */
	for (i = 0, ind = sc->sc_mgmt_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_mgmt_last_antenna = ind;
			break;
		}
	}
	rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
#ifndef IEEE80211_NO_HT
	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
		rate_flags |= IWM_RATE_MCS_HT_MSK;
		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
	} else
#endif
		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
4589 
/* First TX buffer of each descriptor covers the TX command header. */
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on TX ring 'ac'.
 *
 * Builds the TX command in the ring's command buffer, copies the
 * 802.11 header into it (padded to a 4-byte boundary if needed),
 * DMA-maps the remaining payload, and fills the TFD with the command
 * segments plus the payload segments.  Takes over ownership of 'm'
 * (freed on error) and keeps the node reference in data->in until the
 * completion is handled by iwm_rx_tx_cmd().  Returns 0 or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Build the TX command header in the ring's command slot. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m, BPF_D_OUT);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection per threshold or policy. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Association frames get a longer power-save timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		else
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(tx + 1, wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/*
		 * NOTE(review): claims the RX mbuf owner tag on a TX-path
		 * mbuf; looks like it should be the TX owner — confirm.
		 */
		MCLAIM(m1, &sc->sc_ec.ec_rx_mowner);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
	    "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
	    (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
	    le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
	    le32toh(tx->rate_n_flags)));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0/TB1 cover the TX command plus the copied 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, TX command, and descriptor before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4822 
#if 0
/*
 * Ask the firmware to flush the TX FIFOs selected by 'tfd_msk';
 * 'sync' selects a synchronous vs asynchronous command.
 * Currently compiled out (not necessary?).
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
#endif
4842 
/* Turn the device LED on. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
4848 
/* Turn the device LED off. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
4854 
/* Return non-zero if the LED register currently reads "on". */
static int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
4860 
4861 static void
4862 iwm_led_blink_timeout(void *arg)
4863 {
4864 	struct iwm_softc *sc = arg;
4865 
4866 	if (iwm_led_is_enabled(sc))
4867 		iwm_led_disable(sc);
4868 	else
4869 		iwm_led_enable(sc);
4870 
4871 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4872 }
4873 
/* Start LED blinking by arming the blink callout. */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
4879 
/* Stop LED blinking and leave the LED off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
4886 
4887 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4888 
4889 static int
4890 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4891     struct iwm_beacon_filter_cmd *cmd)
4892 {
4893 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4894 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4895 }
4896 
/*
 * Apply connection-quality-monitoring parameters to a beacon filter
 * command.  Currently only mirrors the beacon-abort enable flag;
 * 'in' is unused here (kept for interface symmetry with callers).
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4903 
/*
 * Enable or disable beacon abort in firmware, keeping the cached
 * sc_bf.ba_enabled state in sync.  No-op (success) when beacon
 * filtering itself is not enabled.
 */
static int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
4920 
4921 static void
4922 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4923     struct iwm_mac_power_cmd *cmd)
4924 {
4925 	struct ieee80211_node *ni = &in->in_ni;
4926 	int dtim_period, dtim_msec, keep_alive;
4927 
4928 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4929 	    in->in_color));
4930 	if (ni->ni_dtim_period)
4931 		dtim_period = ni->ni_dtim_period;
4932 	else
4933 		dtim_period = 1;
4934 
4935 	/*
4936 	 * Regardless of power management state the driver must set
4937 	 * keep alive period. FW will use it for sending keep alive NDPs
4938 	 * immediately after association. Check that keep alive period
4939 	 * is at least 3 * DTIM.
4940 	 */
4941 	dtim_msec = dtim_period * ni->ni_intval;
4942 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4943 	keep_alive = roundup(keep_alive, 1000) / 1000;
4944 	cmd->keep_alive_seconds = htole16(keep_alive);
4945 
4946 #ifdef notyet
4947 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4948 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4949 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4950 #endif
4951 }
4952 
4953 static int
4954 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4955 {
4956 	int err;
4957 	int ba_enable;
4958 	struct iwm_mac_power_cmd cmd;
4959 
4960 	memset(&cmd, 0, sizeof(cmd));
4961 
4962 	iwm_power_build_cmd(sc, in, &cmd);
4963 
4964 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4965 	    sizeof(cmd), &cmd);
4966 	if (err)
4967 		return err;
4968 
4969 	ba_enable = !!(cmd.flags &
4970 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4971 	return iwm_update_beacon_abort(sc, in, ba_enable);
4972 }
4973 
4974 static int
4975 iwm_power_update_device(struct iwm_softc *sc)
4976 {
4977 	struct iwm_device_power_cmd cmd = {
4978 #ifdef notyet
4979 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4980 #else
4981 		.flags = 0,
4982 #endif
4983 	};
4984 
4985 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4986 		return 0;
4987 
4988 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4989 	DPRINTF(("Sending device power command with flags = 0x%X\n",
4990 	    cmd.flags));
4991 
4992 	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4993 }
4994 
#ifdef notyet
/*
 * Enable beacon filtering in the firmware and remember that state in
 * sc_bf.bf_enabled on success.  Currently compiled out ("notyet");
 * iwm_disable_beacon_filter() below is its active counterpart.
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	/* Only record the new state if the firmware accepted it. */
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
5014 
5015 static int
5016 iwm_disable_beacon_filter(struct iwm_softc *sc)
5017 {
5018 	struct iwm_beacon_filter_cmd cmd;
5019 	int err;
5020 
5021 	memset(&cmd, 0, sizeof(cmd));
5022 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
5023 		return 0;
5024 
5025 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5026 	if (err == 0)
5027 		sc->sc_bf.bf_enabled = 0;
5028 
5029 	return err;
5030 }
5031 
5032 static int
5033 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5034 {
5035 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
5036 	int err;
5037 	uint32_t status;
5038 
5039 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5040 
5041 	add_sta_cmd.sta_id = IWM_STATION_ID;
5042 	add_sta_cmd.mac_id_n_color
5043 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5044 	if (!update) {
5045 		int ac;
5046 		for (ac = 0; ac < WME_NUM_AC; ac++) {
5047 			add_sta_cmd.tfd_queue_msk |=
5048 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
5049 		}
5050 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5051 	}
5052 	add_sta_cmd.add_modify = update ? 1 : 0;
5053 	add_sta_cmd.station_flags_msk
5054 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5055 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
5056 	if (update)
5057 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5058 
5059 #ifndef IEEE80211_NO_HT
5060 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5061 		add_sta_cmd.station_flags_msk
5062 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5063 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5064 
5065 		add_sta_cmd.station_flags
5066 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5067 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5068 		case IEEE80211_AMPDU_PARAM_SS_2:
5069 			add_sta_cmd.station_flags
5070 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5071 			break;
5072 		case IEEE80211_AMPDU_PARAM_SS_4:
5073 			add_sta_cmd.station_flags
5074 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5075 			break;
5076 		case IEEE80211_AMPDU_PARAM_SS_8:
5077 			add_sta_cmd.station_flags
5078 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5079 			break;
5080 		case IEEE80211_AMPDU_PARAM_SS_16:
5081 			add_sta_cmd.station_flags
5082 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5083 			break;
5084 		default:
5085 			break;
5086 		}
5087 	}
5088 #endif
5089 
5090 	status = IWM_ADD_STA_SUCCESS;
5091 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
5092 	    &add_sta_cmd, &status);
5093 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5094 		err = EIO;
5095 
5096 	return err;
5097 }
5098 
5099 static int
5100 iwm_add_aux_sta(struct iwm_softc *sc)
5101 {
5102 	struct iwm_add_sta_cmd_v7 cmd;
5103 	int err;
5104 	uint32_t status;
5105 
5106 	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
5107 	if (err)
5108 		return err;
5109 
5110 	memset(&cmd, 0, sizeof(cmd));
5111 	cmd.sta_id = IWM_AUX_STA_ID;
5112 	cmd.mac_id_n_color =
5113 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5114 	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
5115 	cmd.tid_disable_tx = htole16(0xffff);
5116 
5117 	status = IWM_ADD_STA_SUCCESS;
5118 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
5119 	    &status);
5120 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5121 		err = EIO;
5122 
5123 	return err;
5124 }
5125 
/*
 * Scan timing parameters.  The three *_PERIOD values are not referenced
 * in this part of the file and, unlike the first two, lack the IWM_
 * namespace prefix.
 */
#define IWM_PLCP_QUIET_THRESH 1
#define IWM_ACTIVE_QUIET_TIME 10
#define LONG_OUT_TIME_PERIOD 600
#define SHORT_OUT_TIME_PERIOD 200
#define SUSPEND_TIME_PERIOD 100
5131 
5132 static uint16_t
5133 iwm_scan_rx_chain(struct iwm_softc *sc)
5134 {
5135 	uint16_t rx_chain;
5136 	uint8_t rx_ant;
5137 
5138 	rx_ant = iwm_fw_valid_rx_ant(sc);
5139 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5140 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5141 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5142 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5143 	return htole16(rx_chain);
5144 }
5145 
5146 static uint32_t
5147 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5148 {
5149 	uint32_t tx_ant;
5150 	int i, ind;
5151 
5152 	for (i = 0, ind = sc->sc_scan_last_antenna;
5153 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
5154 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5155 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5156 			sc->sc_scan_last_antenna = ind;
5157 			break;
5158 		}
5159 	}
5160 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5161 
5162 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5163 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5164 				   tx_ant);
5165 	else
5166 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
5167 }
5168 
5169 #ifdef notyet
5170 /*
5171  * If req->n_ssids > 0, it means we should do an active scan.
5172  * In case of active scan w/o directed scan, we receive a zero-length SSID
5173  * just to notify that this scan is active and not passive.
5174  * In order to notify the FW of the number of SSIDs we wish to scan (including
5175  * the zero-length one), we need to set the corresponding bits in chan->type,
5176  * one for each SSID, and set the active bit (first). If the first SSID is
5177  * already included in the probe template, so we need to set only
5178  * req->n_ssids - 1 bits in addition to the first bit.
5179  */
5180 static uint16_t
5181 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
5182 {
5183 	if (flags & IEEE80211_CHAN_2GHZ)
5184 		return 30  + 3 * (n_ssids + 1);
5185 	return 20  + 2 * (n_ssids + 1);
5186 }
5187 
5188 static uint16_t
5189 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
5190 {
5191 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
5192 }
5193 #endif
5194 
5195 static uint8_t
5196 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5197     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
5198 {
5199 	struct ieee80211com *ic = &sc->sc_ic;
5200 	struct ieee80211_channel *c;
5201 	uint8_t nchan;
5202 
5203 	for (nchan = 0, c = &ic->ic_channels[1];
5204 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5205 	    nchan < sc->sc_capa_n_scan_channels;
5206 	    c++) {
5207 		if (c->ic_flags == 0)
5208 			continue;
5209 
5210 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5211 		chan->iter_count = htole16(1);
5212 		chan->iter_interval = htole32(0);
5213 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5214 		chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
5215 		if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
5216 			chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
5217 		chan++;
5218 		nchan++;
5219 	}
5220 
5221 	return nchan;
5222 }
5223 
5224 static uint8_t
5225 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5226     struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
5227 {
5228 	struct ieee80211com *ic = &sc->sc_ic;
5229 	struct ieee80211_channel *c;
5230 	uint8_t nchan;
5231 
5232 	for (nchan = 0, c = &ic->ic_channels[1];
5233 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5234 	    nchan < sc->sc_capa_n_scan_channels;
5235 	    c++) {
5236 		if (c->ic_flags == 0)
5237 			continue;
5238 
5239 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5240 		chan->iter_count = 1;
5241 		chan->iter_interval = htole16(0);
5242 		chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
5243 		chan++;
5244 		nchan++;
5245 	}
5246 
5247 	return nchan;
5248 }
5249 
/*
 * Construct the probe request template the firmware transmits during
 * active scans: an 802.11 management header plus SSID, rate and
 * (optionally) DS-parameter IEs.  The firmware is told where each
 * piece lives via the offset/len pairs in *preq, so it can splice in
 * per-band IEs itself.  Returns 0, or ENOBUFS if the template buffer
 * is too small.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	KASSERT(ic->ic_des_esslen < sizeof(ic->ic_des_essid));
	/* Header + SSID IE (2-byte IE header) must fit up front. */
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	/* SSID IE directly follows the MAC header. */
	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates + extended rates: two IE headers needed. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		/* Empty DS params IE; the firmware fills in the channel. */
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	/*
	 * NOTE(review): with IEEE80211_NO_HT defined (as at the top of
	 * this file), common_data.offset keeps its zeroed value while
	 * len below spans the most recently filled band IEs (pos is
	 * stale) -- verify the firmware ignores common_data in that
	 * configuration.
	 */
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
5344 
/*
 * Start a one-shot scan using the older LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD).  The request is allocated on the
 * heap because it carries a variable number of channel entries plus
 * the probe request template.  Returns 0 or an errno.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Fixed header + per-channel configs + probe request template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* Active scan only when we have an SSID to look for. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	/* Scan both bands if the NVM says 5 GHz is available. */
	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe request template sits after the channel configs. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5447 
/*
 * Send the one-time UMAC scan configuration (IWM_SCAN_CFG_CMD):
 * antenna chains, legacy rate set, dwell times, our MAC address and
 * the full channel list.  Must precede iwm_umac_scan().  Returns 0 or
 * an errno.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	/* All legacy (non-HT) CCK and OFDM rates. */
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One trailing byte per scannable channel. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* Channel index 0 is unused; skip unconfigured channels. */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
5519 
/*
 * Start a one-shot scan using the newer UMAC scan API
 * (IWM_SCAN_REQ_UMAC).  The request carries per-channel configs
 * followed by a "tail" with the SSID list, schedule and probe request
 * template.  Requires a prior iwm_config_umac_scan().  Returns 0 or an
 * errno.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Fixed header + per-channel configs + tail. */
	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail sits after the channel configs. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5601 
5602 static uint8_t
5603 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5604 {
5605 	int i;
5606 	uint8_t rval;
5607 
5608 	for (i = 0; i < rs->rs_nrates; i++) {
5609 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5610 		if (rval == iwm_rates[ridx].rate)
5611 			return rs->rs_rates[i];
5612 	}
5613 	return 0;
5614 }
5615 
/*
 * Compute the CCK and OFDM basic-rate bitmaps the firmware uses for
 * control response (ACK/CTS) rate selection.  Starts from the BSS's
 * advertised basic rates and then adds the lower mandatory rates
 * required by 802.11-2007 9.6 (see the comments below).  The CCK
 * bitmap is only populated on 2 GHz channels.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* Collect the advertised basic rates, tracking the lowest each. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5701 
/*
 * Fill in the parts of a MAC context command shared by add and modify
 * operations: station identity, ACK rates, slot/preamble flags and the
 * per-AC EDCA (WME) parameters.
 */
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Program the EDCA parameters of each AC into its Tx fifo slot. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		/* TXOP limit is stored in units of 32 usec. */
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
		/*
		 * NOTE(review): no 'break' above -- the NONMEMBER/
		 * NONHT_MIXED cases fall through and also set FAT_PROT
		 * below.  Verify this is intentional; this section is
		 * currently compiled out (IEEE80211_NO_HT is defined at
		 * the top of this file).
		 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
5776 
5777 static void
5778 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5779     struct iwm_mac_data_sta *sta, int assoc)
5780 {
5781 	struct ieee80211_node *ni = &in->in_ni;
5782 	uint32_t dtim_off;
5783 	uint64_t tsf;
5784 
5785 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5786 	tsf = le64toh(ni->ni_tstamp.tsf);
5787 
5788 	sta->is_assoc = htole32(assoc);
5789 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5790 	sta->dtim_tsf = htole64(tsf + dtim_off);
5791 	sta->bi = htole32(ni->ni_intval);
5792 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5793 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5794 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5795 	sta->listen_interval = htole32(10);
5796 	sta->assoc_id = htole32(ni->ni_associd);
5797 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5798 }
5799 
5800 static int
5801 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5802     int assoc)
5803 {
5804 	struct ieee80211_node *ni = &in->in_ni;
5805 	struct iwm_mac_ctx_cmd cmd;
5806 
5807 	memset(&cmd, 0, sizeof(cmd));
5808 
5809 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5810 
5811 	/* Allow beacons to pass through as long as we are not associated or we
5812 	 * do not have dtim period information */
5813 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5814 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5815 	else
5816 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5817 
5818 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5819 }
5820 
/* Consecutive missed beacons before we declare a beacon miss. */
#define IWM_MISSED_BEACONS_THRESHOLD 8

/*
 * Handle a missed-beacons notification from the firmware.  When too
 * many consecutive beacons have been missed since the last Rx, notify
 * net80211 so it can probe or roam.
 */
static void
iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
	int s;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MISSED_BEACONS_THRESHOLD) {
		/* ieee80211_beacon_miss() must run at splnet. */
		s = splnet();
		ieee80211_beacon_miss(&sc->sc_ic);
		splx(s);
	}
}
5848 
5849 static int
5850 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5851 {
5852 	struct iwm_time_quota_cmd cmd;
5853 	int i, idx, num_active_macs, quota, quota_rem;
5854 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5855 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
5856 	uint16_t id;
5857 
5858 	memset(&cmd, 0, sizeof(cmd));
5859 
5860 	/* currently, PHY ID == binding ID */
5861 	if (in) {
5862 		id = in->in_phyctxt->id;
5863 		KASSERT(id < IWM_MAX_BINDINGS);
5864 		colors[id] = in->in_phyctxt->color;
5865 
5866 		if (1)
5867 			n_ifs[id] = 1;
5868 	}
5869 
5870 	/*
5871 	 * The FW's scheduling session consists of
5872 	 * IWM_MAX_QUOTA fragments. Divide these fragments
5873 	 * equally between all the bindings that require quota
5874 	 */
5875 	num_active_macs = 0;
5876 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5877 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5878 		num_active_macs += n_ifs[i];
5879 	}
5880 
5881 	quota = 0;
5882 	quota_rem = 0;
5883 	if (num_active_macs) {
5884 		quota = IWM_MAX_QUOTA / num_active_macs;
5885 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
5886 	}
5887 
5888 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5889 		if (colors[i] < 0)
5890 			continue;
5891 
5892 		cmd.quotas[idx].id_and_color =
5893 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5894 
5895 		if (n_ifs[i] <= 0) {
5896 			cmd.quotas[idx].quota = htole32(0);
5897 			cmd.quotas[idx].max_duration = htole32(0);
5898 		} else {
5899 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5900 			cmd.quotas[idx].max_duration = htole32(0);
5901 		}
5902 		idx++;
5903 	}
5904 
5905 	/* Give the remainder of the session to the first binding */
5906 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5907 
5908 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5909 }
5910 
/*
 * Prepare the firmware for authentication/association with the current
 * BSS: enable the full-on station flow, set up multicast filtering,
 * point PHY context 0 at the BSS channel, create MAC and binding
 * contexts, add the AP station, and finally "protect" the session with
 * a time event so the firmware stays on-channel while net80211 runs the
 * auth/assoc exchange.  The command order below is firmware-mandated.
 * Returns 0 on success or an error from the failing firmware command.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Retune PHY context 0 to the BSS channel before binding to it. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* Protect for two beacon intervals, or a default duration. */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	DELAY(100);

	return 0;
}
5968 
5969 static int
5970 iwm_assoc(struct iwm_softc *sc)
5971 {
5972 	struct ieee80211com *ic = &sc->sc_ic;
5973 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5974 	int err;
5975 
5976 	err = iwm_add_sta_cmd(sc, in, 1);
5977 	if (err)
5978 		return err;
5979 
5980 	return 0;
5981 }
5982 
5983 static struct ieee80211_node *
5984 iwm_node_alloc(struct ieee80211_node_table *nt)
5985 {
5986 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5987 }
5988 
/*
 * Periodic (500 ms) callout driving AMRR rate adaptation.  Only acts
 * in STA mode with no fixed rate configured.  The HT paths below are
 * compiled out while IEEE80211_NO_HT is defined at the top of this
 * file; with HT enabled, a rate change chosen by AMRR is pushed to the
 * firmware from process context via the setrates softint.
 */
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
#endif
	int s;

	s = splnet();
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the current TX rate/MCS to detect a change. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firmware's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	/* Re-arm ourselves; runs for as long as the callout is scheduled. */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
6032 
6033 #ifndef IEEE80211_NO_HT
6034 static void
6035 iwm_setrates_task(void *arg)
6036 {
6037 	struct iwm_softc *sc = arg;
6038 	struct ieee80211com *ic = &sc->sc_ic;
6039 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6040 
6041 	/* Update rates table based on new TX rate determined by AMRR. */
6042 	iwm_setrates(in);
6043 }
6044 
/*
 * Build the firmware link-quality (LQ) rate table for the given node
 * and send it with IWM_LQ_CMD.  The table is filled in descending rate
 * order starting from the node's current TX rate; remaining slots are
 * padded with the lowest rate added.  Returns the result of the
 * command submission.
 */
static int
iwm_setrates(struct iwm_node *in)
{
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* Short guard interval is usable if both peer and channel allow. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		/* Prefer an HT entry when the peer supports this MCS. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Otherwise try a legacy rate at or below the current one. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Select antenna A; mark CCK rates so the PHY uses them. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
6145 #endif
6146 
/*
 * ifmedia change callback.  Translate a user-selected fixed rate (or
 * fixed MCS when HT is compiled in) into a hardware rate index, then
 * restart the interface if it is currently up and running so the new
 * setting takes effect.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		/*
		 * NOTE(review): if no entry matches, ridx ends up as
		 * IWM_RIDX_MAX + 1 and is stored as-is; presumably the
		 * rate is always found for supported modes -- confirm.
		 */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* If not up and running, err stays ENETRESET from above. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
6181 
/*
 * Perform an 802.11 state transition on behalf of net80211.  Runs from
 * the newstate workqueue (see iwm_newstate_cb) at splnet.  Downward
 * transitions out of AUTH/ASSOC/RUN require a full device reset because
 * the firmware cannot step backwards through its context state.
 * Ends by chaining to the original net80211 newstate handler.
 */
static int
iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	enum ieee80211_state ostate = ic->ic_state;
	struct iwm_node *in;
	int err;

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate != IEEE80211_S_INIT) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			iwm_stop(ifp, 0);
			iwm_init(ifp);
			return 0;
		}

		iwm_stop_device(sc);
		iwm_init_hw(sc);
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* Ignore a redundant scan request while already scanning. */
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return 0;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan: %d\n",
			    DEVNAME(sc), err));
			return err;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/* Scan end is asynchronous; don't chain to sc_newstate yet. */
		return 0;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return err;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return err;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return err;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could send power command (error %d)\n", err);
			return err;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return err;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return err;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return err;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		/* Kick off periodic rate adaptation. */
		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	return sc->sc_newstate(ic, nstate, arg);
}
6323 
6324 static void
6325 iwm_newstate_cb(struct work *wk, void *v)
6326 {
6327 	struct iwm_softc *sc = v;
6328 	struct ieee80211com *ic = &sc->sc_ic;
6329 	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
6330 	enum ieee80211_state nstate = iwmns->ns_nstate;
6331 	int generation = iwmns->ns_generation;
6332 	int arg = iwmns->ns_arg;
6333 	int s;
6334 
6335 	kmem_intr_free(iwmns, sizeof(*iwmns));
6336 
6337 	s = splnet();
6338 
6339 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
6340 	if (sc->sc_generation != generation) {
6341 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
6342 		if (nstate == IEEE80211_S_INIT) {
6343 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
6344 			    "calling sc_newstate()\n"));
6345 			(void) sc->sc_newstate(ic, nstate, arg);
6346 		}
6347 	} else
6348 		(void) iwm_do_newstate(ic, nstate, arg);
6349 
6350 	splx(s);
6351 }
6352 
6353 static int
6354 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6355 {
6356 	struct iwm_newstate_state *iwmns;
6357 	struct ifnet *ifp = IC2IFP(ic);
6358 	struct iwm_softc *sc = ifp->if_softc;
6359 
6360 	callout_stop(&sc->sc_calib_to);
6361 
6362 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6363 	if (!iwmns) {
6364 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6365 		return ENOMEM;
6366 	}
6367 
6368 	iwmns->ns_nstate = nstate;
6369 	iwmns->ns_arg = arg;
6370 	iwmns->ns_generation = sc->sc_generation;
6371 
6372 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6373 
6374 	return 0;
6375 }
6376 
6377 static void
6378 iwm_endscan(struct iwm_softc *sc)
6379 {
6380 	struct ieee80211com *ic = &sc->sc_ic;
6381 	int s;
6382 
6383 	DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6384 
6385 	s = splnet();
6386 	if (ic->ic_state == IEEE80211_S_SCAN)
6387 		ieee80211_end_scan(ic);
6388 	splx(s);
6389 }
6390 
6391 /*
6392  * Aging and idle timeouts for the different possible scenarios
6393  * in default configuration
6394  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Each row is { aging timer, idle timer } for one scenario. */
	{	/* single unicast flow */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast flow */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* BA (block-ack, per macro name) */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re-send */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
6418 
6419 /*
6420  * Aging and idle timeouts for the different possible scenarios
6421  * in single BSS MAC configuration.
6422  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Each row is { aging timer, idle timer } for one scenario. */
	{	/* single unicast flow */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast flow */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* BA (block-ack, per macro name) */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re-send */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
6446 
/*
 * Fill in a smart-fifo configuration command.  With 'ni' non-NULL we
 * are in (or entering) association: pick the full-on watermark from
 * the AP's antenna capabilities and use the single-BSS timeout table.
 * With 'ni' NULL, use unassociated defaults.
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are one fixed value for all scenarios. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts depend on whether we have a BSS. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
6494 
6495 static int
6496 iwm_sf_config(struct iwm_softc *sc, int new_state)
6497 {
6498 	struct ieee80211com *ic = &sc->sc_ic;
6499 	struct iwm_sf_cfg_cmd sf_cmd = {
6500 		.state = htole32(IWM_SF_FULL_ON),
6501 	};
6502 
6503 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6504 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6505 
6506 	switch (new_state) {
6507 	case IWM_SF_UNINIT:
6508 	case IWM_SF_INIT_OFF:
6509 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
6510 		break;
6511 	case IWM_SF_FULL_ON:
6512 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6513 		break;
6514 	default:
6515 		return EINVAL;
6516 	}
6517 
6518 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6519 	    sizeof(sf_cmd), &sf_cmd);
6520 }
6521 
6522 static int
6523 iwm_send_bt_init_conf(struct iwm_softc *sc)
6524 {
6525 	struct iwm_bt_coex_cmd bt_cmd;
6526 
6527 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6528 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6529 
6530 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6531 }
6532 
6533 static bool
6534 iwm_is_lar_supported(struct iwm_softc *sc)
6535 {
6536 	bool nvm_lar = sc->sc_nvm.lar_enabled;
6537 	bool tlv_lar = isset(sc->sc_enabled_capa,
6538 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6539 
6540 	if (iwm_lar_disable)
6541 		return false;
6542 
6543 	/*
6544 	 * Enable LAR only if it is supported by the FW (TLV) &&
6545 	 * enabled in the NVM
6546 	 */
6547 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6548 		return nvm_lar && tlv_lar;
6549 	else
6550 		return tlv_lar;
6551 }
6552 
/*
 * Send an MCC (mobile country code) update to the firmware to select
 * the regulatory domain identified by the two-letter 'alpha2' code.
 * A no-op (returning 0) when LAR is not supported.  The command format
 * depends on the firmware's LAR_SUPPORT_V2 capability.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_SKB,
		.data = { &mcc_cmd },
	};
	int err;
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_is_lar_supported(sc)) {
		DPRINTF(("%s: no LAR support\n", __func__));
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* The MCC is the two ASCII letters packed into 16 bits. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	/* Response contents are not used; just release the buffer. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
6592 
6593 static void
6594 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6595 {
6596 	struct iwm_host_cmd cmd = {
6597 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6598 		.len = { sizeof(uint32_t), },
6599 		.data = { &backoff, },
6600 	};
6601 
6602 	iwm_send_cmd(sc, &cmd);
6603 }
6604 
6605 static int
6606 iwm_init_hw(struct iwm_softc *sc)
6607 {
6608 	struct ieee80211com *ic = &sc->sc_ic;
6609 	int err, i, ac;
6610 
6611 	err = iwm_start_hw(sc);
6612 	if (err) {
6613 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6614 		return err;
6615 	}
6616 
6617 	err = iwm_run_init_mvm_ucode(sc, 0);
6618 	if (err)
6619 		return err;
6620 
6621 	/* Should stop and start HW since INIT image just loaded. */
6622 	iwm_stop_device(sc);
6623 	err = iwm_start_hw(sc);
6624 	if (err) {
6625 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6626 		return err;
6627 	}
6628 
6629 	/* Restart, this time with the regular firmware */
6630 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6631 	if (err) {
6632 		aprint_error_dev(sc->sc_dev,
6633 		    "could not load firmware (error %d)\n", err);
6634 		goto err;
6635 	}
6636 
6637 	err = iwm_send_bt_init_conf(sc);
6638 	if (err) {
6639 		aprint_error_dev(sc->sc_dev,
6640 		    "could not init bt coex (error %d)\n", err);
6641 		goto err;
6642 	}
6643 
6644 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6645 	if (err) {
6646 		aprint_error_dev(sc->sc_dev,
6647 		    "could not init tx ant config (error %d)\n", err);
6648 		goto err;
6649 	}
6650 
6651 	/* Send phy db control command and then phy db calibration*/
6652 	err = iwm_send_phy_db_data(sc);
6653 	if (err) {
6654 		aprint_error_dev(sc->sc_dev,
6655 		    "could not init phy db (error %d)\n", err);
6656 		goto err;
6657 	}
6658 
6659 	err = iwm_send_phy_cfg_cmd(sc);
6660 	if (err) {
6661 		aprint_error_dev(sc->sc_dev,
6662 		    "could not send phy config (error %d)\n", err);
6663 		goto err;
6664 	}
6665 
6666 	/* Add auxiliary station for scanning */
6667 	err = iwm_add_aux_sta(sc);
6668 	if (err) {
6669 		aprint_error_dev(sc->sc_dev,
6670 		    "could not add aux station (error %d)\n", err);
6671 		goto err;
6672 	}
6673 
6674 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6675 		/*
6676 		 * The channel used here isn't relevant as it's
6677 		 * going to be overwritten in the other flows.
6678 		 * For now use the first channel we have.
6679 		 */
6680 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6681 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6682 		    IWM_FW_CTXT_ACTION_ADD, 0);
6683 		if (err) {
6684 			aprint_error_dev(sc->sc_dev,
6685 			    "could not add phy context %d (error %d)\n",
6686 			    i, err);
6687 			goto err;
6688 		}
6689 	}
6690 
6691 	/* Initialize tx backoffs to the minimum. */
6692 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6693 		iwm_tt_tx_backoff(sc, 0);
6694 
6695 	err = iwm_power_update_device(sc);
6696 	if (err) {
6697 		aprint_error_dev(sc->sc_dev,
6698 		    "could send power command (error %d)\n", err);
6699 		goto err;
6700 	}
6701 
6702 	err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6703 	if (err) {
6704 		aprint_error_dev(sc->sc_dev,
6705 		    "could not init LAR (error %d)\n", err);
6706 		goto err;
6707 	}
6708 
6709 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6710 		err = iwm_config_umac_scan(sc);
6711 		if (err) {
6712 			aprint_error_dev(sc->sc_dev,
6713 			    "could not configure scan (error %d)\n", err);
6714 			goto err;
6715 		}
6716 	}
6717 
6718 	for (ac = 0; ac < WME_NUM_AC; ac++) {
6719 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6720 		    iwm_ac_to_tx_fifo[ac]);
6721 		if (err) {
6722 			aprint_error_dev(sc->sc_dev,
6723 			    "could not enable Tx queue %d (error %d)\n",
6724 			    i, err);
6725 			goto err;
6726 		}
6727 	}
6728 
6729 	err = iwm_disable_beacon_filter(sc);
6730 	if (err) {
6731 		aprint_error_dev(sc->sc_dev,
6732 		    "could not disable beacon filter (error %d)\n", err);
6733 		goto err;
6734 	}
6735 
6736 	return 0;
6737 
6738  err:
6739 	iwm_stop_device(sc);
6740 	return err;
6741 }
6742 
6743 /* Allow multicast from our BSSID. */
6744 static int
6745 iwm_allow_mcast(struct iwm_softc *sc)
6746 {
6747 	struct ieee80211com *ic = &sc->sc_ic;
6748 	struct ieee80211_node *ni = ic->ic_bss;
6749 	struct iwm_mcast_filter_cmd *cmd;
6750 	size_t size;
6751 	int err;
6752 
6753 	size = roundup(sizeof(*cmd), 4);
6754 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6755 	if (cmd == NULL)
6756 		return ENOMEM;
6757 	cmd->filter_own = 1;
6758 	cmd->port_id = 0;
6759 	cmd->count = 0;
6760 	cmd->pass_all = 1;
6761 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6762 
6763 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6764 	kmem_intr_free(cmd, size);
6765 	return err;
6766 }
6767 
6768 static int
6769 iwm_init(struct ifnet *ifp)
6770 {
6771 	struct iwm_softc *sc = ifp->if_softc;
6772 	int err;
6773 
6774 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6775 		return 0;
6776 
6777 	sc->sc_generation++;
6778 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
6779 
6780 	err = iwm_init_hw(sc);
6781 	if (err) {
6782 		iwm_stop(ifp, 1);
6783 		return err;
6784 	}
6785 
6786 	ifp->if_flags &= ~IFF_OACTIVE;
6787 	ifp->if_flags |= IFF_RUNNING;
6788 
6789 	ieee80211_begin_scan(&sc->sc_ic, 0);
6790 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6791 
6792 	return 0;
6793 }
6794 
/*
 * ifnet start routine.  Drains the net80211 management queue and,
 * once in RUN state, the interface send queue, encapsulating and
 * handing each frame to iwm_tx().  Stops and sets IFF_OACTIVE when
 * any Tx ring is full (qfullmsk).
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node is stashed in the mbuf context. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			M_CLEARCTX(m);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			if_statinc(ifp, if_oerrors);
			continue;
		}

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* Wrap the Ethernet frame in an 802.11 header. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			if_statinc(ifp, if_oerrors);
			continue;
		}

 sendit:
		bpf_mtap3(ic->ic_rawbpf, m, BPF_D_OUT);

		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		/* Arm the Tx watchdog for this frame. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}
}
6880 
/*
 * ifnet stop routine: mark the interface down, push net80211 back to
 * INIT state, cancel timers/LED blinking, and power the device off.
 * Bumping sc_generation invalidates any newstate work still queued.
 * The 'disable' argument is unused here.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* The PHY context binding dies with the firmware state. */
	if (in)
		in->in_phyctxt = NULL;

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	iwm_led_blink_stop(sc);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
6904 
6905 static void
6906 iwm_watchdog(struct ifnet *ifp)
6907 {
6908 	struct iwm_softc *sc = ifp->if_softc;
6909 
6910 	ifp->if_timer = 0;
6911 	if (sc->sc_tx_timer > 0) {
6912 		if (--sc->sc_tx_timer == 0) {
6913 			aprint_error_dev(sc->sc_dev, "device timeout\n");
6914 #ifdef IWM_DEBUG
6915 			iwm_nic_error(sc);
6916 #endif
6917 			ifp->if_flags &= ~IFF_UP;
6918 			iwm_stop(ifp, 1);
6919 			if_statinc(ifp, if_oerrors);
6920 			return;
6921 		}
6922 		ifp->if_timer = 1;
6923 	}
6924 
6925 	ieee80211_watchdog(&sc->sc_ic);
6926 }
6927 
/*
 * ifnet ioctl handler.  Runs at splnet.  Handles interface up/down and
 * multicast list changes locally; everything else is passed through to
 * net80211.  An ENETRESET result triggers a restart of the interface
 * if it is up and running.
 */
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, err = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		err = ifioctl_common(ifp, cmd, data);
		if (err)
			break;
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
				if (err)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * NOTE(review): SIOCADDMULTI is passed to ifreq_getaddr()
		 * for both the add and delete cases -- presumably both
		 * commands share the same ifreq layout; confirm intended.
		 */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		err = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);
		if (err == ENETRESET)
			err = 0;
		break;

	default:
		err = ieee80211_ioctl(ic, cmd, data);
		break;
	}

	/* A config change that needs a restart: bounce the interface. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	return err;
}
6985 
6986 /*
6987  * Note: This structure is read from the device with IO accesses,
6988  * and the reading already does the endian conversion. As it is
6989  * read with uint32_t-sized accesses, any members with a different size
6990  * need to be ordered correctly though!
6991  */
/* Keep field order and sizes in sync with the firmware layout
 * (LOG_ERROR_TABLE_API_S_VER_3, per the trailing comment). */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
7039 
7040 /*
7041  * UMAC error struct - relevant starting from family 8000 chip.
7042  * Note: This structure is read from the device with IO accesses,
7043  * and the reading already does the endian conversion. As it is
7044  * read with u32-sized accesses, any members with a different size
7045  * need to be ordered correctly though!
7046  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
7064 
/*
 * Error-log geometry: the log starts one uint32_t in (past the "valid"
 * word) and each log element is seven uint32_t wide.  Used only for the
 * sanity checks in iwm_nic_error()/iwm_nic_umac_error() below.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
7067 
#ifdef IWM_DEBUG
/*
 * Mapping from firmware error ids to symbolic names, used when dumping
 * the device error log.  The final entry ("ADVANCED_SYSASSERT", num 0)
 * is the catch-all that iwm_desc_lookup() returns for unknown ids, so
 * it must remain last.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
7090 
7091 static const char *
7092 iwm_desc_lookup(uint32_t num)
7093 {
7094 	int i;
7095 
7096 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
7097 		if (advanced_lookup[i].num == num)
7098 			return advanced_lookup[i].name;
7099 
7100 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7101 	return advanced_lookup[i].name;
7102 }
7103 
7104 /*
7105  * Support for dumping the error log seemed like a good idea ...
7106  * but it's mostly hex junk and the only sensible thing is the
7107  * hw/ucode revision (which we know anyway).  Since it's here,
7108  * I'll just leave it in, just in case e.g. the Intel guys want to
7109  * help us decipher some "ADVANCED_SYSASSERT" later.
7110  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table t;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* The table pointer came from the firmware's alive response; a
	 * value below SRAM (0x800000) cannot be a valid log address. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!t.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/* NOTE(review): 'valid' doubles as an entry count here; this
	 * comparison mirrors the upstream driver's sanity check. */
	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
	    t.trm_hw_status0);
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
	    t.trm_hw_status1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
	    t.fw_rev_type);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
	    t.major);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
	    t.minor);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    t.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);

	/* Family-8000 firmware also provides a separate UMAC error table. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
7186 
/*
 * Dump the UMAC error event table (present on family-8000 and newer
 * firmware).  Called from iwm_nic_error() when the firmware reported a
 * UMAC table address in its alive response.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Addresses below SRAM (0x800000) cannot be a valid log pointer. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
		iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
#endif
7234 
/*
 * Sync the DMA-mapped response structure that follows the RX packet
 * header and point _var_ at it.  NOTE: relies on 'sc' and 'data' being
 * in scope at the use site (it is only used inside iwm_notif_intr()).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
7241 
/*
 * Like SYNC_RESP_STRUCT, but for a variable-length payload: sync _len_
 * bytes of response data following the RX packet header and point
 * _ptr_ at it.  Relies on 'sc' and 'data' being in scope at the use
 * site.
 *
 * Fixed: the body previously passed sizeof(len), silently capturing a
 * caller-scope variable named 'len' and syncing only the size of that
 * variable (e.g. 4 bytes) instead of the requested length.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
7248 
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * Fixed: the argument is now parenthesized and the trailing semicolon
 * removed, so callers terminate the statement themselves and the macro
 * behaves like a normal expression statement.
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
7250 
/*
 * Drain the RX ring: process every notification/response packet the
 * firmware has posted since the last pass (dispatching on the wide
 * command id), then hand the processed buffers back to the hardware by
 * updating the write pointer.  Runs from the softint on RX interrupts.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* closed_rb_num: index the firmware has filled up to (12 bits). */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int orig_qid, qid, idx, code;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 of qid marks firmware-originated packets;
		 * see the iwm_cmd_done() check at the bottom of the loop. */
		orig_qid = pkt->hdr.qid;
		qid = orig_qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * The "alive" response layout differs per firmware API
		 * version; distinguish the versions by payload length.
		 */
		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake whoever is waiting in iwm_load_ucode(). */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			uint16_t size = le16toh(phy_db_notif->length);
			/* Also sync the variable-length data after the
			 * fixed notification header. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		/* Copy the whole response for a synchronous waiter. */
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Record the regulatory country code as a string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
		    IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif1;
			struct iwm_dts_measurement_notif_v2 *notif2;

			/* Two notification versions; tell them apart by size. */
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
				SYNC_RESP_STRUCT(notif1, pkt);
				DPRINTF(("%s: DTS temp=%d \n",
				    DEVNAME(sc), notif1->temp));
				break;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
				SYNC_RESP_STRUCT(notif2, pkt);
				DPRINTF(("%s: DTS temp=%d \n",
				    DEVNAME(sc), notif2->temp));
				break;
			}
			break;
		}

		/* Generic command responses: stash for a waiting caller. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_FW_PAGING_BLOCK_CMD):
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
7549 
7550 static int
7551 iwm_intr(void *arg)
7552 {
7553 	struct iwm_softc *sc = arg;
7554 
7555 	/* Disable interrupts */
7556 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7557 
7558 	softint_schedule(sc->sc_soft_ih);
7559 	return 1;
7560 }
7561 
/*
 * Soft interrupt handler.  Gathers pending interrupt causes (from the
 * ICT table when in use, otherwise from the INT/FH_INT_STATUS CSRs),
 * acknowledges them, and dispatches: fatal firmware/hardware errors,
 * firmware-chunk-load completion, rfkill, and RX/notification work.
 * On the non-fatal paths it re-enables the interrupts that iwm_intr()
 * masked.
 */
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1, r2;
	int isperiodic = 0, s;

	if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		/* NOTE(review): htole32 on a value read from DMA memory;
		 * the swap is the same as le32toh, so this is harmless. */
		tmp = htole32(ict[sc->ict_cur]);
		if (tmp == 0)
			goto out_ena;	/* Interrupt not for us. */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* All-ones / 0xa5a5a5a* reads mean the device is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			return;	/* Hardware gone! */
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* Interrupt not for us. */
	}

	/* Acknowledge interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
	if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		/* Fatal path: mark the interface down and stop the device. */
 fatal:
		s = splnet();
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		splx(s);
		/* Don't restore interrupt mask */
		return;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		/* rfkill while up is treated like a fatal stop. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
			goto fatal;
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

out_ena:
	iwm_restore_interrupts(sc);
}
7689 
7690 /*
7691  * Autoconf glue-sniffing
7692  */
7693 
/*
 * PCI product ids of all devices this driver supports; consulted by
 * iwm_match() during autoconfiguration.
 */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3168,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_8265,
};
7710 
7711 static int
7712 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7713 {
7714 	struct pci_attach_args *pa = aux;
7715 
7716 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7717 		return 0;
7718 
7719 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7720 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7721 			return 1;
7722 
7723 	return 0;
7724 }
7725 
7726 static int
7727 iwm_preinit(struct iwm_softc *sc)
7728 {
7729 	int err;
7730 
7731 	err = iwm_start_hw(sc);
7732 	if (err) {
7733 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7734 		return err;
7735 	}
7736 
7737 	err = iwm_run_init_mvm_ucode(sc, 1);
7738 	iwm_stop_device(sc);
7739 	if (err)
7740 		return err;
7741 
7742 	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7743 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7744 	    ether_sprintf(sc->sc_nvm.hw_addr));
7745 
7746 	return 0;
7747 }
7748 
7749 static void
7750 iwm_attach_hook(device_t dev)
7751 {
7752 	struct iwm_softc *sc = device_private(dev);
7753 
7754 	iwm_config_complete(sc);
7755 }
7756 
7757 static void
7758 iwm_attach(device_t parent, device_t self, void *aux)
7759 {
7760 	struct iwm_softc *sc = device_private(self);
7761 	struct pci_attach_args *pa = aux;
7762 	pcireg_t reg, memtype;
7763 	char intrbuf[PCI_INTRSTR_LEN];
7764 	const char *intrstr;
7765 	int err;
7766 	int txq_i;
7767 	const struct sysctlnode *node;
7768 
7769 	sc->sc_dev = self;
7770 	sc->sc_pct = pa->pa_pc;
7771 	sc->sc_pcitag = pa->pa_tag;
7772 	sc->sc_dmat = pa->pa_dmat;
7773 	sc->sc_pciid = pa->pa_id;
7774 
7775 	pci_aprint_devinfo(pa, NULL);
7776 
7777 	if (workqueue_create(&sc->sc_nswq, "iwmns",
7778 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7779 		panic("%s: could not create workqueue: newstate",
7780 		    device_xname(self));
7781 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
7782 	if (sc->sc_soft_ih == NULL)
7783 		panic("%s: could not establish softint", device_xname(self));
7784 
7785 	/*
7786 	 * Get the offset of the PCI Express Capability Structure in PCI
7787 	 * Configuration Space.
7788 	 */
7789 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7790 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7791 	if (err == 0) {
7792 		aprint_error_dev(self,
7793 		    "PCIe capability structure not found!\n");
7794 		return;
7795 	}
7796 
7797 	/* Clear device-specific "PCI retry timeout" register (41h). */
7798 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7799 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7800 
7801 	/* Enable bus-mastering */
7802 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7803 	reg |= PCI_COMMAND_MASTER_ENABLE;
7804 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7805 
7806 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7807 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7808 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7809 	if (err) {
7810 		aprint_error_dev(self, "can't map mem space\n");
7811 		return;
7812 	}
7813 
7814 	/* Install interrupt handler. */
7815 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7816 	if (err) {
7817 		aprint_error_dev(self, "can't allocate interrupt\n");
7818 		return;
7819 	}
7820 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7821 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
7822 		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7823 	else
7824 		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7825 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7826 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7827 	    sizeof(intrbuf));
7828 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
7829 	    IPL_NET, iwm_intr, sc, device_xname(self));
7830 	if (sc->sc_ih == NULL) {
7831 		aprint_error_dev(self, "can't establish interrupt");
7832 		if (intrstr != NULL)
7833 			aprint_error(" at %s", intrstr);
7834 		aprint_error("\n");
7835 		return;
7836 	}
7837 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7838 
7839 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7840 
7841 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7842 	switch (PCI_PRODUCT(sc->sc_pciid)) {
7843 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7844 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7845 		sc->sc_fwname = "iwlwifi-3160-17.ucode";
7846 		sc->host_interrupt_operation_mode = 1;
7847 		sc->apmg_wake_up_wa = 1;
7848 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7849 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7850 		break;
7851 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7852 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7853 		sc->sc_fwname = "iwlwifi-7265D-22.ucode";
7854 		sc->host_interrupt_operation_mode = 0;
7855 		sc->apmg_wake_up_wa = 1;
7856 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7857 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7858 		break;
7859 	case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
7860 		sc->sc_fwname = "iwlwifi-3168-22.ucode";
7861 		sc->host_interrupt_operation_mode = 0;
7862 		sc->apmg_wake_up_wa = 1;
7863 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7864 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7865 		break;
7866 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7867 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7868 		sc->sc_fwname = "iwlwifi-7260-17.ucode";
7869 		sc->host_interrupt_operation_mode = 1;
7870 		sc->apmg_wake_up_wa = 1;
7871 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7872 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7873 		break;
7874 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7875 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7876 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7877 		    IWM_CSR_HW_REV_TYPE_7265D ?
7878 		    "iwlwifi-7265D-22.ucode": "iwlwifi-7265-17.ucode";
7879 		sc->host_interrupt_operation_mode = 0;
7880 		sc->apmg_wake_up_wa = 1;
7881 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7882 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7883 		break;
7884 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7885 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7886 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
7887 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
7888 		sc->sc_fwname = "iwlwifi-8000C-22.ucode";
7889 		sc->host_interrupt_operation_mode = 0;
7890 		sc->apmg_wake_up_wa = 0;
7891 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7892 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7893 		break;
7894 	case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
7895 		sc->sc_fwname = "iwlwifi-8265-22.ucode";
7896 		sc->host_interrupt_operation_mode = 0;
7897 		sc->apmg_wake_up_wa = 0;
7898 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7899 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7900 		break;
7901 	default:
7902 		aprint_error_dev(self, "unknown product %#x",
7903 		    PCI_PRODUCT(sc->sc_pciid));
7904 		return;
7905 	}
7906 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7907 
7908 	/*
7909 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
7910 	 * changed, and now the revision step also includes bit 0-1 (no more
7911 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
7912 	 * in the old format.
7913 	 */
7914 
7915 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7916 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7917 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7918 
7919 	if (iwm_prepare_card_hw(sc) != 0) {
7920 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7921 		return;
7922 	}
7923 
7924 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7925 		uint32_t hw_step;
7926 
7927 		/*
7928 		 * In order to recognize C step the driver should read the
7929 		 * chip version id located at the AUX bus MISC address.
7930 		 */
7931 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7932 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7933 		DELAY(2);
7934 
7935 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7936 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7937 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7938 				   25000);
7939 		if (!err) {
7940 			aprint_error_dev(sc->sc_dev,
7941 			    "failed to wake up the nic\n");
7942 			return;
7943 		}
7944 
7945 		if (iwm_nic_lock(sc)) {
7946 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7947 			hw_step |= IWM_ENABLE_WFPM;
7948 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7949 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7950 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7951 			if (hw_step == 0x3)
7952 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7953 				    (IWM_SILICON_C_STEP << 2);
7954 			iwm_nic_unlock(sc);
7955 		} else {
7956 			aprint_error_dev(sc->sc_dev,
7957 			    "failed to lock the nic\n");
7958 			return;
7959 		}
7960 	}
7961 
7962 	/*
7963 	 * Allocate DMA memory for firmware transfers.
7964 	 * Must be aligned on a 16-byte boundary.
7965 	 */
7966 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
7967 	    16);
7968 	if (err) {
7969 		aprint_error_dev(sc->sc_dev,
7970 		    "could not allocate memory for firmware\n");
7971 		return;
7972 	}
7973 
7974 	/* Allocate "Keep Warm" page, used internally by the card. */
7975 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7976 	if (err) {
7977 		aprint_error_dev(sc->sc_dev,
7978 		    "could not allocate keep warm page\n");
7979 		goto fail1;
7980 	}
7981 
7982 	/* Allocate interrupt cause table (ICT).*/
7983 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
7984 	    1 << IWM_ICT_PADDR_SHIFT);
7985 	if (err) {
7986 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
7987 		goto fail2;
7988 	}
7989 
7990 	/* TX scheduler rings must be aligned on a 1KB boundary. */
7991 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7992 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7993 	if (err) {
7994 		aprint_error_dev(sc->sc_dev,
7995 		    "could not allocate TX scheduler rings\n");
7996 		goto fail3;
7997 	}
7998 
7999 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
8000 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8001 		if (err) {
8002 			aprint_error_dev(sc->sc_dev,
8003 			    "could not allocate TX ring %d\n", txq_i);
8004 			goto fail4;
8005 		}
8006 	}
8007 
8008 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
8009 	if (err) {
8010 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
8011 		goto fail5;
8012 	}
8013 
8014 	/* Clear pending interrupts. */
8015 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
8016 
8017 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8018 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
8019 	    SYSCTL_DESCR("iwm per-controller controls"),
8020 	    NULL, 0, NULL, 0,
8021 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
8022 	    CTL_EOL)) != 0) {
8023 		aprint_normal_dev(sc->sc_dev,
8024 		    "couldn't create iwm per-controller sysctl node\n");
8025 	}
8026 	if (err == 0) {
8027 		int iwm_nodenum = node->sysctl_num;
8028 
8029 		/* Reload firmware sysctl node */
8030 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8031 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
8032 		    SYSCTL_DESCR("Reload firmware"),
8033 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
8034 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
8035 		    CTL_EOL)) != 0) {
8036 			aprint_normal_dev(sc->sc_dev,
8037 			    "couldn't create load_fw sysctl node\n");
8038 		}
8039 	}
8040 
8041 	callout_init(&sc->sc_calib_to, 0);
8042 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
8043 	callout_init(&sc->sc_led_blink_to, 0);
8044 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
8045 #ifndef IEEE80211_NO_HT
8046 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
8047 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
8048 		panic("%s: could not create workqueue: setrates",
8049 		    device_xname(self));
8050 	if (workqueue_create(&sc->sc_bawq, "iwmba",
8051 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
8052 		panic("%s: could not create workqueue: blockack",
8053 		    device_xname(self));
8054 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
8055 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
8056 		panic("%s: could not create workqueue: htprot",
8057 		    device_xname(self));
8058 #endif
8059 
8060 	/*
8061 	 * We can't do normal attach before the file system is mounted
8062 	 * because we cannot read the MAC address without loading the
8063 	 * firmware from disk.  So we postpone until mountroot is done.
8064 	 * Notably, this will require a full driver unload/load cycle
8065 	 * (or reboot) in case the firmware is not present when the
8066 	 * hook runs.
8067 	 */
8068 	config_mountroot(self, iwm_attach_hook);
8069 
8070 	return;
8071 
8072 fail5:	while (--txq_i >= 0)
8073 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
8074 fail4:	iwm_dma_contig_free(&sc->sched_dma);
8075 fail3:	if (sc->ict_dma.vaddr != NULL)
8076 		iwm_dma_contig_free(&sc->ict_dma);
8077 fail2:	iwm_dma_contig_free(&sc->kw_dma);
8078 fail1:	iwm_dma_contig_free(&sc->fw_dma);
8079 }
8080 
/*
 * Second-stage attach, run once the root file system is mounted (via
 * the config_mountroot(9) hook): load and parse the firmware through
 * iwm_preinit(), then attach the 802.11 layer and network interface.
 * The MAC address cannot be read before the firmware is loaded, which
 * is why this is deferred.  Returns 0 on success or the error from
 * iwm_preinit().
 */
static int
iwm_config_complete(struct iwm_softc *sc)
{
	device_t self = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int err;

	/* This must run at most once per device instance. */
	KASSERT(!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED));

	err = iwm_preinit(sc);
	if (err)
		return err;

	/*
	 * Attach interface
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
#ifdef notyet
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IEEE80211_NO_HT
	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#endif

	/* all hardware can do 2.4GHz band */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* Give each PHY context its array index as its ID. */
	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	/* AMRR rate-control success thresholds. */
	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	/* Hook up ifnet methods, then attach the interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	/*
	 * NOTE: the order here matters: if_initialize() before
	 * ieee80211_ifattach(), percpuq creation before if_register().
	 */
	if_initialize(ifp);
	ieee80211_ifattach(ic);
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;

	/* XXX media locking needs revisiting */
	mutex_init(&sc->sc_media_mtx, MUTEX_DEFAULT, IPL_SOFTNET);
	ieee80211_media_init_with_lock(ic,
	    iwm_media_change, ieee80211_media_status, &sc->sc_media_mtx);

	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	/* Register power handler; warn (but continue) on failure. */
	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	return 0;
}
8189 
8190 void
8191 iwm_radiotap_attach(struct iwm_softc *sc)
8192 {
8193 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8194 
8195 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
8196 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
8197 	    &sc->sc_drvbpf);
8198 
8199 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8200 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8201 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
8202 
8203 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
8204 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8205 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
8206 }
8207 
#if 0
/*
 * NOTE(review): everything in this #if 0 section is compiled out —
 * presumably carried over from the OpenBSD driver and never wired up
 * on NetBSD (no detach/activate hooks are registered below).  Kept
 * for reference.
 */

/* Restart the interface: stop it, and re-init if it is marked up. */
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	/* UP but not RUNNING: bring it back up. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

/* Resume helper: undo PCI sleep-state quirk, then restart interface. */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/* Autoconf devact hook: stop the interface on deactivation. */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
8255 
/* Autoconf attachment glue; no detach or activate hooks are provided. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
8258 
8259 static int
8260 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
8261 {
8262 	struct sysctlnode node;
8263 	struct iwm_softc *sc;
8264 	int err, t;
8265 
8266 	node = *rnode;
8267 	sc = node.sysctl_data;
8268 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
8269 	node.sysctl_data = &t;
8270 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
8271 	if (err || newp == NULL)
8272 		return err;
8273 
8274 	if (t == 0)
8275 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
8276 	return 0;
8277 }
8278 
/*
 * Create the global "hw.iwm" sysctl subtree at boot.  The node number
 * is saved in iwm_sysctl_root_num so iwm_attach() can hang the
 * per-controller nodes off it.  With IWM_DEBUG, an "hw.iwm.debug"
 * integer knob toggling debug printfs is added as well.
 */
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	/* hw.iwm: root of all iwm(4) controls. */
	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
8309