1 /*	$NetBSD: if_iwm.c,v 1.89 2024/02/09 06:01:03 mlelstv Exp $	*/
2 /*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
3 #define IEEE80211_NO_HT
4 /*
5  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
6  *   Author: Stefan Sperling <stsp@openbsd.org>
7  * Copyright (c) 2014 Fixup Software Ltd.
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016        Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <linuxwifi@intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
61  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
62  * Copyright(c) 2016        Intel Deutschland GmbH
63  * All rights reserved.
64  *
65  * Redistribution and use in source and binary forms, with or without
66  * modification, are permitted provided that the following conditions
67  * are met:
68  *
69  *  * Redistributions of source code must retain the above copyright
70  *    notice, this list of conditions and the following disclaimer.
71  *  * Redistributions in binary form must reproduce the above copyright
72  *    notice, this list of conditions and the following disclaimer in
73  *    the documentation and/or other materials provided with the
74  *    distribution.
75  *  * Neither the name Intel Corporation nor the names of its
76  *    contributors may be used to endorse or promote products derived
77  *    from this software without specific prior written permission.
78  *
79  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
80  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
81  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
82  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
83  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
84  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
85  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
86  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
87  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
88  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
89  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90  */
91 
92 /*-
93  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
94  *
95  * Permission to use, copy, modify, and distribute this software for any
96  * purpose with or without fee is hereby granted, provided that the above
97  * copyright notice and this permission notice appear in all copies.
98  *
99  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
100  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
101  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
102  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
103  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
104  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
105  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106  */
107 
108 #include <sys/cdefs.h>
109 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.89 2024/02/09 06:01:03 mlelstv Exp $");
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/kmem.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/socket.h>
119 #include <sys/sockio.h>
120 #include <sys/sysctl.h>
121 #include <sys/systm.h>
122 
123 #include <sys/cpu.h>
124 #include <sys/bus.h>
125 #include <sys/workqueue.h>
126 #include <machine/endian.h>
127 #include <sys/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 #include <dev/firmload.h>
133 
134 #include <net/bpf.h>
135 #include <net/if.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_ether.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/ip.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_radiotap.h>
146 
147 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
148 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
149 
150 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
151 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
152 
153 #ifdef IWM_DEBUG
154 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
155 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
156 int iwm_debug = 0;
157 #else
158 #define DPRINTF(x)	do { ; } while (0)
159 #define DPRINTFN(n, x)	do { ; } while (0)
160 #endif
161 
162 #include <dev/pci/if_iwmreg.h>
163 #include <dev/pci/if_iwmvar.h>
164 
165 static const uint8_t iwm_nvm_channels[] = {
166 	/* 2.4 GHz */
167 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
168 	/* 5 GHz */
169 	36, 40, 44, 48, 52, 56, 60, 64,
170 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
171 	149, 153, 157, 161, 165
172 };
173 
174 static const uint8_t iwm_nvm_channels_8000[] = {
175 	/* 2.4 GHz */
176 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
177 	/* 5 GHz */
178 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
179 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
180 	149, 153, 157, 161, 165, 169, 173, 177, 181
181 };
182 
183 #define IWM_NUM_2GHZ_CHANNELS	14
184 
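/*
 * Rate table used to map net80211 rates to hardware PLCP values.
 * The "rate" field is in units of 500 kb/s (so 2 == 1 Mb/s); ht_plcp
 * carries the corresponding SISO MCS PLCP value, or the "invalid"
 * marker where a legacy rate has no HT equivalent.
 */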
185 static const struct iwm_rate {
186 	uint8_t rate;
187 	uint8_t plcp;
188 	uint8_t ht_plcp;
189 } iwm_rates[] = {
190 		/* Legacy */		/* HT */
191 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
192 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
193 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
194 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
195 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
196 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
197 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
198 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
199 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
200 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
201 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
202 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
203 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
204 };
205 #define IWM_RIDX_CCK	0
206 #define IWM_RIDX_OFDM	4
207 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
208 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
209 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
210 
211 #ifndef IEEE80211_NO_HT
212 /* Convert an MCS index into an iwm_rates[] index. */
213 static const int iwm_mcs2ridx[] = {
214 	IWM_RATE_MCS_0_INDEX,
215 	IWM_RATE_MCS_1_INDEX,
216 	IWM_RATE_MCS_2_INDEX,
217 	IWM_RATE_MCS_3_INDEX,
218 	IWM_RATE_MCS_4_INDEX,
219 	IWM_RATE_MCS_5_INDEX,
220 	IWM_RATE_MCS_6_INDEX,
221 	IWM_RATE_MCS_7_INDEX,
222 };
223 #endif
224 
225 struct iwm_nvm_section {
226 	uint16_t length;
227 	uint8_t *data;
228 };
229 
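/* Argument block for a state transition deferred to the workqueue. */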
230 struct iwm_newstate_state {
231 	struct work ns_wk;
232 	enum ieee80211_state ns_nstate;
233 	int ns_arg;
234 	int ns_generation;
235 };
236 
237 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
238 static int	iwm_firmware_store_section(struct iwm_softc *,
239 		    enum iwm_ucode_type, uint8_t *, size_t);
240 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
241 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
242 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
243 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
244 #ifdef IWM_DEBUG
245 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
246 #endif
247 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
248 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
249 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
250 static int	iwm_nic_lock(struct iwm_softc *);
251 static void	iwm_nic_unlock(struct iwm_softc *);
252 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
253 		    uint32_t);
254 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
255 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
256 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
257 		    bus_size_t, bus_size_t);
258 static void	iwm_dma_contig_free(struct iwm_dma_info *);
259 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
260 static void	iwm_disable_rx_dma(struct iwm_softc *);
261 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
262 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
263 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
264 		    int);
265 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
266 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
267 static void	iwm_enable_rfkill_int(struct iwm_softc *);
268 static int	iwm_check_rfkill(struct iwm_softc *);
269 static void	iwm_enable_interrupts(struct iwm_softc *);
270 static void	iwm_restore_interrupts(struct iwm_softc *);
271 static void	iwm_disable_interrupts(struct iwm_softc *);
272 static void	iwm_ict_reset(struct iwm_softc *);
273 static int	iwm_set_hw_ready(struct iwm_softc *);
274 static int	iwm_prepare_card_hw(struct iwm_softc *);
275 static void	iwm_apm_config(struct iwm_softc *);
276 static int	iwm_apm_init(struct iwm_softc *);
277 static void	iwm_apm_stop(struct iwm_softc *);
278 static int	iwm_allow_mcast(struct iwm_softc *);
279 static int	iwm_start_hw(struct iwm_softc *);
280 static void	iwm_stop_device(struct iwm_softc *);
281 static void	iwm_nic_config(struct iwm_softc *);
282 static int	iwm_nic_rx_init(struct iwm_softc *);
283 static int	iwm_nic_tx_init(struct iwm_softc *);
284 static int	iwm_nic_init(struct iwm_softc *);
285 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
286 static int	iwm_post_alive(struct iwm_softc *);
287 static struct iwm_phy_db_entry *
288 		iwm_phy_db_get_section(struct iwm_softc *,
289 		    enum iwm_phy_db_section_type, uint16_t);
290 static int	iwm_phy_db_set_section(struct iwm_softc *,
291 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
292 static int	iwm_is_valid_channel(uint16_t);
293 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
294 static uint16_t iwm_channel_id_to_papd(uint16_t);
295 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
296 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
297 		    uint8_t **, uint16_t *, uint16_t);
298 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
299 		    void *);
300 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
301 		    enum iwm_phy_db_section_type, uint8_t);
302 static int	iwm_send_phy_db_data(struct iwm_softc *);
303 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
304 		    struct iwm_time_event_cmd_v1 *);
305 static int	iwm_send_time_event_cmd(struct iwm_softc *,
306 		    const struct iwm_time_event_cmd_v2 *);
307 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
308 		    uint32_t, uint32_t);
309 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
310 		    uint16_t, uint8_t *, uint16_t *);
311 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
312 		    uint16_t *, size_t);
313 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
314 		    const uint8_t *, size_t);
315 #ifndef IEEE80211_NO_HT
316 static void	iwm_setup_ht_rates(struct iwm_softc *);
317 static void	iwm_htprot_task(void *);
318 static void	iwm_update_htprot(struct ieee80211com *,
319 		    struct ieee80211_node *);
320 static int	iwm_ampdu_rx_start(struct ieee80211com *,
321 		    struct ieee80211_node *, uint8_t);
322 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
323 		    struct ieee80211_node *, uint8_t);
324 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
325 		    uint8_t, uint16_t, int);
326 #ifdef notyet
327 static int	iwm_ampdu_tx_start(struct ieee80211com *,
328 		    struct ieee80211_node *, uint8_t);
329 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
330 		    struct ieee80211_node *, uint8_t);
331 #endif
332 static void	iwm_ba_task(void *);
333 #endif
334 
335 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
336 		    const uint16_t *, const uint16_t *, const uint16_t *,
337 		    const uint16_t *, const uint16_t *);
338 static void	iwm_set_hw_address_8000(struct iwm_softc *,
339 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
340 static int	iwm_parse_nvm_sections(struct iwm_softc *,
341 		    struct iwm_nvm_section *);
342 static int	iwm_nvm_init(struct iwm_softc *);
343 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
344 		    const uint8_t *, uint32_t);
345 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
346 		    const uint8_t *, uint32_t);
347 static int	iwm_load_cpu_sections_7000(struct iwm_softc *,
348 		    struct iwm_fw_sects *, int, int *);
349 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
350 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
351 		    struct iwm_fw_sects *, int, int *);
352 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
353 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
354 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
355 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
356 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
357 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
358 		    enum iwm_ucode_type);
359 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
360 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
361 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
362 static int	iwm_get_signal_strength(struct iwm_softc *,
363 		    struct iwm_rx_phy_info *);
364 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
365 		    struct iwm_rx_packet *, struct iwm_rx_data *);
366 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
367 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
368 		    struct iwm_rx_data *);
369 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_node *);
370 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
371 		    struct iwm_rx_data *);
372 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
373 		    uint32_t);
374 #if 0
375 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
376 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
377 #endif
378 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
379 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
380 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
381 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
382 		    uint8_t, uint8_t);
383 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
384 		    uint8_t, uint8_t, uint32_t, uint32_t);
385 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
386 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
387 		    uint16_t, const void *);
388 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
389 		    uint32_t *);
390 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
391 		    const void *, uint32_t *);
392 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
393 static void	iwm_cmd_done(struct iwm_softc *, int, int);
394 #if 0
395 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
396 		    uint16_t);
397 #endif
398 static const struct iwm_rate *
399 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
400 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
401 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
402 		    struct ieee80211_node *, int);
403 static void	iwm_led_enable(struct iwm_softc *);
404 static void	iwm_led_disable(struct iwm_softc *);
405 static int	iwm_led_is_enabled(struct iwm_softc *);
406 static void	iwm_led_blink_timeout(void *);
407 static void	iwm_led_blink_start(struct iwm_softc *);
408 static void	iwm_led_blink_stop(struct iwm_softc *);
409 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
410 		    struct iwm_beacon_filter_cmd *);
411 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
412 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
413 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
414 		    int);
415 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
416 		    struct iwm_mac_power_cmd *);
417 static int	iwm_power_mac_update_mode(struct iwm_softc *,
418 		    struct iwm_node *);
419 static int	iwm_power_update_device(struct iwm_softc *);
420 #ifdef notyet
421 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
422 #endif
423 static int	iwm_disable_beacon_filter(struct iwm_softc *);
424 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
425 static int	iwm_add_aux_sta(struct iwm_softc *);
426 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
427 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
428 #ifdef notyet
429 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
430 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
431 #endif
432 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
433 		    struct iwm_scan_channel_cfg_lmac *, int);
434 static int	iwm_fill_probe_req(struct iwm_softc *,
435 		    struct iwm_scan_probe_req *);
436 static int	iwm_lmac_scan(struct iwm_softc *);
437 static int	iwm_config_umac_scan(struct iwm_softc *);
438 static int	iwm_umac_scan(struct iwm_softc *);
439 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
440 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
441 		    int *);
442 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
443 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
444 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
445 		    struct iwm_mac_data_sta *, int);
446 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
447 		    uint32_t, int);
448 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
449 static int	iwm_auth(struct iwm_softc *);
450 static int	iwm_assoc(struct iwm_softc *);
451 static void	iwm_calib_timeout(void *);
452 #ifndef IEEE80211_NO_HT
453 static void	iwm_setrates_task(void *);
454 static int	iwm_setrates(struct iwm_node *);
455 #endif
456 static int	iwm_media_change(struct ifnet *);
457 static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
458 		    int);
459 static void	iwm_newstate_cb(struct work *, void *);
460 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
461 static void	iwm_endscan(struct iwm_softc *);
462 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
463 		    struct ieee80211_node *);
464 static int	iwm_sf_config(struct iwm_softc *, int);
465 static int	iwm_send_bt_init_conf(struct iwm_softc *);
466 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
467 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
468 static int	iwm_init_hw(struct iwm_softc *);
469 static int	iwm_init(struct ifnet *);
470 static void	iwm_start(struct ifnet *);
471 static void	iwm_stop(struct ifnet *, int);
472 static void	iwm_watchdog(struct ifnet *);
473 static int	iwm_ioctl(struct ifnet *, u_long, void *);
474 #ifdef IWM_DEBUG
475 static const char *iwm_desc_lookup(uint32_t);
476 static void	iwm_nic_error(struct iwm_softc *);
477 static void	iwm_nic_umac_error(struct iwm_softc *);
478 #endif
479 static void	iwm_notif_intr(struct iwm_softc *);
480 static int	iwm_intr(void *);
481 static void	iwm_softintr(void *);
482 static int	iwm_preinit(struct iwm_softc *);
483 static void	iwm_attach_hook(device_t);
484 static void	iwm_attach(device_t, device_t, void *);
485 static int	iwm_config_complete(struct iwm_softc *);
486 #if 0
487 static void	iwm_init_task(void *);
488 static int	iwm_activate(device_t, enum devact);
489 static void	iwm_wakeup(struct iwm_softc *);
490 #endif
491 static void	iwm_radiotap_attach(struct iwm_softc *);
492 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
493 
494 static int iwm_sysctl_root_num;
495 static int iwm_lar_disable;
496 
497 #ifndef	IWM_DEFAULT_MCC
498 #define	IWM_DEFAULT_MCC	"ZZ"
499 #endif
500 static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;
501 
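/*
 * Load the firmware image from the filesystem via firmload(9) and
 * cache it in sc->sc_fw; once IWM_FLAG_FW_LOADED is set, subsequent
 * calls return the cached image.
 */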
502 static int
503 iwm_firmload(struct iwm_softc *sc)
504 {
505 	struct iwm_fw_info *fw = &sc->sc_fw;
506 	firmware_handle_t fwh;
507 	int err;
508 
509 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
510 		return 0;
511 
512 	/* Open firmware image. */
513 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
514 	if (err) {
515 		aprint_error_dev(sc->sc_dev,
516 		    "could not get firmware handle %s\n", sc->sc_fwname);
517 		return err;
518 	}
519 
520 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
521 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
522 		fw->fw_rawdata = NULL;
523 	}
524 
525 	fw->fw_rawsize = firmware_get_size(fwh);
526 	/*
527 	 * Well, this is how the Linux driver checks it ...
528 	 */
529 	if (fw->fw_rawsize < sizeof(uint32_t)) {
530 		aprint_error_dev(sc->sc_dev,
531 		    "firmware too short: %zu bytes\n", fw->fw_rawsize);
532 		err = EINVAL;
533 		goto out;
534 	}
535 
536 	/* Read the firmware. */
537 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
538 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
539 	if (err) {
540 		aprint_error_dev(sc->sc_dev,
541 		    "could not read firmware %s\n", sc->sc_fwname);
542 		goto out;
543 	}
544 
545 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
546  out:
547 	/* caller will release memory, if necessary */
548 
549 	firmware_close(fwh);
550 	return err;
551 }
552 
553 /*
554  * Update ic_curchan from the channel recorded in the last RX PHY info
 * for received beacons and probe responses; just maintaining status quo.
555  */
556 static void
557 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
558 {
559 	struct ieee80211com *ic = &sc->sc_ic;
560 	struct ieee80211_frame *wh;
561 	uint8_t subtype;
562 
563 	wh = mtod(m, struct ieee80211_frame *);
564 
565 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
566 		return;
567 
568 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
569 
570 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
571 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
572 		return;
573 
574 	int chan = le32toh(sc->sc_last_phy_info.channel);
575 	if (chan < __arraycount(ic->ic_channels))
576 		ic->ic_curchan = &ic->ic_channels[chan];
577 }
578 
579 static int
580 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
581 {
582 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
583 
584 	if (dlen < sizeof(*l) ||
585 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
586 		return EINVAL;
587 
588 	/* we don't actually store anything for now, always use s/w crypto */
589 
590 	return 0;
591 }
592 
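/*
 * Record one firmware section taken from a TLV: the first 32-bit word
 * is the device load offset, the remainder is the section data.  The
 * original buffer is kept so it can be freed on driver unload.
 */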
593 static int
594 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
595     uint8_t *data, size_t dlen)
596 {
597 	struct iwm_fw_sects *fws;
598 	struct iwm_fw_onesect *fwone;
599 
600 	if (type >= IWM_UCODE_TYPE_MAX)
601 		return EINVAL;
602 	if (dlen < sizeof(uint32_t))
603 		return EINVAL;
604 
605 	fws = &sc->sc_fw.fw_sects[type];
606 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
607 		return EINVAL;
608 
609 	fwone = &fws->fw_sect[fws->fw_count];
610 
611 	/* the first 32 bits are the device load offset */
612 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
613 
614 	/* rest is data */
615 	fwone->fws_data = data + sizeof(uint32_t);
616 	fwone->fws_len = dlen - sizeof(uint32_t);
617 
618 	/* for freeing the buffer during driver unload */
619 	fwone->fws_alloc = data;
620 	fwone->fws_allocsize = dlen;
621 
622 	fws->fw_count++;
623 	fws->fw_totlen += fwone->fws_len;
624 
625 	return 0;
626 }
627 
628 struct iwm_tlv_calib_data {
629 	uint32_t ucode_type;
630 	struct iwm_tlv_calib_ctrl calib;
631 } __packed;
632 
633 static int
634 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
635 {
636 	const struct iwm_tlv_calib_data *def_calib = data;
637 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
638 
639 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
640 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
641 		    DEVNAME(sc), ucode_type));
642 		return EINVAL;
643 	}
644 
645 	sc->sc_default_calib[ucode_type].flow_trigger =
646 	    def_calib->calib.flow_trigger;
647 	sc->sc_default_calib[ucode_type].event_trigger =
648 	    def_calib->calib.event_trigger;
649 
650 	return 0;
651 }
652 
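/*
 * Parse the TLV-format firmware image into per-ucode-type sections
 * and capability flags.  fw_status serializes concurrent callers:
 * the first caller marks the parse in progress and the others sleep
 * until it has finished.
 */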
653 static int
654 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
655 {
656 	struct iwm_fw_info *fw = &sc->sc_fw;
657 	struct iwm_tlv_ucode_header *uhdr;
658 	struct iwm_ucode_tlv tlv;
659 	enum iwm_ucode_tlv_type tlv_type;
660 	uint8_t *data;
661 	int err, status;
662 	size_t len;
663 
664 	if (ucode_type != IWM_UCODE_TYPE_INIT &&
665 	    fw->fw_status == IWM_FW_STATUS_DONE)
666 		return 0;
667 
668 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
669 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
670 	} else {
671 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
672 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
673 	}
674 	status = fw->fw_status;
675 
676 	if (status == IWM_FW_STATUS_DONE)
677 		return 0;
678 
679 	err = iwm_firmload(sc);
680 	if (err) {
681 		aprint_error_dev(sc->sc_dev,
682 		    "could not read firmware %s (error %d)\n",
683 		    sc->sc_fwname, err);
684 		goto out;
685 	}
686 
687 	sc->sc_capaflags = 0;
688 	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
689 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
690 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
691 
692 	uhdr = (void *)fw->fw_rawdata;
693 	if (*(uint32_t *)fw->fw_rawdata != 0
694 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
695 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
696 		    sc->sc_fwname);
697 		err = EINVAL;
698 		goto out;
699 	}
700 
701 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
702 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
703 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
704 	    IWM_UCODE_API(le32toh(uhdr->ver)));
705 	data = uhdr->data;
706 	len = fw->fw_rawsize - sizeof(*uhdr);
707 
708 	while (len >= sizeof(tlv)) {
709 		size_t tlv_len;
710 		void *tlv_data;
711 
712 		memcpy(&tlv, data, sizeof(tlv));
713 		tlv_len = le32toh(tlv.length);
714 		tlv_type = le32toh(tlv.type);
715 
716 		len -= sizeof(tlv);
717 		data += sizeof(tlv);
718 		tlv_data = data;
719 
720 		if (len < tlv_len) {
721 			aprint_error_dev(sc->sc_dev,
722 			    "firmware too short: %zu bytes\n", len);
723 			err = EINVAL;
724 			goto parse_out;
725 		}
726 
727 		switch (tlv_type) {
728 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
729 			if (tlv_len < sizeof(uint32_t)) {
730 				err = EINVAL;
731 				goto parse_out;
732 			}
733 			sc->sc_capa_max_probe_len
734 			    = le32toh(*(uint32_t *)tlv_data);
735 			/* limit it to something sensible */
736 			if (sc->sc_capa_max_probe_len >
737 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
738 				err = EINVAL;
739 				goto parse_out;
740 			}
741 			break;
742 		case IWM_UCODE_TLV_PAN:
743 			if (tlv_len) {
744 				err = EINVAL;
745 				goto parse_out;
746 			}
747 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
748 			break;
749 		case IWM_UCODE_TLV_FLAGS:
750 			if (tlv_len < sizeof(uint32_t)) {
751 				err = EINVAL;
752 				goto parse_out;
753 			}
754 			if (tlv_len % sizeof(uint32_t)) {
755 				err = EINVAL;
756 				goto parse_out;
757 			}
758 			/*
759 			 * Apparently there can be many flags, but Linux driver
760 			 * parses only the first one, and so do we.
761 			 *
762 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
763 			 * Intentional or a bug?  Observations from
764 			 * current firmware file:
765 			 *  1) TLV_PAN is parsed first
766 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
767 			 * ==> this resets TLV_PAN to itself... hnnnk
768 			 */
769 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
770 			break;
771 		case IWM_UCODE_TLV_CSCHEME:
772 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
773 			if (err)
774 				goto parse_out;
775 			break;
776 		case IWM_UCODE_TLV_NUM_OF_CPU: {
777 			uint32_t num_cpu;
778 			if (tlv_len != sizeof(uint32_t)) {
779 				err = EINVAL;
780 				goto parse_out;
781 			}
782 			num_cpu = le32toh(*(uint32_t *)tlv_data);
783 			if (num_cpu == 2) {
784 				fw->fw_sects[IWM_UCODE_TYPE_REGULAR].is_dual_cpus =
785 				    true;
786 				fw->fw_sects[IWM_UCODE_TYPE_INIT].is_dual_cpus =
787 				    true;
788 				fw->fw_sects[IWM_UCODE_TYPE_WOW].is_dual_cpus =
789 				    true;
790 			} else if (num_cpu < 1 || num_cpu > 2) {
791 				err = EINVAL;
792 				goto parse_out;
793 			}
794 			break;
795 		}
796 		case IWM_UCODE_TLV_SEC_RT:
797 			err = iwm_firmware_store_section(sc,
798 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
799 			if (err)
800 				goto parse_out;
801 			break;
802 		case IWM_UCODE_TLV_SEC_INIT:
803 			err = iwm_firmware_store_section(sc,
804 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
805 			if (err)
806 				goto parse_out;
807 			break;
808 		case IWM_UCODE_TLV_SEC_WOWLAN:
809 			err = iwm_firmware_store_section(sc,
810 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
811 			if (err)
812 				goto parse_out;
813 			break;
814 		case IWM_UCODE_TLV_DEF_CALIB:
815 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
816 				err = EINVAL;
817 				goto parse_out;
818 			}
819 			err = iwm_set_default_calib(sc, tlv_data);
820 			if (err)
821 				goto parse_out;
822 			break;
823 		case IWM_UCODE_TLV_PHY_SKU:
824 			if (tlv_len != sizeof(uint32_t)) {
825 				err = EINVAL;
826 				goto parse_out;
827 			}
828 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
829 			break;
830 
831 		case IWM_UCODE_TLV_API_CHANGES_SET: {
832 			struct iwm_ucode_api *api;
833 			uint32_t idx, bits;
834 			int i;
835 			if (tlv_len != sizeof(*api)) {
836 				err = EINVAL;
837 				goto parse_out;
838 			}
839 			api = (struct iwm_ucode_api *)tlv_data;
840 			idx = le32toh(api->api_index);
841 			bits = le32toh(api->api_flags);
842 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
843 				err = EINVAL;
844 				goto parse_out;
845 			}
846 			for (i = 0; i < 32; i++) {
847 				if (!ISSET(bits, __BIT(i)))
848 					continue;
849 				setbit(sc->sc_ucode_api, i + (32 * idx));
850 			}
851 			break;
852 		}
853 
854 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
855 			struct iwm_ucode_capa *capa;
856 			uint32_t idx, bits;
857 			int i;
858 			if (tlv_len != sizeof(*capa)) {
859 				err = EINVAL;
860 				goto parse_out;
861 			}
862 			capa = (struct iwm_ucode_capa *)tlv_data;
863 			idx = le32toh(capa->api_index);
864 			bits = le32toh(capa->api_capa);
865 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
866 				err = EINVAL;
867 				goto parse_out;
868 			}
869 			for (i = 0; i < 32; i++) {
870 				if (!ISSET(bits, __BIT(i)))
871 					continue;
872 				setbit(sc->sc_enabled_capa, i + (32 * idx));
873 			}
874 			break;
875 		}
876 
877 		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
878 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
879 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
880 		case IWM_UCODE_TLV_FW_MEM_SEG:
881 			/* ignore, not used by current driver */
882 			break;
883 
884 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
885 			err = iwm_firmware_store_section(sc,
886 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
887 			    tlv_len);
888 			if (err)
889 				goto parse_out;
890 			break;
891 
892 		case IWM_UCODE_TLV_PAGING: {
893 			uint32_t paging_mem_size;
894 			if (tlv_len != sizeof(paging_mem_size)) {
895 				err = EINVAL;
896 				goto parse_out;
897 			}
898 			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
899 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
900 				err = EINVAL;
901 				goto parse_out;
902 			}
903 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
904 				err = EINVAL;
905 				goto parse_out;
906 			}
907 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
908 			    paging_mem_size;
909 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
910 			    paging_mem_size;
911 			break;
912 		}
913 
914 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
915 			if (tlv_len != sizeof(uint32_t)) {
916 				err = EINVAL;
917 				goto parse_out;
918 			}
919 			sc->sc_capa_n_scan_channels =
920 			  le32toh(*(uint32_t *)tlv_data);
921 			break;
922 
923 		case IWM_UCODE_TLV_FW_VERSION:
924 			if (tlv_len != sizeof(uint32_t) * 3) {
925 				err = EINVAL;
926 				goto parse_out;
927 			}
928 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
929 			    "%d.%d.%d",
930 			    le32toh(((uint32_t *)tlv_data)[0]),
931 			    le32toh(((uint32_t *)tlv_data)[1]),
932 			    le32toh(((uint32_t *)tlv_data)[2]));
933 			break;
934 
935 		default:
936 			DPRINTF(("%s: unknown firmware section %d, abort\n",
937 			    DEVNAME(sc), tlv_type));
938 			err = EINVAL;
939 			goto parse_out;
940 		}
941 
942 		len -= roundup(tlv_len, 4);
943 		data += roundup(tlv_len, 4);
944 	}
945 
946 	KASSERT(err == 0);
947 
948  parse_out:
949 	if (err) {
950 		aprint_error_dev(sc->sc_dev,
951 		    "firmware parse error, section type %d\n", tlv_type);
952 	}
953 
954 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
955 		aprint_error_dev(sc->sc_dev,
956 		    "device uses unsupported power ops\n");
957 		err = ENOTSUP;
958 	}
959 
960  out:
961 	if (err)
962 		fw->fw_status = IWM_FW_STATUS_NONE;
963 	else
964 		fw->fw_status = IWM_FW_STATUS_DONE;
965 	wakeup(&sc->sc_fw);
966 
967 	if (err && fw->fw_rawdata != NULL) {
968 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
969 		fw->fw_rawdata = NULL;
970 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
971 		/* don't touch fw->fw_status */
972 		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
973 	}
974 	return err;
975 }
976 
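/*
 * Periphery (PRPH) registers are reached indirectly through the HBUS
 * target address/data registers.  The (3 << 24) in the address word
 * matches what the reference iwlwifi driver writes; it selects the
 * access mode expected by the hardware.
 */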
977 static uint32_t
978 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
979 {
980 	IWM_WRITE(sc,
981 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
982 	IWM_BARRIER_READ_WRITE(sc);
983 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
984 }
985 
986 static void
987 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
988 {
989 	IWM_WRITE(sc,
990 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
991 	IWM_BARRIER_WRITE(sc);
992 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
993 }
994 
995 #ifdef IWM_DEBUG
996 static int
997 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
998 {
999 	int offs;
1000 	uint32_t *vals = buf;
1001 
1002 	if (iwm_nic_lock(sc)) {
1003 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1004 		for (offs = 0; offs < dwords; offs++)
1005 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1006 		iwm_nic_unlock(sc);
1007 		return 0;
1008 	}
1009 	return EBUSY;
1010 }
1011 #endif
1012 
1013 static int
1014 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1015 {
1016 	int offs;
1017 	const uint32_t *vals = buf;
1018 
1019 	if (iwm_nic_lock(sc)) {
1020 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1021 		/* WADDR auto-increments */
1022 		for (offs = 0; offs < dwords; offs++) {
1023 			uint32_t val = vals ? vals[offs] : 0;
1024 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1025 		}
1026 		iwm_nic_unlock(sc);
1027 		return 0;
1028 	}
1029 	return EBUSY;
1030 }
1031 
1032 static int
1033 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1034 {
1035 	return iwm_write_mem(sc, addr, &val, 1);
1036 }
1037 
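/*
 * Poll a CSR until (reg & mask) == (bits & mask).  The timeout is in
 * microseconds and is checked in 10us steps; returns 1 on success and
 * 0 if the timeout expired first.
 */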
1038 static int
1039 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1040     int timo)
1041 {
1042 	for (;;) {
1043 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1044 			return 1;
1045 		}
1046 		if (timo < 10) {
1047 			return 0;
1048 		}
1049 		timo -= 10;
1050 		DELAY(10);
1051 	}
1052 }
1053 
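/*
 * Request "MAC access" so that the NIC stays awake while the host
 * pokes at its registers.  Returns 1 if access was granted; on
 * failure the device is kicked with a forced NMI.
 */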
1054 static int
1055 iwm_nic_lock(struct iwm_softc *sc)
1056 {
1057 	int rv = 0;
1058 
1059 	if (sc->sc_cmd_hold_nic_awake)
1060 		return 1;
1061 
1062 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1063 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1064 
1065 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
1066 		DELAY(2);
1067 
1068 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1069 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1070 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1071 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
1072 		rv = 1;
1073 	} else {
1074 		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
1075 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
1076 	}
1077 
1078 	return rv;
1079 }
1080 
1081 static void
1082 iwm_nic_unlock(struct iwm_softc *sc)
1083 {
1084 
1085 	if (sc->sc_cmd_hold_nic_awake)
1086 		return;
1087 
1088 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1089 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1090 }
1091 
1092 static void
1093 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1094     uint32_t mask)
1095 {
1096 	uint32_t val;
1097 
1098 	/* XXX: no error path? */
1099 	if (iwm_nic_lock(sc)) {
1100 		val = iwm_read_prph(sc, reg) & mask;
1101 		val |= bits;
1102 		iwm_write_prph(sc, reg, val);
1103 		iwm_nic_unlock(sc);
1104 	}
1105 }
1106 
1107 static void
1108 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1109 {
1110 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1111 }
1112 
1113 static void
1114 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1115 {
1116 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1117 }
1118 
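/*
 * Allocate, map and zero a physically contiguous DMA area.  On any
 * failure the partially constructed state is torn down again by
 * iwm_dma_contig_free().
 */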
1119 static int
1120 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1121     bus_size_t size, bus_size_t alignment)
1122 {
1123 	int nsegs, err;
1124 	void *va;
1125 
1126 	dma->tag = tag;
1127 	dma->size = size;
1128 
1129 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1130 	    &dma->map);
1131 	if (err)
1132 		goto fail;
1133 
1134 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1135 	    BUS_DMA_NOWAIT);
1136 	if (err)
1137 		goto fail;
1138 
1139 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
1140 	if (err)
1141 		goto fail;
1142 	dma->vaddr = va;
1143 
1144 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1145 	    BUS_DMA_NOWAIT);
1146 	if (err)
1147 		goto fail;
1148 
1149 	memset(dma->vaddr, 0, size);
1150 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1151 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1152 
1153 	return 0;
1154 
1155 fail:	iwm_dma_contig_free(dma);
1156 	return err;
1157 }
1158 
1159 static void
1160 iwm_dma_contig_free(struct iwm_dma_info *dma)
1161 {
1162 	if (dma->map != NULL) {
1163 		if (dma->vaddr != NULL) {
1164 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1165 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1166 			bus_dmamap_unload(dma->tag, dma->map);
1167 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1168 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1169 			dma->vaddr = NULL;
1170 		}
1171 		bus_dmamap_destroy(dma->tag, dma->map);
1172 		dma->map = NULL;
1173 	}
1174 }
1175 
1176 static int
1177 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1178 {
1179 	bus_size_t size;
1180 	int i, err;
1181 
1182 	ring->cur = 0;
1183 
1184 	/* Allocate RX descriptors (256-byte aligned). */
1185 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1186 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1187 	if (err) {
1188 		aprint_error_dev(sc->sc_dev,
1189 		    "could not allocate RX ring DMA memory\n");
1190 		goto fail;
1191 	}
1192 	ring->desc = ring->desc_dma.vaddr;
1193 
1194 	/* Allocate RX status area (16-byte aligned). */
1195 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1196 	    sizeof(*ring->stat), 16);
1197 	if (err) {
1198 		aprint_error_dev(sc->sc_dev,
1199 		    "could not allocate RX status DMA memory\n");
1200 		goto fail;
1201 	}
1202 	ring->stat = ring->stat_dma.vaddr;
1203 
1204 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1205 		struct iwm_rx_data *data = &ring->data[i];
1206 
1207 		memset(data, 0, sizeof(*data));
1208 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1209 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1210 		    &data->map);
1211 		if (err) {
1212 			aprint_error_dev(sc->sc_dev,
1213 			    "could not create RX buf DMA map\n");
1214 			goto fail;
1215 		}
1216 
1217 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1218 		if (err)
1219 			goto fail;
1220 	}
1221 	return 0;
1222 
1223 fail:	iwm_free_rx_ring(sc, ring);
1224 	return err;
1225 }
1226 
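/* Stop the RX DMA channel and wait up to 10ms for it to go idle. */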
1227 static void
1228 iwm_disable_rx_dma(struct iwm_softc *sc)
1229 {
1230 	int ntries;
1231 
1232 	if (iwm_nic_lock(sc)) {
1233 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1234 		for (ntries = 0; ntries < 1000; ntries++) {
1235 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1236 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1237 				break;
1238 			DELAY(10);
1239 		}
1240 		iwm_nic_unlock(sc);
1241 	}
1242 }
1243 
1244 static void
1245 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1246 {
1247 	ring->cur = 0;
1248 	memset(ring->stat, 0, sizeof(*ring->stat));
1249 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1250 	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1251 }
1252 
1253 static void
1254 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1255 {
1256 	int i;
1257 
1258 	iwm_dma_contig_free(&ring->desc_dma);
1259 	iwm_dma_contig_free(&ring->stat_dma);
1260 
1261 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1262 		struct iwm_rx_data *data = &ring->data[i];
1263 
1264 		if (data->m != NULL) {
1265 			bus_size_t sz = data->m->m_pkthdr.len;
1266 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1267 			    sz, BUS_DMASYNC_POSTREAD);
1268 			bus_dmamap_unload(sc->sc_dmat, data->map);
1269 			m_freem(data->m);
1270 			data->m = NULL;
1271 		}
1272 		if (data->map != NULL) {
1273 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1274 			data->map = NULL;
1275 		}
1276 	}
1277 }
1278 
1279 static int
1280 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1281 {
1282 	bus_addr_t paddr;
1283 	bus_size_t size;
1284 	int i, err, nsegs;
1285 
1286 	ring->qid = qid;
1287 	ring->queued = 0;
1288 	ring->cur = 0;
1289 
1290 	/* Allocate TX descriptors (256-byte aligned). */
1291 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1292 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1293 	if (err) {
1294 		aprint_error_dev(sc->sc_dev,
1295 		    "could not allocate TX ring DMA memory\n");
1296 		goto fail;
1297 	}
1298 	ring->desc = ring->desc_dma.vaddr;
1299 
1300 	/*
1301 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1302 	 * to allocate command space for the other rings.
1303 	 */
1304 	if (qid > IWM_CMD_QUEUE)
1305 		return 0;
1306 
1307 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1308 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1309 	if (err) {
1310 		aprint_error_dev(sc->sc_dev,
1311 		    "could not allocate TX cmd DMA memory\n");
1312 		goto fail;
1313 	}
1314 	ring->cmd = ring->cmd_dma.vaddr;
1315 
1316 	paddr = ring->cmd_dma.paddr;
1317 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1318 		struct iwm_tx_data *data = &ring->data[i];
1319 		size_t mapsize;
1320 
1321 		data->cmd_paddr = paddr;
1322 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1323 		    + offsetof(struct iwm_tx_cmd, scratch);
1324 		paddr += sizeof(struct iwm_device_cmd);
1325 
1326 		/* FW commands may require more mapped space than packets. */
1327 		if (qid == IWM_CMD_QUEUE) {
1328 			mapsize = IWM_RBUF_SIZE;
1329 			nsegs = 1;
1330 		} else {
1331 			mapsize = MCLBYTES;
1332 			nsegs = IWM_NUM_OF_TBS - 2;
1333 		}
1334 		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
1335 		    0, BUS_DMA_NOWAIT, &data->map);
1336 		if (err) {
1337 			aprint_error_dev(sc->sc_dev,
1338 			    "could not create TX buf DMA map\n");
1339 			goto fail;
1340 		}
1341 	}
1342 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1343 	return 0;
1344 
1345 fail:	iwm_free_tx_ring(sc, ring);
1346 	return err;
1347 }
1348 
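/*
 * Counterpart of iwm_set_cmd_in_flight(): drop the "keep NIC awake"
 * request once all host commands have completed.  Only needed on
 * NICs with the apmg_wake_up_wa workaround.
 */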
1349 static void
1350 iwm_clear_cmd_in_flight(struct iwm_softc *sc)
1351 {
1352 
1353 	if (!sc->apmg_wake_up_wa)
1354 		return;
1355 
1356 	if (!sc->sc_cmd_hold_nic_awake) {
1357 		aprint_error_dev(sc->sc_dev,
1358 		    "cmd_hold_nic_awake not set\n");
1359 		return;
1360 	}
1361 
1362 	sc->sc_cmd_hold_nic_awake = 0;
1363 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1364 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1365 }
1366 
1367 static int
1368 iwm_set_cmd_in_flight(struct iwm_softc *sc)
1369 {
1370 	int ret;
1371 
1372 	/*
1373 	 * Wake up the NIC to make sure the firmware will see the host
1374 	 * command; we let the NIC sleep again once all host commands have
1375 	 * returned.  This needs to be done only on NICs that have
1376 	 * apmg_wake_up_wa set.
1377 	 */
1378 	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {
1379 
1380 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1381 		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1382 
1383 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1384 		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1385 		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1386 		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
1387 		    15000);
1388 		if (ret == 0) {
1389 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1390 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1391 			aprint_error_dev(sc->sc_dev,
1392 			    "failed to wake NIC for hcmd\n");
1393 			return EIO;
1394 		}
1395 		sc->sc_cmd_hold_nic_awake = 1;
1396 	}
1397 
1398 	return 0;
1399 }

1400 static void
1401 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1402 {
1403 	int i;
1404 
1405 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1406 		struct iwm_tx_data *data = &ring->data[i];
1407 
1408 		if (data->m != NULL) {
1409 			bus_size_t sz = data->m->m_pkthdr.len;
1410 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1411 			    sz, BUS_DMASYNC_POSTWRITE);
1412 			bus_dmamap_unload(sc->sc_dmat, data->map);
1413 			m_freem(data->m);
1414 			data->m = NULL;
1415 		}
1416 	}
1417 	/* Clear TX descriptors. */
1418 	memset(ring->desc, 0, ring->desc_dma.size);
1419 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1420 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1421 	sc->qfullmsk &= ~(1 << ring->qid);
1422 	ring->queued = 0;
1423 	ring->cur = 0;
1424 
1425 	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
1426 		iwm_clear_cmd_in_flight(sc);
1427 }
1428 
1429 static void
1430 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1431 {
1432 	int i;
1433 
1434 	iwm_dma_contig_free(&ring->desc_dma);
1435 	iwm_dma_contig_free(&ring->cmd_dma);
1436 
1437 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1438 		struct iwm_tx_data *data = &ring->data[i];
1439 
1440 		if (data->m != NULL) {
1441 			bus_size_t sz = data->m->m_pkthdr.len;
1442 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1443 			    sz, BUS_DMASYNC_POSTWRITE);
1444 			bus_dmamap_unload(sc->sc_dmat, data->map);
1445 			m_freem(data->m);
1446 			data->m = NULL;
1447 		}
1448 		if (data->map != NULL) {
1449 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1450 			data->map = NULL;
1451 		}
1452 	}
1453 }
1454 
1455 static void
1456 iwm_enable_rfkill_int(struct iwm_softc *sc)
1457 {
1458 	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1459 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1460 }
1461 
1462 static int
1463 iwm_check_rfkill(struct iwm_softc *sc)
1464 {
1465 	uint32_t v;
1466 	int s;
1467 	int rv;
1468 
1469 	s = splnet();
1470 
1471 	/*
1472 	 * "documentation" is not really helpful here:
1473 	 *  27:	HW_RF_KILL_SW
1474 	 *	Indicates state of (platform's) hardware RF-Kill switch
1475 	 *
1476 	 * But apparently when it's off, it's on ...
1477 	 */
1478 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1479 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1480 	if (rv) {
1481 		sc->sc_flags |= IWM_FLAG_RFKILL;
1482 	} else {
1483 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1484 	}
1485 
1486 	splx(s);
1487 	return rv;
1488 }
1489 
1490 static void
1491 iwm_enable_interrupts(struct iwm_softc *sc)
1492 {
1493 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1494 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1495 }
1496 
1497 static void
1498 iwm_restore_interrupts(struct iwm_softc *sc)
1499 {
1500 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1501 }
1502 
1503 static void
1504 iwm_disable_interrupts(struct iwm_softc *sc)
1505 {
1506 	int s = splnet();
1507 
1508 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1509 
1510 	/* acknowledge all interrupts */
1511 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1512 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1513 
1514 	splx(s);
1515 }
1516 
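/*
 * Reset the interrupt cause table (ICT): zero it, hand its DMA
 * address to the device, and switch the driver to ICT interrupt mode.
 */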
1517 static void
1518 iwm_ict_reset(struct iwm_softc *sc)
1519 {
1520 	iwm_disable_interrupts(sc);
1521 
1522 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1523 	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, sc->ict_dma.size,
1524 	    BUS_DMASYNC_PREWRITE);
1525 	sc->ict_cur = 0;
1526 
1527 	/* Set physical address of ICT (4KB aligned). */
1528 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1529 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1530 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1531 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1532 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1533 
1534 	/* Switch to ICT interrupt mode in driver. */
1535 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1536 
1537 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1538 	iwm_enable_interrupts(sc);
1539 }
1540 
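/*
 * Tell the NIC that the host is ready and poll for the NIC_READY
 * handshake; the timeout is in microseconds (see iwm_poll_bit()).
 */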
1541 #define IWM_HW_READY_TIMEOUT 50
1542 static int
1543 iwm_set_hw_ready(struct iwm_softc *sc)
1544 {
1545 	int ready;
1546 
1547 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1548 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1549 
1550 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1551 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1552 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1553 	    IWM_HW_READY_TIMEOUT);
1554 	if (ready)
1555 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1556 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1557 
1558 	return ready;
1559 }
1560 #undef IWM_HW_READY_TIMEOUT
1561 
1562 static int
1563 iwm_prepare_card_hw(struct iwm_softc *sc)
1564 {
1565 	int t = 0;
1566 
1567 	if (iwm_set_hw_ready(sc))
1568 		return 0;
1569 
1570 	DELAY(100);
1571 
1572 	/* If HW is not ready, prepare the conditions to check again */
1573 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1574 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1575 
1576 	do {
1577 		if (iwm_set_hw_ready(sc))
1578 			return 0;
1579 		DELAY(200);
1580 		t += 200;
1581 	} while (t < 150000);
1582 
1583 	return ETIMEDOUT;
1584 }
1585 
1586 static void
1587 iwm_apm_config(struct iwm_softc *sc)
1588 {
1589 	pcireg_t reg;
1590 
1591 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1592 	    sc->sc_cap_off + PCIE_LCSR);
1593 	if (reg & PCIE_LCSR_ASPM_L1) {
1594 		/* The Linux driver prints "Disabling L0S" for this one ... */
1595 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1596 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1597 	} else {
1598 		/* ... and "Enabling L0S" here */
1599 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1600 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1601 	}
1602 }
1603 
1604 /*
1605  * Start up the NIC's basic functionality after it has been reset,
1606  * e.g. after platform boot or shutdown.
1607  * NOTE: This does not load uCode nor start the embedded processor.
1608  */
1609 static int
1610 iwm_apm_init(struct iwm_softc *sc)
1611 {
1612 	int err = 0;
1613 
1614 	/* Disable L0S exit timer (platform NMI workaround) */
1615 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1616 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1617 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1618 	}
1619 
1620 	/*
1621 	 * Disable L0s without affecting L1;
1622 	 *  don't wait for ICH L0s (ICH bug W/A)
1623 	 */
1624 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1625 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1626 
1627 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1628 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1629 
1630 	/*
1631 	 * Enable HAP INTA (interrupt from management bus) to
1632 	 * wake device's PCI Express link L1a -> L0s
1633 	 */
1634 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1635 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1636 
1637 	iwm_apm_config(sc);
1638 
1639 #if 0 /* not for 7k/8k */
1640 	/* Configure analog phase-lock-loop before activating to D0A */
1641 	if (trans->cfg->base_params->pll_cfg_val)
1642 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1643 		    trans->cfg->base_params->pll_cfg_val);
1644 #endif
1645 
1646 	/*
1647 	 * Set "initialization complete" bit to move adapter from
1648 	 * D0U* --> D0A* (powered-up active) state.
1649 	 */
1650 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1651 
1652 	/*
1653 	 * Wait for clock stabilization; once stabilized, access to
1654 	 * device-internal resources is supported, e.g. iwm_write_prph()
1655 	 * and accesses to uCode SRAM.
1656 	 */
1657 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1658 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1659 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1660 		aprint_error_dev(sc->sc_dev,
1661 		    "timeout waiting for clock stabilization\n");
1662 		err = ETIMEDOUT;
1663 		goto out;
1664 	}
1665 
1666 	if (sc->host_interrupt_operation_mode) {
1667 		/*
1668 		 * This is a bit of an abuse: this is needed for 7260 / 3160
1669 		 * only, so we key it off host_interrupt_operation_mode even
1670 		 * though the workaround is not otherwise related to it.
1671 		 *
1672 		 * Enable the oscillator to count wake up time for L1 exit. This
1673 		 * consumes slightly more power (100uA) - but lets us be sure
1674 		 * that we wake up from L1 on time.
1675 		 *
1676 		 * This looks weird: read twice the same register, discard the
1677 		 * value, set a bit, and yet again, read that same register
1678 		 * just to discard the value. But that's the way the hardware
1679 		 * seems to like it.
1680 		 */
1681 		iwm_read_prph(sc, IWM_OSC_CLK);
1682 		iwm_read_prph(sc, IWM_OSC_CLK);
1683 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1684 		iwm_read_prph(sc, IWM_OSC_CLK);
1685 		iwm_read_prph(sc, IWM_OSC_CLK);
1686 	}
1687 
1688 	/*
1689 	 * Enable DMA clock and wait for it to stabilize.
1690 	 *
1691 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1692 	 * do not disable clocks.  This preserves any hardware bits already
1693 	 * set by default in "CLK_CTRL_REG" after reset.
1694 	 */
1695 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1696 		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1697 		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1698 		DELAY(20);
1699 
1700 		/* Disable L1-Active */
1701 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1702 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1703 
1704 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1705 		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1706 		    IWM_APMG_RTC_INT_STT_RFKILL);
1707 	}
1708  out:
1709 	if (err)
1710 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1711 	return err;
1712 }
1713 
1714 static void
1715 iwm_apm_stop(struct iwm_softc *sc)
1716 {
1717 	/* stop device's busmaster DMA activity */
1718 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1719 
1720 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1721 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1722 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1723 		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
1724 	DPRINTF(("iwm apm stop\n"));
1725 }
1726 
1727 static int
1728 iwm_start_hw(struct iwm_softc *sc)
1729 {
1730 	int err;
1731 
1732 	err = iwm_prepare_card_hw(sc);
1733 	if (err)
1734 		return err;
1735 
1736 	/* Reset the entire device */
1737 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1738 	DELAY(10);
1739 
1740 	err = iwm_apm_init(sc);
1741 	if (err)
1742 		return err;
1743 
1744 	iwm_enable_rfkill_int(sc);
1745 	iwm_check_rfkill(sc);
1746 
1747 	return 0;
1748 }
1749 
1750 static void
1751 iwm_stop_device(struct iwm_softc *sc)
1752 {
1753 	int chnl, ntries;
1754 	int qid;
1755 
1756 	iwm_disable_interrupts(sc);
1757 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1758 
1759 	/* Deactivate TX scheduler. */
1760 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1761 
1762 	/* Stop all DMA channels. */
1763 	if (iwm_nic_lock(sc)) {
1764 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1765 			IWM_WRITE(sc,
1766 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1767 			for (ntries = 0; ntries < 200; ntries++) {
1768 				uint32_t r;
1769 
1770 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1771 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1772 				    chnl))
1773 					break;
1774 				DELAY(20);
1775 			}
1776 		}
1777 		iwm_nic_unlock(sc);
1778 	}
1779 	iwm_disable_rx_dma(sc);
1780 
1781 	iwm_reset_rx_ring(sc, &sc->rxq);
1782 
1783 	for (qid = 0; qid < __arraycount(sc->txq); qid++)
1784 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1785 
1786 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1787 		/* Power-down device's busmaster DMA clocks */
1788 		if (iwm_nic_lock(sc)) {
1789 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1790 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1791 			DELAY(5);
1792 			iwm_nic_unlock(sc);
1793 		}
1794 	}
1795 
1796 	/* Make sure (redundantly) that we've released our request to stay awake */
1797 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1798 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1799 
1800 	/* Stop the device, and put it in low power state */
1801 	iwm_apm_stop(sc);
1802 
1803 	/*
1804 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1805 	 * Clean again the interrupt here
1806 	 */
1807 	iwm_disable_interrupts(sc);
1808 
1809 	/* Reset the on-board processor. */
1810 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1811 
1812 	/* Even though we stop the HW we still want the RF kill interrupt. */
1813 	iwm_enable_rfkill_int(sc);
1814 	iwm_check_rfkill(sc);
1815 }
1816 
1817 static void
1818 iwm_nic_config(struct iwm_softc *sc)
1819 {
1820 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1821 	uint32_t reg_val = 0;
1822 
1823 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1824 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1825 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1826 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1827 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1828 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1829 
1830 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1831 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1832 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1833 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1834 
1835 	/* radio configuration */
1836 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1837 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1838 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1839 
1840 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1841 
1842 	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1843 	    radio_cfg_step, radio_cfg_dash));
1844 
1845 	/*
1846 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1847 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1848 	 * to lose ownership and fail to obtain it back.
1849 	 */
1850 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1851 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1852 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1853 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1854 	}
1855 }
1856 
1857 static int
1858 iwm_nic_rx_init(struct iwm_softc *sc)
1859 {
1860 	if (!iwm_nic_lock(sc))
1861 		return EBUSY;
1862 
1863 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1864 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1865 	    0, sc->rxq.stat_dma.size,
1866 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1867 
1868 	iwm_disable_rx_dma(sc);
1869 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1870 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1871 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1872 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1873 
1874 	/* Set physical address of RX ring (256-byte aligned). */
1875 	IWM_WRITE(sc,
1876 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1877 
1878 	/* Set physical address of RX status (16-byte aligned). */
1879 	IWM_WRITE(sc,
1880 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1881 
1882 	/* Enable RX. */
1883 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1884 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1885 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1886 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1887 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
1888 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1889 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1890 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1891 
1892 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1893 
1894 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1895 	if (sc->host_interrupt_operation_mode)
1896 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1897 
1898 	/*
1899 	 * This value should initially be 0 (before preparing any RBs),
1900 	 * and should be 8 after preparing the first 8 RBs (for example).
1901 	 */
1902 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1903 
1904 	iwm_nic_unlock(sc);
1905 
1906 	return 0;
1907 }
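
/*
 * Editorial note on the shifts above: the hardware registers store
 * truncated physical addresses. The RX descriptor ring base must be
 * 256-byte aligned, so its low 8 bits are zero and paddr >> 8 loses
 * nothing; likewise the status area is 16-byte aligned and is written
 * as paddr >> 4. For example (hypothetical address), a ring at
 * 0x12345600 is programmed as 0x123456.
 */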
1908 
1909 static int
1910 iwm_nic_tx_init(struct iwm_softc *sc)
1911 {
1912 	int qid;
1913 
1914 	if (!iwm_nic_lock(sc))
1915 		return EBUSY;
1916 
1917 	/* Deactivate TX scheduler. */
1918 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1919 
1920 	/* Set physical address of "keep warm" page (16-byte aligned). */
1921 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1922 
1923 	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1924 		struct iwm_tx_ring *txq = &sc->txq[qid];
1925 
1926 		/* Set physical address of TX ring (256-byte aligned). */
1927 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1928 		    txq->desc_dma.paddr >> 8);
1929 		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1930 		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1931 	}
1932 
1933 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1934 
1935 	iwm_nic_unlock(sc);
1936 
1937 	return 0;
1938 }
1939 
1940 static int
1941 iwm_nic_init(struct iwm_softc *sc)
1942 {
1943 	int err;
1944 
1945 	iwm_apm_init(sc);
1946 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1947 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1948 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1949 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1950 	}
1951 
1952 	iwm_nic_config(sc);
1953 
1954 	err = iwm_nic_rx_init(sc);
1955 	if (err)
1956 		return err;
1957 
1958 	err = iwm_nic_tx_init(sc);
1959 	if (err)
1960 		return err;
1961 
1962 	DPRINTF(("shadow registers enabled\n"));
1963 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1964 
1965 	return 0;
1966 }
1967 
1968 static const uint8_t iwm_ac_to_tx_fifo[] = {
1969 	IWM_TX_FIFO_VO,
1970 	IWM_TX_FIFO_VI,
1971 	IWM_TX_FIFO_BE,
1972 	IWM_TX_FIFO_BK,
1973 };
1974 
1975 static int
1976 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1977 {
1978 	if (!iwm_nic_lock(sc)) {
1979 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1980 		return EBUSY;
1981 	}
1982 
1983 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1984 
1985 	if (qid == IWM_CMD_QUEUE) {
1986 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1987 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1988 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1989 
1990 		iwm_nic_unlock(sc);
1991 
1992 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1993 
1994 		if (!iwm_nic_lock(sc))
1995 			return EBUSY;
1996 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1997 		iwm_nic_unlock(sc);
1998 
1999 		iwm_write_mem32(sc,
2000 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2001 
2002 		/* Set scheduler window size and frame limit. */
2003 		iwm_write_mem32(sc,
2004 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2005 		    sizeof(uint32_t),
2006 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2007 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2008 		    ((IWM_FRAME_LIMIT
2009 		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2010 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2011 
2012 		if (!iwm_nic_lock(sc))
2013 			return EBUSY;
2014 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2015 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2016 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2017 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2018 		    IWM_SCD_QUEUE_STTS_REG_MSK);
2019 	} else {
2020 		struct iwm_scd_txq_cfg_cmd cmd;
2021 		int err;
2022 
2023 		iwm_nic_unlock(sc);
2024 
2025 		memset(&cmd, 0, sizeof(cmd));
2026 		cmd.scd_queue = qid;
2027 		cmd.enable = 1;
2028 		cmd.sta_id = sta_id;
2029 		cmd.tx_fifo = fifo;
2030 		cmd.aggregate = 0;
2031 		cmd.window = IWM_FRAME_LIMIT;
2032 
2033 		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
2034 		    &cmd);
2035 		if (err)
2036 			return err;
2037 
2038 		if (!iwm_nic_lock(sc))
2039 			return EBUSY;
2040 	}
2041 
2042 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2043 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
2044 
2045 	iwm_nic_unlock(sc);
2046 
2047 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2048 
2049 	return 0;
2050 }
2051 
2052 static int
2053 iwm_post_alive(struct iwm_softc *sc)
2054 {
2055 	int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2056 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2057 	int err, chnl;
2058 	uint32_t base;
2059 
2060 	if (!iwm_nic_lock(sc))
2061 		return EBUSY;
2062 
2063 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2064 	if (sc->sched_base != base) {
2065 		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2066 		    DEVNAME(sc), sc->sched_base, base));
2067 		sc->sched_base = base;
2068 	}
2069 
2070 	iwm_nic_unlock(sc);
2071 
2072 	iwm_ict_reset(sc);
2073 
2074 	/* Clear TX scheduler state in SRAM. */
2075 	err = iwm_write_mem(sc,
2076 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2077 	if (err)
2078 		return err;
2079 
2080 	if (!iwm_nic_lock(sc))
2081 		return EBUSY;
2082 
2083 	/* Set physical address of TX scheduler rings (1KB aligned). */
2084 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2085 
2086 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2087 
2088 	iwm_nic_unlock(sc);
2089 
2090 	/* enable command channel */
2091 	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2092 	if (err)
2093 		return err;
2094 
2095 	if (!iwm_nic_lock(sc))
2096 		return EBUSY;
2097 
2098 	/* Activate TX scheduler. */
2099 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2100 
2101 	/* Enable DMA channels. */
2102 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2103 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2104 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2105 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2106 	}
2107 
2108 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2109 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2110 
2111 	/* Enable L1-Active */
2112 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2113 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2114 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2115 	}
2116 
2117 	iwm_nic_unlock(sc);
2118 
2119 	return 0;
2120 }
2121 
2122 static struct iwm_phy_db_entry *
2123 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2124     uint16_t chg_id)
2125 {
2126 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2127 
2128 	if (type >= IWM_PHY_DB_MAX)
2129 		return NULL;
2130 
2131 	switch (type) {
2132 	case IWM_PHY_DB_CFG:
2133 		return &phy_db->cfg;
2134 	case IWM_PHY_DB_CALIB_NCH:
2135 		return &phy_db->calib_nch;
2136 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2137 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2138 			return NULL;
2139 		return &phy_db->calib_ch_group_papd[chg_id];
2140 	case IWM_PHY_DB_CALIB_CHG_TXP:
2141 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2142 			return NULL;
2143 		return &phy_db->calib_ch_group_txp[chg_id];
2144 	default:
2145 		return NULL;
2146 	}
2147 	return NULL;
2148 }
2149 
2150 static int
2151 iwm_phy_db_set_section(struct iwm_softc *sc,
2152     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2153 {
2154 	struct iwm_phy_db_entry *entry;
2155 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2156 	uint16_t chg_id = 0;
2157 
2158 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2159 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2160 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2161 
2162 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2163 	if (!entry)
2164 		return EINVAL;
2165 
2166 	if (entry->data)
2167 		kmem_intr_free(entry->data, entry->size);
2168 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2169 	if (!entry->data) {
2170 		entry->size = 0;
2171 		return ENOMEM;
2172 	}
2173 	memcpy(entry->data, phy_db_notif->data, size);
2174 	entry->size = size;
2175 
2176 	DPRINTFN(10, ("%s(%d): [PHYDB] SET: Type %d, Size: %d, data: %p\n",
2177 	    __func__, __LINE__, type, size, entry->data));
2178 
2179 	return 0;
2180 }
2181 
2182 static int
2183 iwm_is_valid_channel(uint16_t ch_id)
2184 {
2185 	if (ch_id <= 14 ||
2186 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2187 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2188 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2189 		return 1;
2190 	return 0;
2191 }
2192 
2193 static uint8_t
2194 iwm_ch_id_to_ch_index(uint16_t ch_id)
2195 {
2196 	if (!iwm_is_valid_channel(ch_id))
2197 		return 0xff;
2198 
2199 	if (ch_id <= 14)
2200 		return ch_id - 1;
2201 	if (ch_id <= 64)
2202 		return (ch_id + 20) / 4;
2203 	if (ch_id <= 140)
2204 		return (ch_id - 12) / 4;
2205 	return (ch_id - 13) / 4;
2206 }
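
/*
 * Editorial worked examples for the mapping above: channel 1 -> index 0,
 * 14 -> 13, 36 -> (36 + 20) / 4 = 14, 64 -> 21, 100 -> (100 - 12) / 4 = 22,
 * 140 -> 32, 145 -> (145 - 13) / 4 = 33, 165 -> 38. The per-band offsets
 * are chosen so the indices stay contiguous across band boundaries.
 */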
2207 
2208 
2209 static uint16_t
2210 iwm_channel_id_to_papd(uint16_t ch_id)
2211 {
2212 	if (!iwm_is_valid_channel(ch_id))
2213 		return 0xff;
2214 
2215 	if (1 <= ch_id && ch_id <= 14)
2216 		return 0;
2217 	if (36 <= ch_id && ch_id <= 64)
2218 		return 1;
2219 	if (100 <= ch_id && ch_id <= 140)
2220 		return 2;
2221 	return 3;
2222 }
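
/*
 * Editorial examples: channel 6 falls in PAPD group 0, 40 in group 1,
 * 120 in group 2 and 157 in group 3 (one group per band segment).
 */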
2223 
2224 static uint16_t
2225 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2226 {
2227 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2228 	struct iwm_phy_db_chg_txp *txp_chg;
2229 	int i;
2230 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2231 
2232 	if (ch_index == 0xff)
2233 		return 0xff;
2234 
2235 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2236 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2237 		if (!txp_chg)
2238 			return 0xff;
2239 		/*
2240 		 * Find the first channel group whose max channel index
2241 		 * is at least the requested channel's index.
2242 		 */
2243 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2244 			return i;
2245 	}
2246 	return 0xff;
2247 }
2248 
2249 static int
2250 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2251     uint16_t *size, uint16_t ch_id)
2252 {
2253 	struct iwm_phy_db_entry *entry;
2254 	uint16_t ch_group_id = 0;
2255 
2256 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2257 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2258 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2259 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2260 
2261 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2262 	if (!entry)
2263 		return EINVAL;
2264 
2265 	*data = entry->data;
2266 	*size = entry->size;
2267 
2268 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2269 		       __func__, __LINE__, type, *size));
2270 
2271 	return 0;
2272 }
2273 
2274 static int
2275 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2276     void *data)
2277 {
2278 	struct iwm_phy_db_cmd phy_db_cmd;
2279 	struct iwm_host_cmd cmd = {
2280 		.id = IWM_PHY_DB_CMD,
2281 		.flags = IWM_CMD_ASYNC,
2282 	};
2283 
2284 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2285 	    type, length));
2286 
2287 	phy_db_cmd.type = htole16(type);
2288 	phy_db_cmd.length = htole16(length);
2289 
2290 	cmd.data[0] = &phy_db_cmd;
2291 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2292 	cmd.data[1] = data;
2293 	cmd.len[1] = length;
2294 
2295 	return iwm_send_cmd(sc, &cmd);
2296 }
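
/*
 * Editorial note: this builds a two-fragment host command, with the
 * fixed iwm_phy_db_cmd header in data[0]/len[0] and the variable-sized
 * calibration payload in data[1]/len[1]. A minimal (hypothetical) use,
 * mirroring iwm_send_phy_db_data() below, would be:
 *
 *	struct iwm_phy_db_entry *e = &sc->sc_phy_db.calib_nch;
 *	if (e->size != 0)
 *		(void)iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH,
 *		    e->size, e->data);
 */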
2297 
2298 static int
2299 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2300     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2301 {
2302 	uint16_t i;
2303 	int err;
2304 	struct iwm_phy_db_entry *entry;
2305 
2306 	/* Send all the channel-specific groups to operational fw */
2307 	for (i = 0; i < max_ch_groups; i++) {
2308 		entry = iwm_phy_db_get_section(sc, type, i);
2309 		if (!entry)
2310 			return EINVAL;
2311 
2312 		if (!entry->size)
2313 			continue;
2314 
2315 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2316 		if (err) {
2317 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2318 			    "err %d\n", DEVNAME(sc), type, i, err));
2319 			return err;
2320 		}
2321 
2322 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2323 		    DEVNAME(sc), type, i));
2324 
2325 		DELAY(1000);
2326 	}
2327 
2328 	return 0;
2329 }
2330 
2331 static int
2332 iwm_send_phy_db_data(struct iwm_softc *sc)
2333 {
2334 	uint8_t *data = NULL;
2335 	uint16_t size = 0;
2336 	int err;
2337 
2338 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2339 	if (err)
2340 		return err;
2341 
2342 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2343 	if (err)
2344 		return err;
2345 
2346 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2347 	    &data, &size, 0);
2348 	if (err)
2349 		return err;
2350 
2351 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2352 	if (err)
2353 		return err;
2354 
2355 	err = iwm_phy_db_send_all_channel_groups(sc,
2356 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2357 	if (err)
2358 		return err;
2359 
2360 	err = iwm_phy_db_send_all_channel_groups(sc,
2361 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2362 	if (err)
2363 		return err;
2364 
2365 	return 0;
2366 }
2367 
2368 /*
2369  * For the high priority TE use a time event type that has similar priority to
2370  * the FW's action scan priority.
2371  */
2372 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2373 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2374 
2375 /* used to convert from time event API v2 to v1 */
2376 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2377 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
2378 static inline uint16_t
2379 iwm_te_v2_get_notify(uint16_t policy)
2380 {
2381 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2382 }
2383 
2384 static inline uint16_t
2385 iwm_te_v2_get_dep_policy(uint16_t policy)
2386 {
2387 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2388 		IWM_TE_V2_PLACEMENT_POS;
2389 }
2390 
2391 static inline uint16_t
2392 iwm_te_v2_get_absence(uint16_t policy)
2393 {
2394 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2395 }
2396 
2397 static void
2398 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2399     struct iwm_time_event_cmd_v1 *cmd_v1)
2400 {
2401 	cmd_v1->id_and_color = cmd_v2->id_and_color;
2402 	cmd_v1->action = cmd_v2->action;
2403 	cmd_v1->id = cmd_v2->id;
2404 	cmd_v1->apply_time = cmd_v2->apply_time;
2405 	cmd_v1->max_delay = cmd_v2->max_delay;
2406 	cmd_v1->depends_on = cmd_v2->depends_on;
2407 	cmd_v1->interval = cmd_v2->interval;
2408 	cmd_v1->duration = cmd_v2->duration;
2409 	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2410 		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2411 	else
2412 		cmd_v1->repeat = htole32(cmd_v2->repeat);
2413 	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2414 	cmd_v1->interval_reciprocal = 0; /* unused */
2415 
2416 	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2417 	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2418 	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2419 }
2420 
2421 static int
2422 iwm_send_time_event_cmd(struct iwm_softc *sc,
2423     const struct iwm_time_event_cmd_v2 *cmd)
2424 {
2425 	struct iwm_time_event_cmd_v1 cmd_v1;
2426 
2427 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2428 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2429 		    cmd);
2430 
2431 	iwm_te_v2_to_v1(cmd, &cmd_v1);
2432 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2433 	    &cmd_v1);
2434 }
2435 
2436 static void
2437 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2438     uint32_t duration, uint32_t max_delay)
2439 {
2440 	struct iwm_time_event_cmd_v2 time_cmd;
2441 
2442 	memset(&time_cmd, 0, sizeof(time_cmd));
2443 
2444 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2445 	time_cmd.id_and_color =
2446 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2447 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2448 
2449 	time_cmd.apply_time = htole32(0);
2450 
2451 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2452 	time_cmd.max_delay = htole32(max_delay);
2453 	/* TODO: why do we need to set interval to the beacon interval if it is not periodic? */
2454 	time_cmd.interval = htole32(1);
2455 	time_cmd.duration = htole32(duration);
2456 	time_cmd.repeat = 1;
2457 	time_cmd.policy
2458 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2459 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2460 		IWM_T2_V2_START_IMMEDIATELY);
2461 
2462 	iwm_send_time_event_cmd(sc, &time_cmd);
2463 }
2464 
2465 /*
2466  * NVM read access and content parsing.  We do not support
2467  * external NVM or writing NVM.
2468  */
2469 
2470 /* list of NVM sections we are allowed/need to read */
2471 static const int iwm_nvm_to_read[] = {
2472 	IWM_NVM_SECTION_TYPE_HW,
2473 	IWM_NVM_SECTION_TYPE_SW,
2474 	IWM_NVM_SECTION_TYPE_REGULATORY,
2475 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2476 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2477 	IWM_NVM_SECTION_TYPE_HW_8000,
2478 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2479 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2480 };
2481 
2482 /* Default NVM size to read */
2483 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2484 #define IWM_MAX_NVM_SECTION_SIZE_7000	(16 * 512 * sizeof(uint16_t)) /*16 KB*/
2485 #define IWM_MAX_NVM_SECTION_SIZE_8000	(32 * 512 * sizeof(uint16_t)) /*32 KB*/
2486 
2487 #define IWM_NVM_WRITE_OPCODE 1
2488 #define IWM_NVM_READ_OPCODE 0
2489 
2490 static int
2491 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2492     uint16_t length, uint8_t *data, uint16_t *len)
2493 {
2495 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2496 		.offset = htole16(offset),
2497 		.length = htole16(length),
2498 		.type = htole16(section),
2499 		.op_code = IWM_NVM_READ_OPCODE,
2500 	};
2501 	struct iwm_nvm_access_resp *nvm_resp;
2502 	struct iwm_rx_packet *pkt;
2503 	struct iwm_host_cmd cmd = {
2504 		.id = IWM_NVM_ACCESS_CMD,
2505 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2506 		.data = { &nvm_access_cmd, },
2507 	};
2508 	int err, offset_read;
2509 	size_t bytes_read;
2510 	uint8_t *resp_data;
2511 
2512 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2513 
2514 	err = iwm_send_cmd(sc, &cmd);
2515 	if (err) {
2516 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2517 		    DEVNAME(sc), err));
2518 		return err;
2519 	}
2520 
2521 	pkt = cmd.resp_pkt;
2522 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2523 		err = EIO;
2524 		goto exit;
2525 	}
2526 
2527 	/* Extract NVM response */
2528 	nvm_resp = (void *)pkt->data;
2529 
2530 	err = le16toh(nvm_resp->status);
2531 	bytes_read = le16toh(nvm_resp->length);
2532 	offset_read = le16toh(nvm_resp->offset);
2533 	resp_data = nvm_resp->data;
2534 	if (err) {
2535 		err = EINVAL;
2536 		goto exit;
2537 	}
2538 
2539 	if (offset_read != offset) {
2540 		err = EINVAL;
2541 		goto exit;
2542 	}
2543 	if (bytes_read > length) {
2544 		err = EINVAL;
2545 		goto exit;
2546 	}
2547 
2548 	memcpy(data + offset, resp_data, bytes_read);
2549 	*len = bytes_read;
2550 
2551  exit:
2552 	iwm_free_resp(sc, &cmd);
2553 	return err;
2554 }
2555 
2556 /*
2557  * Reads an NVM section completely.
2558  * NICs prior to the 7000 family don't have a real NVM, but just read
2559  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2560  * by the uCode, we need to check manually in this case that we don't
2561  * overflow and try to read more than the EEPROM size.
2562  */
2563 static int
2564 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2565     uint16_t *len, size_t max_len)
2566 {
2567 	uint16_t chunklen, seglen;
2568 	int err;
2569 
2570 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2571 	*len = 0;
2572 
2573 	/* Read NVM chunks until exhausted (reading less than requested) */
2574 	while (seglen == chunklen && *len < max_len) {
2575 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2576 		    &seglen);
2577 		if (err) {
2578 			DPRINTF(("%s: Cannot read NVM from section %d "
2579 			    "offset %d, length %d\n",
2580 			    DEVNAME(sc), section, *len, chunklen));
2581 			return err;
2582 		}
2583 		*len += seglen;
2584 	}
2585 
2586 	DPRINTFN(4, ("NVM section %d read completed\n", section));
2587 	return 0;
2588 }
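
/*
 * Editorial example of the loop's termination: with 2 KB chunks
 * (IWM_NVM_DEFAULT_CHUNK_SIZE), a 5 KB section reads as 2048 + 2048 +
 * 1024 bytes; the short final chunk (seglen != chunklen) ends the loop.
 * The *len < max_len guard additionally stops runaway EEPROM reads on
 * pre-7000 NICs, as described above.
 */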
2589 
2590 static uint8_t
2591 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2592 {
2593 	uint8_t tx_ant;
2594 
2595 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2596 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2597 
2598 	if (sc->sc_nvm.valid_tx_ant)
2599 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2600 
2601 	return tx_ant;
2602 }
2603 
2604 static uint8_t
2605 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2606 {
2607 	uint8_t rx_ant;
2608 
2609 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2610 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2611 
2612 	if (sc->sc_nvm.valid_rx_ant)
2613 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2614 
2615 	return rx_ant;
2616 }
2617 
2618 static void
2619 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2620     const uint8_t *nvm_channels, size_t nchan)
2621 {
2622 	struct ieee80211com *ic = &sc->sc_ic;
2623 	struct iwm_nvm_data *data = &sc->sc_nvm;
2624 	int ch_idx;
2625 	struct ieee80211_channel *channel;
2626 	uint16_t ch_flags;
2627 	int is_5ghz;
2628 	int flags, hw_value;
2629 
2630 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2631 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2632 		aprint_debug_dev(sc->sc_dev,
2633 		    "Ch. %d: %svalid %cibss %s %cradar %cdfs"
2634 		    " %cwide %c40MHz %c80MHz %c160MHz\n",
2635 		    nvm_channels[ch_idx],
2636 		    ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
2637 		    ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
2638 		    ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
2639 		    ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
2640 		    ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
2641 		    ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
2642 		    ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
2643 		    ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
2644 		    ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');
2645 
2646 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2647 		    !data->sku_cap_band_52GHz_enable)
2648 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2649 
2650 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2651 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2652 			    nvm_channels[ch_idx], ch_flags,
2653 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
2654 			continue;
2655 		}
2656 
2657 		hw_value = nvm_channels[ch_idx];
2658 		channel = &ic->ic_channels[hw_value];
2659 
2660 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2661 		if (!is_5ghz) {
2662 			flags = IEEE80211_CHAN_2GHZ;
2663 			channel->ic_flags
2664 			    = IEEE80211_CHAN_CCK
2665 			    | IEEE80211_CHAN_OFDM
2666 			    | IEEE80211_CHAN_DYN
2667 			    | IEEE80211_CHAN_2GHZ;
2668 		} else {
2669 			flags = IEEE80211_CHAN_5GHZ;
2670 			channel->ic_flags =
2671 			    IEEE80211_CHAN_A;
2672 		}
2673 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2674 
2675 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2676 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2677 
2678 #ifndef IEEE80211_NO_HT
2679 		if (data->sku_cap_11n_enable)
2680 			channel->ic_flags |= IEEE80211_CHAN_HT;
2681 #endif
2682 	}
2683 }
2684 
2685 #ifndef IEEE80211_NO_HT
2686 static void
2687 iwm_setup_ht_rates(struct iwm_softc *sc)
2688 {
2689 	struct ieee80211com *ic = &sc->sc_ic;
2690 
2691 	/* TX is supported with the same MCS as RX. */
2692 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2693 
2694 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2695 
2696 #ifdef notyet
2697 	if (sc->sc_nvm.sku_cap_mimo_disable)
2698 		return;
2699 
2700 	if (iwm_fw_valid_rx_ant(sc) > 1)
2701 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2702 	if (iwm_fw_valid_rx_ant(sc) > 2)
2703 		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
2704 #endif
2705 }
2706 
2707 #define IWM_MAX_RX_BA_SESSIONS 16
2708 
2709 static void
2710 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2711     uint16_t ssn, int start)
2712 {
2713 	struct ieee80211com *ic = &sc->sc_ic;
2714 	struct iwm_add_sta_cmd_v7 cmd;
2715 	struct iwm_node *in = (struct iwm_node *)ni;
2716 	int err, s;
2717 	uint32_t status;
2718 
2719 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2720 		ieee80211_addba_req_refuse(ic, ni, tid);
2721 		return;
2722 	}
2723 
2724 	memset(&cmd, 0, sizeof(cmd));
2725 
2726 	cmd.sta_id = IWM_STATION_ID;
2727 	cmd.mac_id_n_color
2728 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2729 	cmd.add_modify = IWM_STA_MODE_MODIFY;
2730 
2731 	if (start) {
2732 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2733 		cmd.add_immediate_ba_ssn = ssn;
2734 	} else {
2735 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2736 	}
2737 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2738 	    IWM_STA_MODIFY_REMOVE_BA_TID;
2739 
2740 	status = IWM_ADD_STA_SUCCESS;
2741 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2742 	    &status);
2743 
2744 	s = splnet();
2745 	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2746 		if (start) {
2747 			sc->sc_rx_ba_sessions++;
2748 			ieee80211_addba_req_accept(ic, ni, tid);
2749 		} else if (sc->sc_rx_ba_sessions > 0)
2750 			sc->sc_rx_ba_sessions--;
2751 	} else if (start)
2752 		ieee80211_addba_req_refuse(ic, ni, tid);
2753 	splx(s);
2754 }
2755 
2756 static void
2757 iwm_htprot_task(void *arg)
2758 {
2759 	struct iwm_softc *sc = arg;
2760 	struct ieee80211com *ic = &sc->sc_ic;
2761 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2762 	int err;
2763 
2764 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
2765 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2766 	if (err)
2767 		aprint_error_dev(sc->sc_dev,
2768 		    "could not change HT protection: error %d\n", err);
2769 }
2770 
2771 /*
2772  * This function is called by upper layer when HT protection settings in
2773  * beacons have changed.
2774  */
2775 static void
2776 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2777 {
2778 	struct iwm_softc *sc = ic->ic_softc;
2779 
2780 	/* assumes that ni == ic->ic_bss */
2781 	task_add(systq, &sc->htprot_task);
2782 }
2783 
2784 static void
2785 iwm_ba_task(void *arg)
2786 {
2787 	struct iwm_softc *sc = arg;
2788 	struct ieee80211com *ic = &sc->sc_ic;
2789 	struct ieee80211_node *ni = ic->ic_bss;
2790 
2791 	if (sc->ba_start)
2792 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2793 	else
2794 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2795 }
2796 
2797 /*
2798  * This function is called by upper layer when an ADDBA request is received
2799  * from another STA and before the ADDBA response is sent.
2800  */
2801 static int
2802 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2803     uint8_t tid)
2804 {
2805 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2806 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2807 
2808 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2809 		return ENOSPC;
2810 
2811 	sc->ba_start = 1;
2812 	sc->ba_tid = tid;
2813 	sc->ba_ssn = htole16(ba->ba_winstart);
2814 	task_add(systq, &sc->ba_task);
2815 
2816 	return EBUSY;
2817 }
2818 
2819 /*
2820  * This function is called by upper layer on teardown of an HT-immediate
2821  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2822  */
2823 static void
2824 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2825     uint8_t tid)
2826 {
2827 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2828 
2829 	sc->ba_start = 0;
2830 	sc->ba_tid = tid;
2831 	task_add(systq, &sc->ba_task);
2832 }
2833 #endif
2834 
2835 static void
2836 iwm_free_fw_paging(struct iwm_softc *sc)
2837 {
2838 	int i;
2839 
2840 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
2841 		return;
2842 
2843 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
2844 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
2845 	}
2846 
2847 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
2848 }
2849 
2850 static int
2851 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2852 {
2853 	int sec_idx, idx;
2854 	uint32_t offset = 0;
2855 
2856 	/*
2857 	 * Find where the paging image starts.
2858 	 * If CPU2 exists and is in paging format, the image looks like:
2859 	 * CPU1 sections (2 or more)
2860 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
2861 	 * CPU2 sections (not paged)
2862 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
2863 	 * CPU2 sections from the CPU2 paging sections
2864 	 * CPU2 paging CSS
2865 	 * CPU2 paging image (including instructions and data)
2866 	 */
2867 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
2868 		if (fws->fw_sect[sec_idx].fws_devoff ==
2869 		    IWM_PAGING_SEPARATOR_SECTION) {
2870 			sec_idx++;
2871 			break;
2872 		}
2873 	}
2874 
2875 	/*
2876 	 * If paging is enabled there should be at least 2 more sections left
2877 	 * (one for CSS and one for Paging data)
2878 	 */
2879 	if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
2880 		aprint_verbose_dev(sc->sc_dev,
2881 		    "Paging: Missing CSS and/or paging sections\n");
2882 		iwm_free_fw_paging(sc);
2883 		return EINVAL;
2884 	}
2885 
2886 	/* copy the CSS block to the dram */
2887 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
2888 	    sec_idx));
2889 
2890 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
2891 	    fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);
2892 
2893 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
2894 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
2895 
2896 	sec_idx++;
2897 
2898 	/*
2899 	 * Copy the paging blocks to DRAM.
2900 	 * The loop index starts at 1 since the CSS block was already copied
2901 	 * to DRAM as index 0. The loop stops before num_of_paging_blk since
2902 	 * the last block may not be full; it is copied separately below.
2903 	 */
2904 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
2905 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2906 		       (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2907 		       sc->fw_paging_db[idx].fw_paging_size);
2908 
2909 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
2910 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
2911 
2912 		offset += sc->fw_paging_db[idx].fw_paging_size;
2913 	}
2914 
2915 	/* copy the last paging block */
2916 	if (sc->num_of_pages_in_last_blk > 0) {
2917 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2918 		    (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2919 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
2920 
2921 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
2922 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
2923 	}
2924 
2925 	return 0;
2926 }
2927 
2928 static int
2929 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2930 {
2931 	int blk_idx = 0;
2932 	int error, num_of_pages;
2933 	bus_dmamap_t dmap;
2934 
2935 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
2936 		int i;
2937 		/* Device got reset, and we set up firmware paging again */
2938 		for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
2939 			dmap = sc->fw_paging_db[i].fw_paging_block.map;
2940 			bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
2941 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2942 		}
2943 		return 0;
2944 	}
2945 
2946 	/* Ensure that IWM_PAGING_BLOCK_SIZE equals 2^IWM_BLOCK_2_EXP_SIZE. */
2947 	CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);
2948 
2949 	num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
2950 	sc->num_of_paging_blk =
2951 	    howmany(num_of_pages, IWM_NUM_OF_PAGE_PER_GROUP);
2952 	sc->num_of_pages_in_last_blk = num_of_pages -
2953 	    IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
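	/*
	 * Editorial example (hypothetical size): a paging_mem_size of
	 * 200 KB holds 50 4 KB pages; with 8 pages per group that is
	 * howmany(50, 8) = 7 blocks, and the last block holds
	 * 50 - 8 * 6 = 2 pages.
	 */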
2954 
2955 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
2956 	    "each block holds 8 pages, last block holds %d pages\n",
2957 	    DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));
2958 
2959 	/* allocate block of 4Kbytes for paging CSS */
2960 	error = iwm_dma_contig_alloc(sc->sc_dmat,
2961 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
2962 	    4096);
2963 	if (error) {
2964 		/* free all the previous pages since we failed */
2965 		iwm_free_fw_paging(sc);
2966 		return ENOMEM;
2967 	}
2968 
2969 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
2970 
2971 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
2972 	    DEVNAME(sc)));
2973 
2974 	/*
2975 	 * Allocate the remaining blocks in DRAM. Since the CSS lives in
2976 	 * fw_paging_db[0], the loop starts at index 1.
2977 	 */
2978 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
2979 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
2980 		/* XXX Use iwm_dma_contig_alloc for allocating */
2981 		error = iwm_dma_contig_alloc(sc->sc_dmat,
2982 		    &sc->fw_paging_db[blk_idx].fw_paging_block,
2983 		    IWM_PAGING_BLOCK_SIZE, 4096);
2984 		if (error) {
2985 			/* free all the previous pages since we failed */
2986 			iwm_free_fw_paging(sc);
2987 			return ENOMEM;
2988 		}
2989 
2990 		sc->fw_paging_db[blk_idx].fw_paging_size =
2991 		    IWM_PAGING_BLOCK_SIZE;
2992 
2993 		DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
2994 		    "paging.\n", DEVNAME(sc)));
2995 	}
2996 
2997 	return 0;
2998 }
2999 
3000 static int
3001 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3002 {
3003 	int err;
3004 
3005 	err = iwm_alloc_fw_paging_mem(sc, fws);
3006 	if (err)
3007 		return err;
3008 
3009 	return iwm_fill_paging_mem(sc, fws);
3010 }
3011 
3012 static bool
3013 iwm_has_new_tx_api(struct iwm_softc *sc)
3014 {
3015 	/* XXX */
3016 	return false;
3017 }
3018 
3019 /* send paging cmd to FW in case CPU2 has paging image */
3020 static int
3021 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3022 {
3023 	struct iwm_fw_paging_cmd fw_paging_cmd = {
3024 		.flags = htole32(IWM_PAGING_CMD_IS_SECURED |
3025 		                 IWM_PAGING_CMD_IS_ENABLED |
3026 		                 (sc->num_of_pages_in_last_blk <<
3027 		                  IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
3028 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
3029 		.block_num = htole32(sc->num_of_paging_blk),
3030 	};
3031 	size_t size = sizeof(fw_paging_cmd);
3032 	int blk_idx;
3033 	bus_dmamap_t dmap;
3034 
3035 	if (!iwm_has_new_tx_api(sc))
3036 		size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
3037 		    IWM_NUM_OF_FW_PAGING_BLOCKS;
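	/*
	 * Editorial note: the older command layout carries 32-bit block
	 * addresses, so the command shrinks by 4 bytes
	 * (sizeof(uint64_t) - sizeof(uint32_t)) for each of the
	 * IWM_NUM_OF_FW_PAGING_BLOCKS address slots.
	 */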
3038 
3039 	/* loop for all paging blocks + CSS block */
3040 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
3041 		bus_addr_t dev_phy_addr =
3042 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
3043 		if (iwm_has_new_tx_api(sc)) {
3044 			fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
3045 			    htole64(dev_phy_addr);
3046 		} else {
3047 			dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
3048 			fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
3049 			    htole32(dev_phy_addr);
3050 		}
3051 		dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
3052 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3053 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3054 	}
3055 
3056 	return iwm_send_cmd_pdu(sc,
3057 	    iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_ALWAYS_LONG_GROUP, 0),
3058 	    0, size, &fw_paging_cmd);
3059 }
3060 
3061 static void
3062 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3063     const uint16_t *mac_override, const uint16_t *nvm_hw)
3064 {
3065 	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
3066 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3067 	};
3068 	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
3069 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
3070 	};
3071 	const uint8_t *hw_addr;
3072 
3073 	if (mac_override) {
3074 		hw_addr = (const uint8_t *)(mac_override +
3075 		    IWM_MAC_ADDRESS_OVERRIDE_8000);
3076 
3077 		/*
3078 		 * Store the MAC address from MAO section.
3079 		 * No byte swapping is required in MAO section
3080 		 */
3081 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3082 
3083 		/*
3084 		 * Force the use of the OTP MAC address in case of reserved MAC
3085 		 * address in the NVM, or if address is given but invalid.
3086 		 */
3087 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3088 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3089 		    sizeof(etherbroadcastaddr)) != 0) &&
3090 		    (memcmp(etheranyaddr, data->hw_addr,
3091 		    sizeof(etheranyaddr)) != 0) &&
3092 		    !ETHER_IS_MULTICAST(data->hw_addr))
3093 			return;
3094 	}
3095 
3096 	if (nvm_hw) {
3097 		/* Read the mac address from WFMP registers. */
3098 		uint32_t mac_addr0 =
3099 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3100 		uint32_t mac_addr1 =
3101 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3102 
3103 		hw_addr = (const uint8_t *)&mac_addr0;
3104 		data->hw_addr[0] = hw_addr[3];
3105 		data->hw_addr[1] = hw_addr[2];
3106 		data->hw_addr[2] = hw_addr[1];
3107 		data->hw_addr[3] = hw_addr[0];
3108 
3109 		hw_addr = (const uint8_t *)&mac_addr1;
3110 		data->hw_addr[4] = hw_addr[1];
3111 		data->hw_addr[5] = hw_addr[0];
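
		/*
		 * Editorial example (hypothetical register values): if
		 * IWM_WFMP_MAC_ADDR_0 reads 0x00112233 and
		 * IWM_WFMP_MAC_ADDR_1 reads 0x00004455, the unpacking
		 * above yields the address 00:11:22:33:44:55.
		 */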
3112 
3113 		return;
3114 	}
3115 
3116 	aprint_error_dev(sc->sc_dev, "mac address not found\n");
3117 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3118 }
3119 
3120 static int
3121 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3122     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3123     const uint16_t *mac_override, const uint16_t *phy_sku,
3124     const uint16_t *regulatory)
3125 {
3126 	struct iwm_nvm_data *data = &sc->sc_nvm;
3127 	uint8_t hw_addr[ETHER_ADDR_LEN];
3128 	uint32_t sku;
3129 
3130 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3131 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3132 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3133 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3134 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3135 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3136 
3137 		data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3138 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3139 	} else {
3140 		uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
3141 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3142 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3143 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3144 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3145 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3146 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3147 
3148 		data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
3149 		sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
3150 	}
3151 
3152 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3153 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3154 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3155 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3156 
3157 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3158 
3159 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3160 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3161 		data->hw_addr[0] = hw_addr[1];
3162 		data->hw_addr[1] = hw_addr[0];
3163 		data->hw_addr[2] = hw_addr[3];
3164 		data->hw_addr[3] = hw_addr[2];
3165 		data->hw_addr[4] = hw_addr[5];
3166 		data->hw_addr[5] = hw_addr[4];
3167 	} else
3168 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3169 
3170 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3171 		uint16_t lar_offset, lar_config;
3172 		lar_offset = data->nvm_version < 0xE39 ?
3173 		    IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
3174 		lar_config = le16_to_cpup(regulatory + lar_offset);
3175 		data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
3176 	}
3177 
3178 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
3179 		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3180 		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
3181 	else
3182 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3183 		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
3184 
3185 	data->calib_version = 255;   /* TODO:
3186 					this value will prevent some checks from
3187 					failing; we need to check whether this
3188 					field is still needed, and if it is,
3189 					where it lives in the NVM */
3190 
3191 	return 0;
3192 }
3193 
3194 static int
3195 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3196 {
3197 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3198 	const uint16_t *regulatory = NULL;
3199 
3200 	/* Checking for required sections */
3201 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3202 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3203 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3204 			return ENOENT;
3205 		}
3206 
3207 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3208 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3209 		/* SW and REGULATORY sections are mandatory */
3210 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3211 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3212 			return ENOENT;
3213 		}
3214 		/* MAC_OVERRIDE or at least HW section must exist */
3215 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3216 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3217 			return ENOENT;
3218 		}
3219 
3220 		/* PHY_SKU section is mandatory in B0 */
3221 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3222 			return ENOENT;
3223 		}
3224 
3225 		regulatory = (const uint16_t *)
3226 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3227 		hw = (const uint16_t *)
3228 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3229 		mac_override =
3230 			(const uint16_t *)
3231 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3232 		phy_sku = (const uint16_t *)
3233 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3234 	} else {
3235 		panic("unknown device family %d", sc->sc_device_family);
3236 	}
3237 
3238 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3239 	calib = (const uint16_t *)
3240 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3241 
3242 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3243 	    phy_sku, regulatory);
3244 }
3245 
3246 static int
3247 iwm_nvm_init(struct iwm_softc *sc)
3248 {
3249 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3250 	int i, section, err;
3251 	uint16_t len;
3252 	uint8_t *buf;
3253 	const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
3254 	    IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
3255 
3256 	/* Read From FW NVM */
3257 	DPRINTF(("Read NVM\n"));
3258 
3259 	memset(nvm_sections, 0, sizeof(nvm_sections));
3260 
3261 	buf = kmem_alloc(bufsz, KM_SLEEP);
3262 
3263 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
3264 		section = iwm_nvm_to_read[i];
3265 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
3266 
3267 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3268 		if (err) {
3269 			err = 0;
3270 			continue;
3271 		}
3272 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3273 		memcpy(nvm_sections[section].data, buf, len);
3274 		nvm_sections[section].length = len;
3275 	}
3276 	kmem_free(buf, bufsz);
3277 	if (err == 0)
3278 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3279 
3280 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3281 		if (nvm_sections[i].data != NULL)
3282 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3283 	}
3284 
3285 	return err;
3286 }
3287 
3288 static int
3289 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3290     const uint8_t *section, uint32_t byte_cnt)
3291 {
3292 	int err = EINVAL;
3293 	uint32_t chunk_sz, offset;
3294 
3295 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3296 
3297 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3298 		uint32_t addr, len;
3299 		const uint8_t *data;
3300 		bool is_extended = false;
3301 
3302 		addr = dst_addr + offset;
3303 		len = MIN(chunk_sz, byte_cnt - offset);
3304 		data = section + offset;
3305 
3306 		if (addr >= IWM_FW_MEM_EXTENDED_START &&
3307 		    addr <= IWM_FW_MEM_EXTENDED_END)
3308 			is_extended = true;
3309 
3310 		if (is_extended)
3311 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3312 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3313 
3314 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3315 
3316 		if (is_extended)
3317 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3318 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3319 
3320 		if (err)
3321 			break;
3322 	}
3323 
3324 	return err;
3325 }
3326 
3327 static int
3328 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3329     const uint8_t *section, uint32_t byte_cnt)
3330 {
3331 	struct iwm_dma_info *dma = &sc->fw_dma;
3332 	int err;
3333 
3334 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3335 	memcpy(dma->vaddr, section, byte_cnt);
3336 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3337 	    BUS_DMASYNC_PREWRITE);
3338 
3339 	sc->sc_fw_chunk_done = 0;
3340 
3341 	if (!iwm_nic_lock(sc))
3342 		return EBUSY;
3343 
3344 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3345 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3346 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3347 	    dst_addr);
3348 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3349 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3350 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3351 	    (iwm_get_dma_hi_addr(dma->paddr)
3352 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3353 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3354 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3355 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3356 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3357 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3358 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3359 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3360 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3361 
3362 	iwm_nic_unlock(sc);
3363 
3364 	/* Wait for this segment to load. */
3365 	err = 0;
3366 	while (!sc->sc_fw_chunk_done) {
3367 		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3368 		if (err)
3369 			break;
3370 	}
3371 	if (!sc->sc_fw_chunk_done) {
3372 		DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3373 		    DEVNAME(sc), dst_addr, byte_cnt));
3374 	}
3375 
3376 	return err;
3377 }
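
/*
 * Editorial note: the loader above programs a single transfer buffer on
 * the firmware-load service channel (IWM_FH_SRVC_CHNL) and then sleeps
 * on sc->sc_fw until the transfer-complete interrupt sets
 * sc_fw_chunk_done, bounded by a 5 second (mstohz(5000)) timeout per
 * chunk.
 */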
3378 
3379 static int
3380 iwm_load_cpu_sections_7000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3381     int cpu, int *first_ucode_section)
3382 {
3383 	int i, err = 0;
3384 	uint32_t last_read_idx = 0;
3385 	void *data;
3386 	uint32_t dlen;
3387 	uint32_t offset;
3388 
3389 	if (cpu == 1) {
3390 		*first_ucode_section = 0;
3391 	} else {
3392 		(*first_ucode_section)++;
3393 	}
3394 
3395 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3396 		last_read_idx = i;
3397 		data = fws->fw_sect[i].fws_data;
3398 		dlen = fws->fw_sect[i].fws_len;
3399 		offset = fws->fw_sect[i].fws_devoff;
3400 
3401 		/*
3402 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
3403 		 * from CPU2.
3404 		 * PAGING_SEPARATOR_SECTION delimiter - separates the
3405 		 * non-paged CPU2 sections from the CPU2 paging sections.
3406 		 */
3407 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3408 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3409 			break;
3410 
3411 		if (dlen > sc->sc_fwdmasegsz)
3412 			err = EFBIG;
3413 		else
3414 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3415 		if (err) {
3416 			DPRINTF(("%s: could not load firmware chunk %d "
3417 			    "(error %d)\n", DEVNAME(sc), i, err));
3418 			return err;
3419 		}
3420 	}
3421 
3422 	*first_ucode_section = last_read_idx;
3423 
3424 	return 0;
3425 }
3426 
3427 static int
3428 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3429 {
3430 	struct iwm_fw_sects *fws;
3431 	int err = 0;
3432 	int first_ucode_section;
3433 
3434 	fws = &sc->sc_fw.fw_sects[ucode_type];
3435 
3436 	DPRINTF(("%s: working with %s CPU\n", DEVNAME(sc),
3437 	    fws->is_dual_cpus ? "dual" : "single"));
3438 
3439 	/* load to FW the binary Secured sections of CPU1 */
3440 	err = iwm_load_cpu_sections_7000(sc, fws, 1, &first_ucode_section);
3441 	if (err)
3442 		return err;
3443 
3444 	if (fws->is_dual_cpus) {
3445 		/* set CPU2 header address */
3446 		if (iwm_nic_lock(sc)) {
3447 			iwm_write_prph(sc,
3448 			    IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
3449 			    IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
3450 			iwm_nic_unlock(sc);
3451 		}
3452 
3453 		/* load to FW the binary sections of CPU2 */
3454 		err = iwm_load_cpu_sections_7000(sc, fws, 2,
3455 		    &first_ucode_section);
3456 		if (err)
3457 			return err;
3458 	}
3459 
3460 	/* release CPU reset */
3461 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3462 
3463 	return 0;
3464 }
3465 
3466 static int
3467 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3468     int cpu, int *first_ucode_section)
3469 {
3470 	int shift_param;
3471 	int i, err = 0, sec_num = 0x1;
3472 	uint32_t val, last_read_idx = 0;
3473 	void *data;
3474 	uint32_t dlen;
3475 	uint32_t offset;
3476 
3477 	if (cpu == 1) {
3478 		shift_param = 0;
3479 		*first_ucode_section = 0;
3480 	} else {
3481 		shift_param = 16;
3482 		(*first_ucode_section)++;
3483 	}
3484 
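	/*
	 * The FH load-status register tracks CPU1 sections in its low
	 * 16 bits and CPU2 sections in the high 16 bits, hence the
	 * shift selected above.
	 */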
3485 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3486 		last_read_idx = i;
3487 		data = fws->fw_sect[i].fws_data;
3488 		dlen = fws->fw_sect[i].fws_len;
3489 		offset = fws->fw_sect[i].fws_devoff;
3490 
3491 		/*
3492 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
3493 		 * CPU1 sections from the CPU2 sections.
3494 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
3495 		 * non-paged sections from the CPU2 paging sections.
3496 		 */
3497 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3498 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3499 			break;
3500 
3501 		if (dlen > sc->sc_fwdmasegsz)
3502 			err = EFBIG;
3503 		else
3504 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3505 		if (err) {
3506 			DPRINTF(("%s: could not load firmware chunk %d "
3507 			    "(error %d)\n", DEVNAME(sc), i, err));
3508 			return err;
3509 		}
3510 
3511 		/* Notify the ucode of the loaded section number and status */
3512 		if (iwm_nic_lock(sc)) {
3513 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3514 			val = val | (sec_num << shift_param);
3515 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3516 			sec_num = (sec_num << 1) | 0x1;
3517 			iwm_nic_unlock(sc);
3518 
3519 			/*
3520 			 * The firmware won't load correctly without this delay.
3521 			 */
3522 			DELAY(8000);
3523 		}
3524 	}
3525 
3526 	*first_ucode_section = last_read_idx;
3527 
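	/* Tell the ucode that all sections of this CPU have been loaded. */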
3528 	if (iwm_nic_lock(sc)) {
3529 		if (cpu == 1)
3530 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3531 		else
3532 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3533 		iwm_nic_unlock(sc);
3534 	}
3535 
3536 	return 0;
3537 }
3538 
3539 static int
3540 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3541 {
3542 	struct iwm_fw_sects *fws;
3543 	int err = 0;
3544 	int first_ucode_section;
3545 
3546 	fws = &sc->sc_fw.fw_sects[ucode_type];
3547 
3548 	/* configure the ucode to be ready to get the secured image */
3549 	/* release CPU reset */
3550 	if (iwm_nic_lock(sc)) {
3551 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3552 		    IWM_RELEASE_CPU_RESET_BIT);
3553 		iwm_nic_unlock(sc);
3554 	}
3555 
3556 	/* Load the secured binary sections of CPU1 into the firmware. */
3557 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3558 	if (err)
3559 		return err;
3560 
3561 	/* Load the binary sections of CPU2 into the firmware. */
3562 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3563 }
3564 
3565 static int
3566 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3567 {
3568 	int err, w;
3569 
3570 	sc->sc_uc.uc_intr = 0;
3571 
3572 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3573 		err = iwm_load_firmware_8000(sc, ucode_type);
3574 	else
3575 		err = iwm_load_firmware_7000(sc, ucode_type);
3576 	if (err)
3577 		return err;
3578 
3579 	/* wait for the firmware to load */
3580 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3581 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3582 	if (err || !sc->sc_uc.uc_ok) {
3583 		aprint_error_dev(sc->sc_dev,
3584 		    "could not load firmware (error %d, ok %d)\n",
3585 		    err, sc->sc_uc.uc_ok);
3586 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3587 			aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3588 			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3589 			aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3590 			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3591 		}
3592 	}
3593 
3594 	return err;
3595 }
3596 
3597 static int
3598 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3599 {
3600 	int err;
3601 
3602 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3603 
3604 	err = iwm_nic_init(sc);
3605 	if (err) {
3606 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3607 		return err;
3608 	}
3609 
3610 	/* make sure rfkill handshake bits are cleared */
3611 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3612 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3613 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3614 
3615 	/* clear (again), then enable host interrupts */
3616 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3617 	iwm_enable_interrupts(sc);
3618 
3619 	/* really make sure rfkill handshake bits are cleared */
3620 	/* maybe we should write a few times more?  just to make sure */
3621 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3622 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3623 
3624 	return iwm_load_firmware(sc, ucode_type);
3625 }
3626 
3627 static int
3628 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3629 {
3630 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3631 		.valid = htole32(valid_tx_ant),
3632 	};
3633 
3634 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3635 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
3636 }
3637 
3638 static int
3639 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3640 {
3641 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3642 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3643 
3644 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3645 	phy_cfg_cmd.calib_control.event_trigger =
3646 	    sc->sc_default_calib[ucode_type].event_trigger;
3647 	phy_cfg_cmd.calib_control.flow_trigger =
3648 	    sc->sc_default_calib[ucode_type].flow_trigger;
3649 
3650 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3651 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3652 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3653 }
3654 
3655 static int
3656 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3657 {
3658 	struct iwm_fw_sects *fws;
3659 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3660 	int err;
3661 
3662 	err = iwm_read_firmware(sc, ucode_type);
3663 	if (err)
3664 		return err;
3665 
3666 	sc->sc_uc_current = ucode_type;
3667 	err = iwm_start_fw(sc, ucode_type);
3668 	if (err) {
3669 		sc->sc_uc_current = old_type;
3670 		return err;
3671 	}
3672 
3673 	err = iwm_post_alive(sc);
3674 	if (err)
3675 		return err;
3676 
3677 	fws = &sc->sc_fw.fw_sects[ucode_type];
3678 	if (fws->paging_mem_size) {
3679 		err = iwm_save_fw_paging(sc, fws);
3680 		if (err)
3681 			return err;
3682 
3683 		err = iwm_send_paging_cmd(sc, fws);
3684 		if (err) {
3685 			iwm_free_fw_paging(sc);
3686 			return err;
3687 		}
3688 	}
3689 
3690 	return 0;
3691 }
3692 
3693 static int
3694 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3695 {
3696 	int err;
3697 
3698 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3699 		aprint_error_dev(sc->sc_dev,
3700 		    "radio is disabled by hardware switch\n");
3701 		return EPERM;
3702 	}
3703 
3704 	sc->sc_init_complete = 0;
3705 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3706 	if (err) {
3707 		DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3708 		return err;
3709 	}
3710 
3711 	if (justnvm) {
3712 		err = iwm_nvm_init(sc);
3713 		if (err) {
3714 			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3715 			return err;
3716 		}
3717 
3718 		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3719 		    ETHER_ADDR_LEN);
3720 		return 0;
3721 	}
3722 
3723 	err = iwm_send_bt_init_conf(sc);
3724 	if (err)
3725 		return err;
3726 
3727 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3728 	if (err)
3729 		return err;
3730 
3731 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3732 	if (err)
3733 		return err;
3734 
3735 	/*
3736 	 * Send the PHY configuration command to the init uCode in order
3737 	 * to start the 16.0 uCode init image's internal calibrations.
3738 	 */
3739 	err = iwm_send_phy_cfg_cmd(sc);
3740 	if (err)
3741 		return err;
3742 
3743 	/*
3744 	 * Nothing to do but wait for the init complete notification
3745 	 * from the firmware.
3746 	 */
3747 	while (!sc->sc_init_complete) {
3748 		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3749 		if (err)
3750 			break;
3751 	}
3752 
3753 	return err;
3754 }
3755 
3756 static int
3757 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3758 {
3759 	struct iwm_rx_ring *ring = &sc->rxq;
3760 	struct iwm_rx_data *data = &ring->data[idx];
3761 	struct mbuf *m;
3762 	int err;
3763 	int fatal = 0;
3764 
3765 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3766 	if (m == NULL)
3767 		return ENOBUFS;
3768 
3769 	if (size <= MCLBYTES) {
3770 		MCLGET(m, M_DONTWAIT);
3771 	} else {
3772 		MEXTMALLOC(m, size, M_DONTWAIT);
3773 	}
3774 	if ((m->m_flags & M_EXT) == 0) {
3775 		m_freem(m);
3776 		return ENOBUFS;
3777 	}
3778 
3779 	if (data->m != NULL) {
3780 		bus_dmamap_unload(sc->sc_dmat, data->map);
3781 		fatal = 1;
3782 	}
3783 
3784 	m->m_len = m->m_pkthdr.len = size;
3785 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3786 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3787 	if (err) {
3788 		/* XXX */
3789 		if (fatal)
3790 			panic("iwm: could not load RX mbuf");
3791 		m_freem(m);
3792 		return err;
3793 	}
3794 	data->m = m;
3795 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3796 
3797 	/* Update RX descriptor. */
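	/* The hardware takes the buffer's DMA address shifted right by 8. */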
3798 	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3799 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3800 	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3801 
3802 	return 0;
3803 }
3804 
3805 #define IWM_RSSI_OFFSET 50
3806 static int
3807 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3808 {
3809 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3810 	uint32_t agc_a, agc_b;
3811 	uint32_t val;
3812 
3813 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3814 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3815 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3816 
3817 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3818 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3819 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3820 
3821 	/*
3822 	 * dBm = rssi dB - agc dB - constant.
3823 	 * Higher AGC (higher radio gain) means lower signal.
3824 	 */
3825 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3826 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3827 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3828 
3829 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3830 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3831 
3832 	return max_rssi_dbm;
3833 }
3834 
3835 /*
3836  * RSSI values are reported by the FW as positive values - need to negate
3837  * to obtain their dBm.  Account for missing antennas by replacing 0
3838  * values by -256 dBm: practically 0 power and a non-feasible 8-bit value.
3839  */
3840 static int
3841 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3842 {
3843 	int energy_a, energy_b, energy_c, max_energy;
3844 	uint32_t val;
3845 
3846 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3847 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3848 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3849 	energy_a = energy_a ? -energy_a : -256;
3850 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3851 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3852 	energy_b = energy_b ? -energy_b : -256;
3853 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3854 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3855 	energy_c = energy_c ? -energy_c : -256;
3856 	max_energy = MAX(energy_a, energy_b);
3857 	max_energy = MAX(max_energy, energy_c);
3858 
3859 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3860 	    energy_a, energy_b, energy_c, max_energy));
3861 
3862 	return max_energy;
3863 }
3864 
3865 static void
3866 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3867     struct iwm_rx_data *data)
3868 {
3869 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3870 
3871 	DPRINTFN(20, ("received PHY stats\n"));
3872 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3873 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3874 
3875 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3876 }
3877 
3878 /*
3879  * Retrieve the average noise (in dBm) among receivers.
3880  */
3881 static int
3882 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3883 {
3884 	int i, total, nbant, noise;
3885 
3886 	total = nbant = noise = 0;
3887 	for (i = 0; i < 3; i++) {
3888 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3889 		if (noise) {
3890 			total += noise;
3891 			nbant++;
3892 		}
3893 	}
3894 
3895 	/* There should be at least one antenna but check anyway. */
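	/* Subtracting 107 converts the average to dBm; -127 is the floor. */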
3896 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3897 }
3898 
3899 static void
3900 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3901     struct iwm_rx_data *data)
3902 {
3903 	struct ieee80211com *ic = &sc->sc_ic;
3904 	struct ieee80211_frame *wh;
3905 	struct ieee80211_node *ni;
3906 	struct ieee80211_channel *c = NULL;
3907 	struct mbuf *m;
3908 	struct iwm_rx_phy_info *phy_info;
3909 	struct iwm_rx_mpdu_res_start *rx_res;
3910 	int device_timestamp;
3911 	uint32_t len;
3912 	uint32_t rx_pkt_status;
3913 	int rssi;
3914 	int s;
3915 
3916 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3917 	    BUS_DMASYNC_POSTREAD);
3918 
3919 	phy_info = &sc->sc_last_phy_info;
3920 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3921 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3922 	len = le16toh(rx_res->byte_count);
3923 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3924 	    sizeof(*rx_res) + len));
3925 
3926 	m = data->m;
3927 	m->m_data = pkt->data + sizeof(*rx_res);
3928 	m->m_pkthdr.len = m->m_len = len;
3929 
3930 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3931 		DPRINTF(("dsp size out of range [0,20]: %d\n",
3932 		    phy_info->cfg_phy_cnt));
3933 		return;
3934 	}
3935 
3936 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3937 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3938 		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3939 		return; /* drop */
3940 	}
3941 
3942 	device_timestamp = le32toh(phy_info->system_timestamp);
3943 
3944 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3945 		rssi = iwm_get_signal_strength(sc, phy_info);
3946 	} else {
3947 		rssi = iwm_calc_rssi(sc, phy_info);
3948 	}
3949 	rssi = -rssi;
3950 
3951 	if (ic->ic_state == IEEE80211_S_SCAN)
3952 		iwm_fix_channel(sc, m);
3953 
3954 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3955 		return;
3956 
3957 	m_set_rcvif(m, IC2IFP(ic));
3958 
3959 	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3960 		c = &ic->ic_channels[le32toh(phy_info->channel)];
3961 
3962 	s = splnet();
3963 
3964 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3965 	if (c)
3966 		ni->ni_chan = c;
3967 
3968 	if (__predict_false(sc->sc_drvbpf != NULL)) {
3969 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3970 
3971 		tap->wr_flags = 0;
3972 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3973 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3974 		tap->wr_chan_freq =
3975 		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
3976 		tap->wr_chan_flags =
3977 		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
3978 		tap->wr_dbm_antsignal = (int8_t)rssi;
3979 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3980 		tap->wr_tsft = phy_info->system_timestamp;
3981 		if (phy_info->phy_flags &
3982 		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3983 			uint8_t mcs = (phy_info->rate_n_flags &
3984 			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
3985 			      IWM_RATE_HT_MCS_NSS_MSK));
3986 			tap->wr_rate = (0x80 | mcs);
3987 		} else {
3988 			uint8_t rate = (phy_info->rate_n_flags &
3989 			    htole32(IWM_RATE_LEGACY_RATE_MSK));
3990 			switch (rate) {
3991 			/* CCK rates. */
3992 			case  10: tap->wr_rate =   2; break;
3993 			case  20: tap->wr_rate =   4; break;
3994 			case  55: tap->wr_rate =  11; break;
3995 			case 110: tap->wr_rate =  22; break;
3996 			/* OFDM rates. */
3997 			case 0xd: tap->wr_rate =  12; break;
3998 			case 0xf: tap->wr_rate =  18; break;
3999 			case 0x5: tap->wr_rate =  24; break;
4000 			case 0x7: tap->wr_rate =  36; break;
4001 			case 0x9: tap->wr_rate =  48; break;
4002 			case 0xb: tap->wr_rate =  72; break;
4003 			case 0x1: tap->wr_rate =  96; break;
4004 			case 0x3: tap->wr_rate = 108; break;
4005 			/* Unknown rate: should not happen. */
4006 			default:  tap->wr_rate =   0;
4007 			}
4008 		}
4009 
4010 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m, BPF_D_IN);
4011 	}
4012 	ieee80211_input(ic, m, ni, rssi, device_timestamp);
4013 	ieee80211_free_node(ni);
4014 
4015 	splx(s);
4016 }
4017 
4018 static void
4019 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4020     struct iwm_node *in)
4021 {
4022 	struct ieee80211com *ic = &sc->sc_ic;
4023 	struct ifnet *ifp = IC2IFP(ic);
4024 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4025 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4026 	int failack = tx_resp->failure_frame;
4027 
4028 	KASSERT(tx_resp->frame_count == 1);
4029 
4030 	/* Update rate control statistics. */
4031 	in->in_amn.amn_txcnt++;
4032 	if (failack > 0) {
4033 		in->in_amn.amn_retrycnt++;
4034 	}
4035 
4036 	if (status != IWM_TX_STATUS_SUCCESS &&
4037 	    status != IWM_TX_STATUS_DIRECT_DONE)
4038 		if_statinc(ifp, if_oerrors);
4039 	else
4040 		if_statinc(ifp, if_opackets);
4041 }
4042 
4043 static void
4044 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4045     struct iwm_rx_data *data)
4046 {
4047 	struct ieee80211com *ic = &sc->sc_ic;
4048 	struct ifnet *ifp = IC2IFP(ic);
4049 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4050 	int idx = cmd_hdr->idx;
4051 	int qid = cmd_hdr->qid;
4052 	struct iwm_tx_ring *ring = &sc->txq[qid];
4053 	struct iwm_tx_data *txd = &ring->data[idx];
4054 	struct iwm_node *in = txd->in;
4055 	int s;
4056 
4057 	s = splnet();
4058 
4059 	if (txd->done) {
4060 		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
4061 		    DEVNAME(sc)));
4062 		splx(s);
4063 		return;
4064 	}
4065 
4066 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4067 	    BUS_DMASYNC_POSTREAD);
4068 
4069 	sc->sc_tx_timer = 0;
4070 
4071 	iwm_rx_tx_cmd_single(sc, pkt, in);
4072 
4073 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4074 	    BUS_DMASYNC_POSTWRITE);
4075 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4076 	m_freem(txd->m);
4077 
4078 	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
4079 	KASSERT(txd->done == 0);
4080 	txd->done = 1;
4081 	KASSERT(txd->in);
4082 
4083 	txd->m = NULL;
4084 	txd->in = NULL;
4085 	ieee80211_free_node(&in->in_ni);
4086 
4087 	if (--ring->queued < IWM_TX_RING_LOMARK) {
4088 		sc->qfullmsk &= ~(1 << qid);
4089 		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
4090 			ifp->if_flags &= ~IFF_OACTIVE;
4091 			KASSERT(KERNEL_LOCKED_P());
4092 			iwm_start(ifp);
4093 		}
4094 	}
4095 
4096 	splx(s);
4097 }
4098 
4099 static int
4100 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4101 {
4102 	struct iwm_binding_cmd cmd;
4103 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4104 	int i, err;
4105 	uint32_t status;
4106 
4107 	memset(&cmd, 0, sizeof(cmd));
4108 
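	/* Bind the MAC context (in_id/in_color) to the PHY context. */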
4109 	cmd.id_and_color
4110 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4111 	cmd.action = htole32(action);
4112 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4113 
4114 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4115 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4116 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4117 
4118 	status = 0;
4119 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4120 	    sizeof(cmd), &cmd, &status);
4121 	if (err == 0 && status != 0)
4122 		err = EIO;
4123 
4124 	return err;
4125 }
4126 
4127 static void
4128 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4129     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4130 {
4131 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4132 
4133 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4134 	    ctxt->color));
4135 	cmd->action = htole32(action);
4136 	cmd->apply_time = htole32(apply_time);
4137 }
4138 
4139 static void
4140 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4141     struct ieee80211_channel *chan, uint8_t chains_static,
4142     uint8_t chains_dynamic)
4143 {
4144 	struct ieee80211com *ic = &sc->sc_ic;
4145 	uint8_t active_cnt, idle_cnt;
4146 
4147 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4148 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4149 
4150 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4151 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4152 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4153 
4154 	/* Set the RX chains. */
4155 	idle_cnt = chains_static;
4156 	active_cnt = chains_dynamic;
4157 
4158 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4159 	    IWM_PHY_RX_CHAIN_VALID_POS);
4160 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4161 	cmd->rxchain_info |= htole32(active_cnt <<
4162 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4163 
4164 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4165 }
4166 
4167 static int
4168 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4169     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4170     uint32_t apply_time)
4171 {
4172 	struct iwm_phy_context_cmd cmd;
4173 
4174 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4175 
4176 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4177 	    chains_static, chains_dynamic);
4178 
4179 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4180 	    sizeof(struct iwm_phy_context_cmd), &cmd);
4181 }
4182 
4183 static int
4184 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4185 {
4186 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4187 	struct iwm_tfd *desc;
4188 	struct iwm_tx_data *txdata;
4189 	struct iwm_device_cmd *cmd;
4190 	struct mbuf *m;
4191 	bus_addr_t paddr;
4192 	uint32_t addr_lo;
4193 	int err = 0, i, paylen, off, s;
4194 	int code;
4195 	int async, wantresp;
4196 	int group_id;
4197 	size_t hdrlen, datasz;
4198 	uint8_t *data;
4199 
4200 	code = hcmd->id;
4201 	async = hcmd->flags & IWM_CMD_ASYNC;
4202 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
4203 
4204 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
4205 		paylen += hcmd->len[i];
4206 	}
4207 
4208 	/* if the command wants an answer, busy sc_cmd_resp */
4209 	if (wantresp) {
4210 		KASSERT(!async);
4211 		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
4212 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
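		/* Record the slot (qid:idx) whose response we expect. */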
4213 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
4214 	}
4215 
4216 	/*
4217 	 * Is the hardware still available?  (after e.g. above wait).
4218 	 */
4219 	s = splnet();
4220 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
4221 		err = ENXIO;
4222 		goto out;
4223 	}
4224 
4225 	desc = &ring->desc[ring->cur];
4226 	txdata = &ring->data[ring->cur];
4227 
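	/*
	 * Commands in group 0 use the legacy short header; commands in
	 * any other group use the wide header, which also carries the
	 * group id, payload length and command version.
	 */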
4228 	group_id = iwm_cmd_groupid(code);
4229 	if (group_id != 0) {
4230 		hdrlen = sizeof(cmd->hdr_wide);
4231 		datasz = sizeof(cmd->data_wide);
4232 	} else {
4233 		hdrlen = sizeof(cmd->hdr);
4234 		datasz = sizeof(cmd->data);
4235 	}
4236 
4237 	if (paylen > datasz) {
4238 		/* Command is too large to fit in pre-allocated space. */
4239 		size_t totlen = hdrlen + paylen;
4240 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4241 			aprint_error_dev(sc->sc_dev,
4242 			    "firmware command too long (%zd bytes)\n", totlen);
4243 			err = EINVAL;
4244 			goto out;
4245 		}
4246 		m = m_gethdr(M_DONTWAIT, MT_DATA);
4247 		if (m == NULL) {
4248 			err = ENOMEM;
4249 			goto out;
4250 		}
4251 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
4252 		if (!(m->m_flags & M_EXT)) {
4253 			aprint_error_dev(sc->sc_dev,
4254 			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
4255 			m_freem(m);
4256 			err = ENOMEM;
4257 			goto out;
4258 		}
4259 		cmd = mtod(m, struct iwm_device_cmd *);
4260 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4261 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4262 		if (err) {
4263 			aprint_error_dev(sc->sc_dev,
4264 			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
4265 			m_freem(m);
4266 			goto out;
4267 		}
4268 		txdata->m = m;
4269 		paddr = txdata->map->dm_segs[0].ds_addr;
4270 	} else {
4271 		cmd = &ring->cmd[ring->cur];
4272 		paddr = txdata->cmd_paddr;
4273 	}
4274 
4275 	if (group_id != 0) {
4276 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4277 		cmd->hdr_wide.group_id = group_id;
4278 		cmd->hdr_wide.qid = ring->qid;
4279 		cmd->hdr_wide.idx = ring->cur;
4280 		cmd->hdr_wide.length = htole16(paylen);
4281 		cmd->hdr_wide.version = iwm_cmd_version(code);
4282 		data = cmd->data_wide;
4283 	} else {
4284 		cmd->hdr.code = code;
4285 		cmd->hdr.flags = 0;
4286 		cmd->hdr.qid = ring->qid;
4287 		cmd->hdr.idx = ring->cur;
4288 		data = cmd->data;
4289 	}
4290 
4291 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
4292 		if (hcmd->len[i] == 0)
4293 			continue;
4294 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4295 		off += hcmd->len[i];
4296 	}
4297 	KASSERT(off == paylen);
4298 
4299 	/* The 'lo' address field is not naturally aligned; copy it bytewise. */
4300 	addr_lo = htole32((uint32_t)paddr);
4301 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
4302 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
4303 	    | ((hdrlen + paylen) << 4));
4304 	desc->num_tbs = 1;
4305 
4306 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
4307 	    code, hdrlen + paylen, async ? " (async)" : ""));
4308 
4309 	if (paylen > datasz) {
4310 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
4311 		    BUS_DMASYNC_PREWRITE);
4312 	} else {
4313 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4314 		    (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
4315 		    BUS_DMASYNC_PREWRITE);
4316 	}
4317 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4318 	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4319 	    BUS_DMASYNC_PREWRITE);
4320 
4321 	err = iwm_set_cmd_in_flight(sc);
4322 	if (err)
4323 		goto out;
4324 	ring->queued++;
4325 
4326 #if 0
4327 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4328 #endif
4329 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
4330 	    code, ring->qid, ring->cur));
4331 
4332 	/* Kick command ring. */
4333 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4334 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4335 
4336 	if (!async) {
4337 		int generation = sc->sc_generation;
4338 		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
4339 		if (err == 0) {
4340 			/* if hardware is no longer up, return error */
4341 			if (generation != sc->sc_generation) {
4342 				err = ENXIO;
4343 			} else {
4344 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4345 			}
4346 		}
4347 	}
4348  out:
4349 	if (wantresp && err) {
4350 		iwm_free_resp(sc, hcmd);
4351 	}
4352 	splx(s);
4353 
4354 	return err;
4355 }
4356 
4357 static int
4358 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4359     uint16_t len, const void *data)
4360 {
4361 	struct iwm_host_cmd cmd = {
4362 		.id = id,
4363 		.len = { len, },
4364 		.data = { data, },
4365 		.flags = flags,
4366 	};
4367 
4368 	return iwm_send_cmd(sc, &cmd);
4369 }
4370 
4371 static int
4372 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4373     uint32_t *status)
4374 {
4375 	struct iwm_rx_packet *pkt;
4376 	struct iwm_cmd_response *resp;
4377 	int err, resp_len;
4378 
4379 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4380 	cmd->flags |= IWM_CMD_WANT_SKB;
4381 
4382 	err = iwm_send_cmd(sc, cmd);
4383 	if (err)
4384 		return err;
4385 	pkt = cmd->resp_pkt;
4386 
4387 	/* Can happen if RFKILL is asserted */
4388 	if (!pkt) {
4389 		err = 0;
4390 		goto out_free_resp;
4391 	}
4392 
4393 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4394 		err = EIO;
4395 		goto out_free_resp;
4396 	}
4397 
4398 	resp_len = iwm_rx_packet_payload_len(pkt);
4399 	if (resp_len != sizeof(*resp)) {
4400 		err = EIO;
4401 		goto out_free_resp;
4402 	}
4403 
4404 	resp = (void *)pkt->data;
4405 	*status = le32toh(resp->status);
4406  out_free_resp:
4407 	iwm_free_resp(sc, cmd);
4408 	return err;
4409 }
4410 
4411 static int
4412 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4413     const void *data, uint32_t *status)
4414 {
4415 	struct iwm_host_cmd cmd = {
4416 		.id = id,
4417 		.len = { len, },
4418 		.data = { data, },
4419 	};
4420 
4421 	return iwm_send_cmd_status(sc, &cmd, status);
4422 }
4423 
4424 static void
4425 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4426 {
4427 	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4428 	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4429 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4430 	wakeup(&sc->sc_wantresp);
4431 }
4432 
4433 static void
4434 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4435 {
4436 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4437 	struct iwm_tx_data *data;
4438 	int s;
4439 
4440 	if (qid != IWM_CMD_QUEUE) {
4441 		return;	/* Not a command ack. */
4442 	}
4443 
4444 	s = splnet();
4445 
4446 	data = &ring->data[idx];
4447 
4448 	if (data->m != NULL) {
4449 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4450 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4451 		bus_dmamap_unload(sc->sc_dmat, data->map);
4452 		m_freem(data->m);
4453 		data->m = NULL;
4454 	}
4455 	wakeup(&ring->desc[idx]);
4456 
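	/*
	 * Sanity check: the acknowledged slot plus the commands still
	 * queued should line up with the ring's write pointer.
	 */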
4457 	if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4458 		device_printf(sc->sc_dev,
4459 		    "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4460 		    idx, ring->queued, ring->cur);
4461 	}
4462 
4463 	if (ring->queued == 0) {
4464 		splx(s);
4465 		device_printf(sc->sc_dev, "cmd_done with empty ring\n");
4466 		return;
4467 	}
4468 
4469 	if (--ring->queued == 0)
4470 		iwm_clear_cmd_in_flight(sc);
4471 
4472 	splx(s);
4473 }
4474 
4475 #if 0
4476 /*
4477  * necessary only for block ack mode
4478  */
4479 void
4480 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4481     uint16_t len)
4482 {
4483 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4484 	uint16_t w_val;
4485 
4486 	scd_bc_tbl = sc->sched_dma.vaddr;
4487 
4488 	len += 8; /* magic numbers came naturally from paris */
4489 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4490 		len = roundup(len, 4) / 4;
4491 
4492 	w_val = htole16(sta_id << 12 | len);
4493 
4494 	/* Update TX scheduler. */
4495 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4496 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4497 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
4498 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4499 
4500 	/* I really wonder what this is ?!? */
4501 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4502 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4503 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4504 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4505 		    (char *)(void *)sc->sched_dma.vaddr,
4506 		    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4507 	}
4508 }
4509 #endif
4510 
4511 /*
4512  * Fill in various bits for management frames, and leave them
4513  * unfilled for data frames (firmware takes care of that).
4514  * Return the selected TX rate.
4515  */
4516 static const struct iwm_rate *
4517 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4518     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4519 {
4520 	struct ieee80211com *ic = &sc->sc_ic;
4521 	struct ieee80211_node *ni = &in->in_ni;
4522 	const struct iwm_rate *rinfo;
4523 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4524 	int ridx, rate_flags, i, ind;
4525 	int nrates = ni->ni_rates.rs_nrates;
4526 
4527 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4528 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4529 
4530 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4531 	    type != IEEE80211_FC0_TYPE_DATA) {
4532 		/* for non-data, use the lowest supported rate */
4533 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4534 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4535 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4536 #ifndef IEEE80211_NO_HT
4537 	} else if (ic->ic_fixed_mcs != -1) {
4538 		ridx = sc->sc_fixed_ridx;
4539 #endif
4540 	} else if (ic->ic_fixed_rate != -1) {
4541 		ridx = sc->sc_fixed_ridx;
4542 	} else {
4543 		/* for data frames, use RS table */
4544 		tx->initial_rate_index = 0;
4545 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4546 		DPRINTFN(12, ("start with txrate %d\n",
4547 		    tx->initial_rate_index));
4548 #ifndef IEEE80211_NO_HT
4549 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4550 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4551 			return &iwm_rates[ridx];
4552 		}
4553 #endif
4554 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4555 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4556 		for (i = 0; i < nrates; i++) {
4557 			if (iwm_rates[i].rate == (ni->ni_txrate &
4558 			    IEEE80211_RATE_VAL)) {
4559 				ridx = i;
4560 				break;
4561 			}
4562 		}
4563 		return &iwm_rates[ridx];
4564 	}
4565 
4566 	rinfo = &iwm_rates[ridx];
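	/* Rotate through the valid TX antennas for management frames. */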
4567 	for (i = 0, ind = sc->sc_mgmt_last_antenna;
4568 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
4569 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4570 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4571 			sc->sc_mgmt_last_antenna = ind;
4572 			break;
4573 		}
4574 	}
4575 	rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4576 	if (IWM_RIDX_IS_CCK(ridx))
4577 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4578 #ifndef IEEE80211_NO_HT
4579 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4580 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4581 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4582 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4583 	} else
4584 #endif
4585 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4586 
4587 	return rinfo;
4588 }
4589 
4590 #define TB0_SIZE 16
4591 static int
4592 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4593 {
4594 	struct ieee80211com *ic = &sc->sc_ic;
4595 	struct iwm_node *in = (struct iwm_node *)ni;
4596 	struct iwm_tx_ring *ring;
4597 	struct iwm_tx_data *data;
4598 	struct iwm_tfd *desc;
4599 	struct iwm_device_cmd *cmd;
4600 	struct iwm_tx_cmd *tx;
4601 	struct ieee80211_frame *wh;
4602 	struct ieee80211_key *k = NULL;
4603 	struct mbuf *m1;
4604 	const struct iwm_rate *rinfo;
4605 	uint32_t flags;
4606 	u_int hdrlen;
4607 	bus_dma_segment_t *seg;
4608 	uint8_t tid, type;
4609 	int i, totlen, err, pad;
4610 
4611 	wh = mtod(m, struct ieee80211_frame *);
4612 	hdrlen = ieee80211_anyhdrsize(wh);
4613 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4614 
4615 	tid = 0;
4616 
4617 	ring = &sc->txq[ac];
4618 	desc = &ring->desc[ring->cur];
4619 	memset(desc, 0, sizeof(*desc));
4620 	data = &ring->data[ring->cur];
4621 
4622 	cmd = &ring->cmd[ring->cur];
4623 	cmd->hdr.code = IWM_TX_CMD;
4624 	cmd->hdr.flags = 0;
4625 	cmd->hdr.qid = ring->qid;
4626 	cmd->hdr.idx = ring->cur;
4627 
4628 	tx = (void *)cmd->data;
4629 	memset(tx, 0, sizeof(*tx));
4630 
4631 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4632 
4633 	if (__predict_false(sc->sc_drvbpf != NULL)) {
4634 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4635 
4636 		tap->wt_flags = 0;
4637 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4638 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4639 #ifndef IEEE80211_NO_HT
4640 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4641 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4642 		    type == IEEE80211_FC0_TYPE_DATA &&
4643 		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
4644 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4645 		} else
4646 #endif
4647 			tap->wt_rate = rinfo->rate;
4648 		tap->wt_hwqueue = ac;
4649 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4650 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4651 
4652 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m, BPF_D_OUT);
4653 	}
4654 
4655 	/* Encrypt the frame if need be. */
4656 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4657 		k = ieee80211_crypto_encap(ic, ni, m);
4658 		if (k == NULL) {
4659 			m_freem(m);
4660 			return ENOBUFS;
4661 		}
4662 		/* Packet header may have moved, reset our local pointer. */
4663 		wh = mtod(m, struct ieee80211_frame *);
4664 	}
4665 	totlen = m->m_pkthdr.len;
4666 
4667 	flags = 0;
4668 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4669 		flags |= IWM_TX_CMD_FLG_ACK;
4670 	}
4671 
4672 	if (type == IEEE80211_FC0_TYPE_DATA &&
4673 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4674 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4675 	     (ic->ic_flags & IEEE80211_F_USEPROT)))
4676 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4677 
4678 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4679 	    type != IEEE80211_FC0_TYPE_DATA)
4680 		tx->sta_id = IWM_AUX_STA_ID;
4681 	else
4682 		tx->sta_id = IWM_STATION_ID;
4683 
4684 	if (type == IEEE80211_FC0_TYPE_MGT) {
4685 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4686 
4687 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4688 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4689 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4690 		else
4691 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4692 	} else {
4693 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4694 	}
4695 
4696 	if (hdrlen & 3) {
4697 		/* First segment length must be a multiple of 4. */
4698 		flags |= IWM_TX_CMD_FLG_MH_PAD;
4699 		pad = 4 - (hdrlen & 3);
4700 	} else
4701 		pad = 0;
4702 
4703 	tx->driver_txop = 0;
4704 	tx->next_frame_len = 0;
4705 
4706 	tx->len = htole16(totlen);
4707 	tx->tid_tspec = tid;
4708 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4709 
4710 	/* Set physical address of "scratch area". */
4711 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4712 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4713 
4714 	/* Copy 802.11 header in TX command. */
4715 	memcpy(tx + 1, wh, hdrlen);
4716 
4717 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4718 
4719 	tx->sec_ctl = 0;
4720 	tx->tx_flags |= htole32(flags);
4721 
4722 	/* Trim 802.11 header. */
4723 	m_adj(m, hdrlen);
4724 
4725 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4726 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4727 	if (err) {
4728 		if (err != EFBIG) {
4729 			aprint_error_dev(sc->sc_dev,
4730 			    "can't map mbuf (error %d)\n", err);
4731 			m_freem(m);
4732 			return err;
4733 		}
4734 		/* Too many DMA segments, linearize mbuf. */
4735 		MGETHDR(m1, M_DONTWAIT, MT_DATA);
4736 		if (m1 == NULL) {
4737 			m_freem(m);
4738 			return ENOBUFS;
4739 		}
4740 		if (m->m_pkthdr.len > MHLEN) {
4741 			MCLGET(m1, M_DONTWAIT);
4742 			if (!(m1->m_flags & M_EXT)) {
4743 				m_freem(m);
4744 				m_freem(m1);
4745 				return ENOBUFS;
4746 			}
4747 		}
4748 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4749 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4750 		m_freem(m);
4751 		m = m1;
4752 
4753 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4754 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4755 		if (err) {
4756 			aprint_error_dev(sc->sc_dev,
4757 			    "can't map mbuf (error %d)\n", err);
4758 			m_freem(m);
4759 			return err;
4760 		}
4761 	}
4762 	data->m = m;
4763 	data->in = in;
4764 	data->done = 0;
4765 
4766 	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4767 	KASSERT(data->in != NULL);
4768 
4769 	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4770 	    "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4771 	    ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4772 	    (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4773 	    le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4774 	    le32toh(tx->rate_n_flags)));
4775 
4776 	/* Fill TX descriptor. */
4777 	desc->num_tbs = 2 + data->map->dm_nsegs;
4778 
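	/*
	 * TB0 covers the first 16 bytes of the TX command; TB1 covers the
	 * rest of the command plus the (padded) 802.11 header.
	 */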
4779 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4780 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4781 	    (TB0_SIZE << 4);
4782 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4783 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4784 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4785 	      + hdrlen + pad - TB0_SIZE) << 4);
4786 
4787 	/* Other DMA segments are for data payload. */
4788 	seg = data->map->dm_segs;
4789 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4790 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
4791 		desc->tbs[i+2].hi_n_len =
4792 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4793 		    | ((seg->ds_len) << 4);
4794 	}
4795 
4796 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, m->m_pkthdr.len,
4797 	    BUS_DMASYNC_PREWRITE);
4798 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4799 	    (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4800 	    BUS_DMASYNC_PREWRITE);
4801 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4802 	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4803 	    BUS_DMASYNC_PREWRITE);
4804 
4805 #if 0
4806 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4807 	    le16toh(tx->len));
4808 #endif
4809 
4810 	/* Kick TX ring. */
4811 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4812 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4813 
4814 	/* Mark TX ring as full if we reach a certain threshold. */
4815 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4816 		sc->qfullmsk |= 1 << ring->qid;
4817 	}
4818 
4819 	return 0;
4820 }
4821 
4822 #if 0
4823 /* not necessary? */
4824 static int
4825 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4826 {
4827 	struct iwm_tx_path_flush_cmd flush_cmd = {
4828 		.queues_ctl = htole32(tfd_msk),
4829 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4830 	};
4831 	int err;
4832 
4833 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4834 	    sizeof(flush_cmd), &flush_cmd);
4835 	if (err)
4836 		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4837 		    err);
4838 	return err;
4839 }
4840 #endif
4841 
4842 static void
4843 iwm_led_enable(struct iwm_softc *sc)
4844 {
4845 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4846 }
4847 
4848 static void
4849 iwm_led_disable(struct iwm_softc *sc)
4850 {
4851 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4852 }
4853 
4854 static int
4855 iwm_led_is_enabled(struct iwm_softc *sc)
4856 {
4857 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4858 }
4859 
4860 static void
4861 iwm_led_blink_timeout(void *arg)
4862 {
4863 	struct iwm_softc *sc = arg;
4864 
4865 	if (iwm_led_is_enabled(sc))
4866 		iwm_led_disable(sc);
4867 	else
4868 		iwm_led_enable(sc);
4869 
4870 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4871 }
4872 
4873 static void
4874 iwm_led_blink_start(struct iwm_softc *sc)
4875 {
4876 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4877 }
4878 
4879 static void
4880 iwm_led_blink_stop(struct iwm_softc *sc)
4881 {
4882 	callout_stop(&sc->sc_led_blink_to);
4883 	iwm_led_disable(sc);
4884 }
4885 
4886 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4887 
4888 static int
4889 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4890     struct iwm_beacon_filter_cmd *cmd)
4891 {
4892 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4893 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4894 }
4895 
4896 static void
4897 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4898     struct iwm_beacon_filter_cmd *cmd)
4899 {
4900 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4901 }
4902 
4903 static int
4904 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4905 {
4906 	struct iwm_beacon_filter_cmd cmd = {
4907 		IWM_BF_CMD_CONFIG_DEFAULTS,
4908 		.bf_enable_beacon_filter = htole32(1),
4909 		.ba_enable_beacon_abort = htole32(enable),
4910 	};
4911 
4912 	if (!sc->sc_bf.bf_enabled)
4913 		return 0;
4914 
4915 	sc->sc_bf.ba_enabled = enable;
4916 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4917 	return iwm_beacon_filter_send_cmd(sc, &cmd);
4918 }
4919 
4920 static void
4921 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4922     struct iwm_mac_power_cmd *cmd)
4923 {
4924 	struct ieee80211_node *ni = &in->in_ni;
4925 	int dtim_period, dtim_msec, keep_alive;
4926 
4927 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4928 	    in->in_color));
4929 	if (ni->ni_dtim_period)
4930 		dtim_period = ni->ni_dtim_period;
4931 	else
4932 		dtim_period = 1;
4933 
4934 	/*
4935 	 * Regardless of power management state, the driver must set the
4936 	 * keep-alive period.  The FW will use it to send keep-alive NDPs
4937 	 * immediately after association.  Ensure that the keep-alive period
4938 	 * is at least 3 * DTIM.
4939 	 */
4940 	dtim_msec = dtim_period * ni->ni_intval;
4941 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4942 	keep_alive = roundup(keep_alive, 1000) / 1000;
4943 	cmd->keep_alive_seconds = htole16(keep_alive);
4944 
4945 #ifdef notyet
4946 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4947 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4948 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4949 #endif
4950 }
4951 
4952 static int
4953 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4954 {
4955 	int err;
4956 	int ba_enable;
4957 	struct iwm_mac_power_cmd cmd;
4958 
4959 	memset(&cmd, 0, sizeof(cmd));
4960 
4961 	iwm_power_build_cmd(sc, in, &cmd);
4962 
4963 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4964 	    sizeof(cmd), &cmd);
4965 	if (err)
4966 		return err;
4967 
4968 	ba_enable = !!(cmd.flags &
4969 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4970 	return iwm_update_beacon_abort(sc, in, ba_enable);
4971 }
4972 
4973 static int
4974 iwm_power_update_device(struct iwm_softc *sc)
4975 {
4976 	struct iwm_device_power_cmd cmd = {
4977 #ifdef notyet
4978 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4979 #else
4980 		.flags = 0,
4981 #endif
4982 	};
4983 
4984 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4985 		return 0;
4986 
4987 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4988 	DPRINTF(("Sending device power command with flags = 0x%X\n",
4989 	    cmd.flags));
4990 
4991 	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4992 }
4993 
4994 #ifdef notyet
4995 static int
4996 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4997 {
4998 	struct iwm_beacon_filter_cmd cmd = {
4999 		IWM_BF_CMD_CONFIG_DEFAULTS,
5000 		.bf_enable_beacon_filter = htole32(1),
5001 	};
5002 	int err;
5003 
5004 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5005 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5006 
5007 	if (err == 0)
5008 		sc->sc_bf.bf_enabled = 1;
5009 
5010 	return err;
5011 }
5012 #endif
5013 
5014 static int
5015 iwm_disable_beacon_filter(struct iwm_softc *sc)
5016 {
5017 	struct iwm_beacon_filter_cmd cmd;
5018 	int err;
5019 
5020 	memset(&cmd, 0, sizeof(cmd));
5021 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
5022 		return 0;
5023 
5024 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5025 	if (err == 0)
5026 		sc->sc_bf.bf_enabled = 0;
5027 
5028 	return err;
5029 }
5030 
5031 static int
5032 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5033 {
5034 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
5035 	int err;
5036 	uint32_t status;
5037 
5038 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5039 
5040 	add_sta_cmd.sta_id = IWM_STATION_ID;
5041 	add_sta_cmd.mac_id_n_color
5042 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5043 	if (!update) {
5044 		int ac;
5045 		for (ac = 0; ac < WME_NUM_AC; ac++) {
5046 			add_sta_cmd.tfd_queue_msk |=
5047 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
5048 		}
5049 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5050 	}
5051 	add_sta_cmd.add_modify = update ? 1 : 0;
5052 	add_sta_cmd.station_flags_msk
5053 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5054 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
5055 	if (update)
5056 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5057 
5058 #ifndef IEEE80211_NO_HT
5059 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		struct ieee80211com *ic = &sc->sc_ic;	/* for the AMPDU density switch below */

5060 		add_sta_cmd.station_flags_msk
5061 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5062 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5063 
5064 		add_sta_cmd.station_flags
5065 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5066 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5067 		case IEEE80211_AMPDU_PARAM_SS_2:
5068 			add_sta_cmd.station_flags
5069 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5070 			break;
5071 		case IEEE80211_AMPDU_PARAM_SS_4:
5072 			add_sta_cmd.station_flags
5073 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5074 			break;
5075 		case IEEE80211_AMPDU_PARAM_SS_8:
5076 			add_sta_cmd.station_flags
5077 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5078 			break;
5079 		case IEEE80211_AMPDU_PARAM_SS_16:
5080 			add_sta_cmd.station_flags
5081 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5082 			break;
5083 		default:
5084 			break;
5085 		}
5086 	}
5087 #endif
5088 
5089 	status = IWM_ADD_STA_SUCCESS;
5090 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
5091 	    &add_sta_cmd, &status);
5092 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5093 		err = EIO;
5094 
5095 	return err;
5096 }
5097 
5098 static int
5099 iwm_add_aux_sta(struct iwm_softc *sc)
5100 {
5101 	struct iwm_add_sta_cmd_v7 cmd;
5102 	int err;
5103 	uint32_t status;
5104 
5105 	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
5106 	if (err)
5107 		return err;
5108 
5109 	memset(&cmd, 0, sizeof(cmd));
5110 	cmd.sta_id = IWM_AUX_STA_ID;
5111 	cmd.mac_id_n_color =
5112 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5113 	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
5114 	cmd.tid_disable_tx = htole16(0xffff);
5115 
5116 	status = IWM_ADD_STA_SUCCESS;
5117 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
5118 	    &status);
5119 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5120 		err = EIO;
5121 
5122 	return err;
5123 }
5124 
5125 #define IWM_PLCP_QUIET_THRESH 1
5126 #define IWM_ACTIVE_QUIET_TIME 10
5127 #define LONG_OUT_TIME_PERIOD 600
5128 #define SHORT_OUT_TIME_PERIOD 200
5129 #define SUSPEND_TIME_PERIOD 100
5130 
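/*
 * Build the RX chain selection for scanning: all valid RX antennas
 * are forced on, under driver rather than firmware control.
 */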
5131 static uint16_t
5132 iwm_scan_rx_chain(struct iwm_softc *sc)
5133 {
5134 	uint16_t rx_chain;
5135 	uint8_t rx_ant;
5136 
5137 	rx_ant = iwm_fw_valid_rx_ant(sc);
5138 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5139 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5140 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5141 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5142 	return htole16(rx_chain);
5143 }
5144 
5145 static uint32_t
5146 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5147 {
5148 	uint32_t tx_ant;
5149 	int i, ind;
5150 
5151 	for (i = 0, ind = sc->sc_scan_last_antenna;
5152 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
5153 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5154 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5155 			sc->sc_scan_last_antenna = ind;
5156 			break;
5157 		}
5158 	}
5159 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5160 
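	/* Use 1 Mbps (CCK) on 2GHz unless disallowed, 6 Mbps (OFDM) otherwise. */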
5161 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5162 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5163 				   tx_ant);
5164 	else
5165 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
5166 }
5167 
5168 #ifdef notyet
5169 /*
5170  * If req->n_ssids > 0, it means we should do an active scan.
5171  * In case of active scan w/o directed scan, we receive a zero-length SSID
5172  * just to notify that this scan is active and not passive.
5173  * In order to notify the FW of the number of SSIDs we wish to scan (including
5174  * the zero-length one), we need to set the corresponding bits in chan->type,
5175  * one for each SSID, and set the active bit (first).  Since the first SSID
5176  * is already included in the probe template, we need to set only
5177  * req->n_ssids - 1 bits in addition to the first bit.
5178  */
5179 static uint16_t
5180 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
5181 {
5182 	if (flags & IEEE80211_CHAN_2GHZ)
5183 		return 30  + 3 * (n_ssids + 1);
5184 	return 20  + 2 * (n_ssids + 1);
5185 }
5186 
5187 static uint16_t
5188 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
5189 {
5190 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
5191 }
5192 #endif
5193 
5194 static uint8_t
5195 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5196     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
5197 {
5198 	struct ieee80211com *ic = &sc->sc_ic;
5199 	struct ieee80211_channel *c;
5200 	uint8_t nchan;
5201 
5202 	for (nchan = 0, c = &ic->ic_channels[1];
5203 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5204 	    nchan < sc->sc_capa_n_scan_channels;
5205 	    c++) {
5206 		if (c->ic_flags == 0)
5207 			continue;
5208 
5209 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5210 		chan->iter_count = htole16(1);
5211 		chan->iter_interval = htole32(0);
5212 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5213 		chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
5214 		if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
5215 			chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
5216 		chan++;
5217 		nchan++;
5218 	}
5219 
5220 	return nchan;
5221 }
5222 
5223 static uint8_t
5224 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5225     struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
5226 {
5227 	struct ieee80211com *ic = &sc->sc_ic;
5228 	struct ieee80211_channel *c;
5229 	uint8_t nchan;
5230 
5231 	for (nchan = 0, c = &ic->ic_channels[1];
5232 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5233 	    nchan < sc->sc_capa_n_scan_channels;
5234 	    c++) {
5235 		if (c->ic_flags == 0)
5236 			continue;
5237 
5238 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5239 		chan->iter_count = 1;
5240 		chan->iter_interval = htole16(0);
5241 		chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
5242 		chan++;
5243 		nchan++;
5244 	}
5245 
5246 	return nchan;
5247 }
5248 
5249 static int
5250 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
5251 {
5252 	struct ieee80211com *ic = &sc->sc_ic;
5253 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5254 	struct ieee80211_rateset *rs;
5255 	size_t remain = sizeof(preq->buf);
5256 	uint8_t *frm, *pos;
5257 
5258 	memset(preq, 0, sizeof(*preq));
5259 
5260 	KASSERT(ic->ic_des_esslen < sizeof(ic->ic_des_essid));
5261 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5262 		return ENOBUFS;
5263 
5264 	/*
5265 	 * Build a probe request frame.  Most of the following code is a
5266 	 * copy & paste of what is done in net80211.
5267 	 */
5268 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5269 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5270 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5271 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5272 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5273 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5274 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5275 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5276 
5277 	frm = (uint8_t *)(wh + 1);
5278 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5279 
5280 	/* Tell the firmware where the MAC header is. */
5281 	preq->mac_header.offset = 0;
5282 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5283 	remain -= frm - (uint8_t *)wh;
5284 
5285 	/* Fill in 2GHz IEs and tell firmware where they are. */
5286 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5287 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5288 		if (remain < 4 + rs->rs_nrates)
5289 			return ENOBUFS;
5290 	} else if (remain < 2 + rs->rs_nrates)
5291 		return ENOBUFS;
5292 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5293 	pos = frm;
5294 	frm = ieee80211_add_rates(frm, rs);
5295 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5296 		frm = ieee80211_add_xrates(frm, rs);
5297 	preq->band_data[0].len = htole16(frm - pos);
5298 	remain -= frm - pos;
5299 
5300 	if (isset(sc->sc_enabled_capa,
5301 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5302 		if (remain < 3)
5303 			return ENOBUFS;
5304 		*frm++ = IEEE80211_ELEMID_DSPARMS;
5305 		*frm++ = 1;
5306 		*frm++ = 0;
5307 		remain -= 3;
5308 	}
5309 
5310 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5311 		/* Fill in 5GHz IEs. */
5312 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5313 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5314 			if (remain < 4 + rs->rs_nrates)
5315 				return ENOBUFS;
5316 		} else if (remain < 2 + rs->rs_nrates)
5317 			return ENOBUFS;
5318 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5319 		pos = frm;
5320 		frm = ieee80211_add_rates(frm, rs);
5321 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5322 			frm = ieee80211_add_xrates(frm, rs);
5323 		preq->band_data[1].len = htole16(frm - pos);
5324 		remain -= frm - pos;
5325 	}
5326 
5327 #ifndef IEEE80211_NO_HT
5328 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
5329 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5330 	pos = frm;
5331 	if (ic->ic_flags & IEEE80211_F_HTON) {
5332 		if (remain < 28)
5333 			return ENOBUFS;
5334 		frm = ieee80211_add_htcaps(frm, ic);
5335 		/* XXX add WME info? */
5336 	}
5337 #endif
5338 
5339 	preq->common_data.len = htole16(frm - pos);
5340 
5341 	return 0;
5342 }
5343 
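/*
 * Start a scan using the older LMAC scan API.  Firmware images which
 * advertise IWM_UCODE_TLV_CAPA_UMAC_SCAN use iwm_umac_scan() instead.
 */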
5344 static int
5345 iwm_lmac_scan(struct iwm_softc *sc)
5346 {
5347 	struct ieee80211com *ic = &sc->sc_ic;
5348 	struct iwm_host_cmd hcmd = {
5349 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5350 		.len = { 0, },
5351 		.data = { NULL, },
5352 		.flags = 0,
5353 	};
5354 	struct iwm_scan_req_lmac *req;
5355 	size_t req_len;
5356 	int err;
5357 
5358 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5359 
5360 	req_len = sizeof(struct iwm_scan_req_lmac) +
5361 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5362 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
5363 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5364 		return ENOMEM;
5365 	req = kmem_zalloc(req_len, KM_SLEEP);
5366 	hcmd.len[0] = (uint16_t)req_len;
5367 	hcmd.data[0] = (void *)req;
5368 
5369 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5370 	req->active_dwell = 10;
5371 	req->passive_dwell = 110;
5372 	req->fragmented_dwell = 44;
5373 	req->extended_dwell = 90;
5374 	req->max_out_time = 0;
5375 	req->suspend_time = 0;
5376 
5377 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5378 	req->rx_chain_select = iwm_scan_rx_chain(sc);
5379 	req->iter_num = htole32(1);
5380 	req->delay = 0;
5381 
5382 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5383 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5384 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5385 	if (ic->ic_des_esslen == 0)
5386 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5387 	else
5388 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5389 	if (isset(sc->sc_enabled_capa,
5390 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5391 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5392 
5393 	req->flags = htole32(IWM_PHY_BAND_24);
5394 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5395 		req->flags |= htole32(IWM_PHY_BAND_5);
5396 	req->filter_flags =
5397 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5398 
5399 	/* Tx flags 2 GHz. */
5400 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5401 	    IWM_TX_CMD_FLG_BT_DIS);
5402 	req->tx_cmd[0].rate_n_flags =
5403 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5404 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5405 
5406 	/* Tx flags 5 GHz. */
5407 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5408 	    IWM_TX_CMD_FLG_BT_DIS);
5409 	req->tx_cmd[1].rate_n_flags =
5410 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5411 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5412 
5413 	/* Check if we're doing an active directed scan. */
5414 	if (ic->ic_des_esslen != 0) {
5415 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5416 		req->direct_scan[0].len = ic->ic_des_esslen;
5417 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5418 		    ic->ic_des_esslen);
5419 	}
5420 
5421 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
5422 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
5423 	    ic->ic_des_esslen != 0);
5424 
5425 	err = iwm_fill_probe_req(sc,
5426 	    (struct iwm_scan_probe_req *)(req->data +
5427 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5428 	     sc->sc_capa_n_scan_channels)));
5429 	if (err) {
5430 		kmem_free(req, req_len);
5431 		return err;
5432 	}
5433 
5434 	/* Specify the scan plan: We'll do one iteration. */
5435 	req->schedule[0].iterations = 1;
5436 	req->schedule[0].full_scan_mul = 1;
5437 
5438 	/* Disable EBS. */
5439 	req->channel_opt[0].non_ebs_ratio = 1;
5440 	req->channel_opt[1].non_ebs_ratio = 1;
5441 
5442 	err = iwm_send_cmd(sc, &hcmd);
5443 	kmem_free(req, req_len);
5444 	return err;
5445 }
5446 
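/*
 * Send the one-time scan configuration required before UMAC scans can
 * be issued: legacy rate set, dwell times, our MAC address, and the
 * list of channel numbers the firmware is allowed to scan.
 */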
5447 static int
5448 iwm_config_umac_scan(struct iwm_softc *sc)
5449 {
5450 	struct ieee80211com *ic = &sc->sc_ic;
5451 	struct iwm_scan_config *scan_config;
5452 	int err, nchan;
5453 	size_t cmd_size;
5454 	struct ieee80211_channel *c;
5455 	struct iwm_host_cmd hcmd = {
5456 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
5457 		.flags = 0,
5458 	};
5459 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5460 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5461 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5462 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5463 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5464 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5465 	    IWM_SCAN_CONFIG_RATE_54M);
5466 
5467 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5468 
5469 	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
5470 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5471 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5472 	scan_config->legacy_rates = htole32(rates |
5473 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5474 
5475 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5476 	scan_config->dwell_active = 10;
5477 	scan_config->dwell_passive = 110;
5478 	scan_config->dwell_fragmented = 44;
5479 	scan_config->dwell_extended = 90;
5480 	scan_config->out_of_channel_time = htole32(0);
5481 	scan_config->suspend_time = htole32(0);
5482 
5483 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5484 
5485 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5486 	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5487 	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5488 	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5489 
5490 	for (c = &ic->ic_channels[1], nchan = 0;
5491 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5492 	    nchan < sc->sc_capa_n_scan_channels; c++) {
5493 		if (c->ic_flags == 0)
5494 			continue;
5495 		scan_config->channel_array[nchan++] =
5496 		    ieee80211_mhz2ieee(c->ic_freq, 0);
5497 	}
5498 
5499 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5500 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5501 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5502 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5503 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5504 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5505 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5506 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5507 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5508 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5509 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5510 
5511 	hcmd.data[0] = scan_config;
5512 	hcmd.len[0] = cmd_size;
5513 
5514 	err = iwm_send_cmd(sc, &hcmd);
5515 	kmem_free(scan_config, cmd_size);
5516 	return err;
5517 }
5518 
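/*
 * Start a scan using the UMAC scan API.  The channel list and the
 * probe request template are appended after the fixed-size request.
 */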
5519 static int
5520 iwm_umac_scan(struct iwm_softc *sc)
5521 {
5522 	struct ieee80211com *ic = &sc->sc_ic;
5523 	struct iwm_host_cmd hcmd = {
5524 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5525 		.len = { 0, },
5526 		.data = { NULL, },
5527 		.flags = 0,
5528 	};
5529 	struct iwm_scan_req_umac *req;
5530 	struct iwm_scan_req_umac_tail *tail;
5531 	size_t req_len;
5532 	int err;
5533 
5534 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5535 
5536 	req_len = sizeof(struct iwm_scan_req_umac) +
5537 	    (sizeof(struct iwm_scan_channel_cfg_umac) *
5538 	    sc->sc_capa_n_scan_channels) +
5539 	    sizeof(struct iwm_scan_req_umac_tail);
5540 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5541 		return ENOMEM;
5542 	req = kmem_zalloc(req_len, KM_SLEEP);
5543 
5544 	hcmd.len[0] = (uint16_t)req_len;
5545 	hcmd.data[0] = (void *)req;
5546 
5547 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5548 	req->active_dwell = 10;
5549 	req->passive_dwell = 110;
5550 	req->fragmented_dwell = 44;
5551 	req->extended_dwell = 90;
5552 	req->max_out_time = 0;
5553 	req->suspend_time = 0;
5554 
5555 	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5556 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5557 
5558 	req->n_channels = iwm_umac_scan_fill_channels(sc,
5559 	    (struct iwm_scan_channel_cfg_umac *)req->data,
5560 	    ic->ic_des_esslen != 0);
5561 
5562 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5563 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5564 	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5565 
5566 	tail = (struct iwm_scan_req_umac_tail *)(req->data +
5567 		sizeof(struct iwm_scan_channel_cfg_umac) *
5568 			sc->sc_capa_n_scan_channels);
5569 
5570 	/* Check if we're doing an active directed scan. */
5571 	if (ic->ic_des_esslen != 0) {
5572 		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5573 		tail->direct_scan[0].len = ic->ic_des_esslen;
5574 		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5575 		    ic->ic_des_esslen);
5576 		req->general_flags |=
5577 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5578 	} else
5579 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5580 
5581 	if (isset(sc->sc_enabled_capa,
5582 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5583 		req->general_flags |=
5584 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5585 
5586 	err = iwm_fill_probe_req(sc, &tail->preq);
5587 	if (err) {
5588 		kmem_free(req, req_len);
5589 		return err;
5590 	}
5591 
5592 	/* Specify the scan plan: We'll do one iteration. */
5593 	tail->schedule[0].interval = 0;
5594 	tail->schedule[0].iter_count = 1;
5595 
5596 	err = iwm_send_cmd(sc, &hcmd);
5597 	kmem_free(req, req_len);
5598 	return err;
5599 }
5600 
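/*
 * Map a firmware rate index to the matching entry in the node's rate
 * set, or return 0 if the node does not support that rate.  The
 * IEEE80211_RATE_BASIC bit is preserved so callers can test for
 * basic rates.
 */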
5601 static uint8_t
5602 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5603 {
5604 	int i;
5605 	uint8_t rval;
5606 
5607 	for (i = 0; i < rs->rs_nrates; i++) {
5608 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5609 		if (rval == iwm_rates[ridx].rate)
5610 			return rs->rs_rates[i];
5611 	}
5612 	return 0;
5613 }
5614 
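/*
 * Compute the CCK and OFDM basic rate bitmaps for the MAC context
 * command.  See the comments below for why mandatory rates beneath
 * the lowest basic rate are added to the bitmaps as well.
 */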
5615 static void
5616 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5617     int *ofdm_rates)
5618 {
5619 	struct ieee80211_node *ni = &in->in_ni;
5620 	struct ieee80211_rateset *rs = &ni->ni_rates;
5621 	int lowest_present_ofdm = -1;
5622 	int lowest_present_cck = -1;
5623 	uint8_t cck = 0;
5624 	uint8_t ofdm = 0;
5625 	int i;
5626 
5627 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5628 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5629 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5630 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5631 				continue;
5632 			cck |= (1 << i);
5633 			if (lowest_present_cck == -1 || lowest_present_cck > i)
5634 				lowest_present_cck = i;
5635 		}
5636 	}
5637 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5638 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5639 			continue;
5640 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5641 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5642 			lowest_present_ofdm = i;
5643 	}
5644 
5645 	/*
5646 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
5647 	 * variables. This isn't sufficient though, as there might not
5648 	 * be all the right rates in the bitmap. E.g. if the only basic
5649 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5650 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5651 	 *
5652 	 *    [...] a STA responding to a received frame shall transmit
5653 	 *    its Control Response frame [...] at the highest rate in the
5654 	 *    BSSBasicRateSet parameter that is less than or equal to the
5655 	 *    rate of the immediately previous frame in the frame exchange
5656 	 *    sequence ([...]) and that is of the same modulation class
5657 	 *    ([...]) as the received frame. If no rate contained in the
5658 	 *    BSSBasicRateSet parameter meets these conditions, then the
5659 	 *    control frame sent in response to a received frame shall be
5660 	 *    transmitted at the highest mandatory rate of the PHY that is
5661 	 *    less than or equal to the rate of the received frame, and
5662 	 *    that is of the same modulation class as the received frame.
5663 	 *
5664 	 * As a consequence, we need to add all mandatory rates that are
5665 	 * lower than all of the basic rates to these bitmaps.
5666 	 */
5667 
5668 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5669 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5670 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5671 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5672 	/* 6M already there or needed so always add */
5673 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5674 
5675 	/*
5676 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5677 	 * Note, however:
5678 	 *  - if no CCK rates are basic, it must be ERP since there must
5679 	 *    be some basic rates at all, so they're OFDM => ERP PHY
5680 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
5681 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5682 	 *  - if 5.5M is basic, 1M and 2M are mandatory
5683 	 *  - if 2M is basic, 1M is mandatory
5684 	 *  - if 1M is basic, that's the only valid ACK rate.
5685 	 * As a consequence, it's not as complicated as it sounds, just add
5686 	 * any lower rates to the ACK rate bitmap.
5687 	 */
5688 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
5689 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5690 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
5691 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5692 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
5693 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5694 	/* 1M already there or needed so always add */
5695 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5696 
5697 	*cck_rates = cck;
5698 	*ofdm_rates = ofdm;
5699 }
5700 
5701 static void
5702 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5703     struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5704 {
5705 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5706 	struct ieee80211com *ic = &sc->sc_ic;
5707 	struct ieee80211_node *ni = ic->ic_bss;
5708 	int cck_ack_rates, ofdm_ack_rates;
5709 	int i;
5710 
5711 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5712 	    in->in_color));
5713 	cmd->action = htole32(action);
5714 
5715 	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5716 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
5717 
5718 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5719 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5720 
5721 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5722 	cmd->cck_rates = htole32(cck_ack_rates);
5723 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
5724 
5725 	cmd->cck_short_preamble
5726 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5727 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5728 	cmd->short_slot
5729 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5730 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
5731 
5732 	for (i = 0; i < WME_NUM_AC; i++) {
5733 		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5734 		int txf = iwm_ac_to_tx_fifo[i];
5735 
5736 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5737 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5738 		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5739 		cmd->ac[txf].fifos_mask = (1 << txf);
5740 		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5741 	}
5742 	if (ni->ni_flags & IEEE80211_NODE_QOS)
5743 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5744 
5745 #ifndef IEEE80211_NO_HT
5746 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5747 		enum ieee80211_htprot htprot =
5748 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5749 		switch (htprot) {
5750 		case IEEE80211_HTPROT_NONE:
5751 			break;
5752 		case IEEE80211_HTPROT_NONMEMBER:
5753 		case IEEE80211_HTPROT_NONHT_MIXED:
5754 			cmd->protection_flags |=
5755 			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
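			/* FALLTHROUGH */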
5756 		case IEEE80211_HTPROT_20MHZ:
5757 			cmd->protection_flags |=
5758 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
5759 			    IWM_MAC_PROT_FLG_FAT_PROT);
5760 			break;
5761 		default:
5762 			break;
5763 		}
5764 
5765 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5766 	}
5767 #endif
5768 
5769 	if (ic->ic_flags & IEEE80211_F_USEPROT)
5770 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5771 
5772 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5773 #undef IWM_EXP2
5774 }
5775 
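/*
 * Fill the station-specific part of the MAC context command.  DTIM
 * timing is derived from the last received beacon: its device
 * timestamp (ni_rstamp) and TSF, plus the beacon intervals remaining
 * until the next DTIM.
 */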
5776 static void
5777 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5778     struct iwm_mac_data_sta *sta, int assoc)
5779 {
5780 	struct ieee80211_node *ni = &in->in_ni;
5781 	uint32_t dtim_off;
5782 	uint64_t tsf;
5783 
5784 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5785 	tsf = le64toh(ni->ni_tstamp.tsf);
5786 
5787 	sta->is_assoc = htole32(assoc);
5788 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5789 	sta->dtim_tsf = htole64(tsf + dtim_off);
5790 	sta->bi = htole32(ni->ni_intval);
5791 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5792 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5793 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5794 	sta->listen_interval = htole32(10);
5795 	sta->assoc_id = htole32(ni->ni_associd);
5796 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5797 }
5798 
5799 static int
5800 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5801     int assoc)
5802 {
5803 	struct ieee80211_node *ni = &in->in_ni;
5804 	struct iwm_mac_ctx_cmd cmd;
5805 
5806 	memset(&cmd, 0, sizeof(cmd));
5807 
5808 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5809 
5810 	/* Allow beacons to pass through as long as we are not associated
5811 	 * or do not have DTIM period information. */

5812 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5813 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5814 	else
5815 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5816 
5817 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5818 }
5819 
5820 #define IWM_MISSED_BEACONS_THRESHOLD 8
5821 
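/*
 * Handle a missed-beacons notification from the firmware.  If too many
 * consecutive beacons have been missed since the last RX, notify
 * net80211 so it can probe the AP or look for a new one.
 */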
5822 static void
5823 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5824 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5825 {
5826 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5827 	int s;
5828 
5829 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5830 	    le32toh(mb->mac_id),
5831 	    le32toh(mb->consec_missed_beacons),
5832 	    le32toh(mb->consec_missed_beacons_since_last_rx),
5833 	    le32toh(mb->num_recvd_beacons),
5834 	    le32toh(mb->num_expected_beacons)));
5835 
5836 	/*
5837 	 * TODO: the threshold should be adjusted based on latency conditions,
5838 	 * and/or in case of a CS flow on one of the other AP vifs.
5839 	 */
5840 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5841 	    IWM_MISSED_BEACONS_THRESHOLD) {
5842 		s = splnet();
5843 		ieee80211_beacon_miss(&sc->sc_ic);
5844 		splx(s);
5845 	}
5846 }
5847 
5848 static int
5849 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5850 {
5851 	struct iwm_time_quota_cmd cmd;
5852 	int i, idx, num_active_macs, quota, quota_rem;
5853 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5854 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
5855 	uint16_t id;
5856 
5857 	memset(&cmd, 0, sizeof(cmd));
5858 
5859 	/* currently, PHY ID == binding ID */
5860 	if (in) {
5861 		id = in->in_phyctxt->id;
5862 		KASSERT(id < IWM_MAX_BINDINGS);
5863 		colors[id] = in->in_phyctxt->color;
5864 
5865 		if (1)
5866 			n_ifs[id] = 1;
5867 	}
5868 
5869 	/*
5870 	 * The FW's scheduling session consists of
5871 	 * IWM_MAX_QUOTA fragments. Divide these fragments
5872 	 * equally between all the bindings that require quota
5873 	 */
5874 	num_active_macs = 0;
5875 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5876 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5877 		num_active_macs += n_ifs[i];
5878 	}
5879 
5880 	quota = 0;
5881 	quota_rem = 0;
5882 	if (num_active_macs) {
5883 		quota = IWM_MAX_QUOTA / num_active_macs;
5884 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
5885 	}
5886 
5887 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5888 		if (colors[i] < 0)
5889 			continue;
5890 
5891 		cmd.quotas[idx].id_and_color =
5892 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5893 
5894 		if (n_ifs[i] <= 0) {
5895 			cmd.quotas[idx].quota = htole32(0);
5896 			cmd.quotas[idx].max_duration = htole32(0);
5897 		} else {
5898 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5899 			cmd.quotas[idx].max_duration = htole32(0);
5900 		}
5901 		idx++;
5902 	}
5903 
5904 	/* Give the remainder of the session to the first binding */
5905 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5906 
5907 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5908 }
5909 
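/*
 * Prepare the firmware for association: configure the smart FIFO and
 * the multicast filter, point PHY context 0 at the AP's channel, add
 * the MAC, binding and station contexts, and schedule a time event so
 * the firmware stays on-channel while authentication completes.
 */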
5910 static int
5911 iwm_auth(struct iwm_softc *sc)
5912 {
5913 	struct ieee80211com *ic = &sc->sc_ic;
5914 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5915 	uint32_t duration;
5916 	int err;
5917 
5918 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5919 	if (err)
5920 		return err;
5921 
5922 	err = iwm_allow_mcast(sc);
5923 	if (err)
5924 		return err;
5925 
5926 	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5927 	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5928 	    IWM_FW_CTXT_ACTION_MODIFY, 0);
5929 	if (err)
5930 		return err;
5931 	in->in_phyctxt = &sc->sc_phyctxt[0];
5932 
5933 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
5934 	if (err) {
5935 		aprint_error_dev(sc->sc_dev,
5936 		    "could not add MAC context (error %d)\n", err);
5937 		return err;
5938 	}
5939 
5940 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
5941 	if (err)
5942 		return err;
5943 
5944 	err = iwm_add_sta_cmd(sc, in, 0);
5945 	if (err)
5946 		return err;
5947 
5948 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
5949 	if (err) {
5950 		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5951 		return err;
5952 	}
5953 
5954 	/*
5955 	 * Prevent the FW from wandering off channel during association
5956 	 * by "protecting" the session with a time event.
5957 	 */
5958 	if (in->in_ni.ni_intval)
5959 		duration = in->in_ni.ni_intval * 2;
5960 	else
5961 		duration = IEEE80211_DUR_TU;
5962 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
5963 	DELAY(100);
5964 
5965 	return 0;
5966 }
5967 
5968 static int
5969 iwm_assoc(struct iwm_softc *sc)
5970 {
5971 	struct ieee80211com *ic = &sc->sc_ic;
5972 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5973 	int err;
5974 
5975 	err = iwm_add_sta_cmd(sc, in, 1);
5976 	if (err)
5977 		return err;
5978 
5979 	return 0;
5980 }
5981 
5982 static struct ieee80211_node *
5983 iwm_node_alloc(struct ieee80211_node_table *nt)
5984 {
5985 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5986 }
5987 
5988 static void
5989 iwm_calib_timeout(void *arg)
5990 {
5991 	struct iwm_softc *sc = arg;
5992 	struct ieee80211com *ic = &sc->sc_ic;
5993 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5994 #ifndef IEEE80211_NO_HT
5995 	struct ieee80211_node *ni = &in->in_ni;
5996 	int otxrate;
5997 #endif
5998 	int s;
5999 
6000 	s = splnet();
6001 	if ((ic->ic_fixed_rate == -1
6002 #ifndef IEEE80211_NO_HT
6003 	    || ic->ic_fixed_mcs == -1
6004 #endif
6005 	    ) &&
6006 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
6007 #ifndef IEEE80211_NO_HT
6008 		if (ni->ni_flags & IEEE80211_NODE_HT)
6009 			otxrate = ni->ni_txmcs;
6010 		else
6011 			otxrate = ni->ni_txrate;
6012 #endif
6013 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
6014 
6015 #ifndef IEEE80211_NO_HT
6016 		/*
6017 		 * If AMRR has chosen a new TX rate we must update
6018 		 * the firmware's LQ rate table from process context.
6019 		 */
6020 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6021 		    otxrate != ni->ni_txmcs)
6022 			softint_schedule(sc->setrates_task);
6023 		else if (otxrate != ni->ni_txrate)
6024 			softint_schedule(sc->setrates_task);
6025 #endif
6026 	}
6027 	splx(s);
6028 
6029 	callout_schedule(&sc->sc_calib_to, mstohz(500));
6030 }
6031 
6032 #ifndef IEEE80211_NO_HT
6033 static void
6034 iwm_setrates_task(void *arg)
6035 {
6036 	struct iwm_softc *sc = arg;
6037 	struct ieee80211com *ic = &sc->sc_ic;
6038 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6039 
6040 	/* Update rates table based on new TX rate determined by AMRR. */
6041 	iwm_setrates(in);
6042 }
6043 
6044 static int
6045 iwm_setrates(struct iwm_node *in)
6046 {
6047 	struct ieee80211_node *ni = &in->in_ni;
6048 	struct ieee80211com *ic = ni->ni_ic;
6049 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6050 	struct iwm_lq_cmd *lq = &in->in_lq;
6051 	struct ieee80211_rateset *rs = &ni->ni_rates;
6052 	int i, j, ridx, ridx_min, tab = 0;
6053 #ifndef IEEE80211_NO_HT
6054 	int sgi_ok;
6055 #endif
6056 	struct iwm_host_cmd cmd = {
6057 		.id = IWM_LQ_CMD,
6058 		.len = { sizeof(in->in_lq), },
6059 	};
6060 
6061 	memset(lq, 0, sizeof(*lq));
6062 	lq->sta_id = IWM_STATION_ID;
6063 
6064 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6065 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
6066 
6067 #ifndef IEEE80211_NO_HT
6068 	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
6069 	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
6070 #endif
6071 
6073 	/*
6074 	 * Fill the LQ rate selection table with legacy and/or HT rates
6075 	 * in descending order, i.e. with the node's current TX rate first.
6076 	 * In cases where throughput of an HT rate corresponds to a legacy
6077 	 * rate it makes no sense to add both. We rely on the fact that
6078 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
6079 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
6080 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
6081 	 */
6082 	j = 0;
6083 	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6084 	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6085 	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
6086 		if (j >= __arraycount(lq->rs_table))
6087 			break;
6088 		tab = 0;
6089 #ifndef IEEE80211_NO_HT
6090 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6091 		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6092 			for (i = ni->ni_txmcs; i >= 0; i--) {
6093 				if (isclr(ni->ni_rxmcs, i))
6094 					continue;
6095 				if (ridx == iwm_mcs2ridx[i]) {
6096 					tab = iwm_rates[ridx].ht_plcp;
6097 					tab |= IWM_RATE_MCS_HT_MSK;
6098 					if (sgi_ok)
6099 						tab |= IWM_RATE_MCS_SGI_MSK;
6100 					break;
6101 				}
6102 			}
6103 		}
6104 #endif
6105 		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
6106 			for (i = ni->ni_txrate; i >= 0; i--) {
6107 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
6108 				    IEEE80211_RATE_VAL)) {
6109 					tab = iwm_rates[ridx].plcp;
6110 					break;
6111 				}
6112 			}
6113 		}
6114 
6115 		if (tab == 0)
6116 			continue;
6117 
6118 		tab |= 1 << IWM_RATE_MCS_ANT_POS;
6119 		if (IWM_RIDX_IS_CCK(ridx))
6120 			tab |= IWM_RATE_MCS_CCK_MSK;
6121 		DPRINTFN(2, ("station rate %d %x\n", i, tab));
6122 		lq->rs_table[j++] = htole32(tab);
6123 	}
6124 
6125 	/* Fill the rest with the lowest possible rate */
6126 	i = j > 0 ? j - 1 : 0;
6127 	while (j < __arraycount(lq->rs_table))
6128 		lq->rs_table[j++] = lq->rs_table[i];
6129 
6130 	lq->single_stream_ant_msk = IWM_ANT_A;
6131 	lq->dual_stream_ant_msk = IWM_ANT_AB;
6132 
6133 	lq->agg_time_limit = htole16(4000);	/* 4ms */
6134 	lq->agg_disable_start_th = 3;
6135 #ifdef notyet
6136 	lq->agg_frame_cnt_limit = 0x3f;
6137 #else
6138 	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
6139 #endif
6140 
6141 	cmd.data[0] = &in->in_lq;
6142 	return iwm_send_cmd(sc, &cmd);
6143 }
6144 #endif
6145 
6146 static int
6147 iwm_media_change(struct ifnet *ifp)
6148 {
6149 	struct iwm_softc *sc = ifp->if_softc;
6150 	struct ieee80211com *ic = &sc->sc_ic;
6151 	uint8_t rate, ridx;
6152 	int err;
6153 
6154 	err = ieee80211_media_change(ifp);
6155 	if (err != ENETRESET)
6156 		return err;
6157 
6158 #ifndef IEEE80211_NO_HT
6159 	if (ic->ic_fixed_mcs != -1)
6160 		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
6161 	else
6162 #endif
6163 	if (ic->ic_fixed_rate != -1) {
6164 		rate = ic->ic_sup_rates[ic->ic_curmode].
6165 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6166 		/* Map 802.11 rate to HW rate index. */
6167 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
6168 			if (iwm_rates[ridx].rate == rate)
6169 				break;
6170 		sc->sc_fixed_ridx = ridx;
6171 	}
6172 
6173 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6174 	    (IFF_UP | IFF_RUNNING)) {
6175 		iwm_stop(ifp, 0);
6176 		err = iwm_init(ifp);
6177 	}
6178 	return err;
6179 }
6180 
6181 static int
6182 iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6183 {
6184 	struct ifnet *ifp = IC2IFP(ic);
6185 	struct iwm_softc *sc = ifp->if_softc;
6186 	enum ieee80211_state ostate = ic->ic_state;
6187 	struct iwm_node *in;
6188 	int err;
6189 
6190 	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
6191 	    ieee80211_state_name[nstate]));
6192 
6193 	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
6194 		iwm_led_blink_stop(sc);
6195 
6196 	if (ostate == IEEE80211_S_RUN && nstate != ostate)
6197 		iwm_disable_beacon_filter(sc);
6198 
6199 	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
6200 	/* XXX Is there a way to switch states without a full reset? */
6201 	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
6202 		/*
6203 		 * Upon receiving a deauth frame from the AP, the net80211 stack
6204 		 * puts the driver into AUTH state.  This will fail with this
6205 		 * driver, so force the FSM through INIT and restart instead.
6206 		 */
6207 		if (nstate != IEEE80211_S_INIT) {
6208 			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
6209 			/* Always pass arg as -1 since we can't Tx right now. */
6210 			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
6211 			iwm_stop(ifp, 0);
6212 			iwm_init(ifp);
6213 			return 0;
6214 		}
6215 
6216 		iwm_stop_device(sc);
6217 		iwm_init_hw(sc);
6218 	}
6219 
6220 	switch (nstate) {
6221 	case IEEE80211_S_INIT:
6222 		break;
6223 
6224 	case IEEE80211_S_SCAN:
6225 		if (ostate == nstate &&
6226 		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
6227 			return 0;
6228 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6229 			err = iwm_umac_scan(sc);
6230 		else
6231 			err = iwm_lmac_scan(sc);
6232 		if (err) {
6233 			DPRINTF(("%s: could not initiate scan: %d\n",
6234 			    DEVNAME(sc), err));
6235 			return err;
6236 		}
6237 		SET(sc->sc_flags, IWM_FLAG_SCANNING);
6238 		ic->ic_state = nstate;
6239 		iwm_led_blink_start(sc);
6240 		return 0;
6241 
6242 	case IEEE80211_S_AUTH:
6243 		err = iwm_auth(sc);
6244 		if (err) {
6245 			DPRINTF(("%s: could not move to auth state: %d\n",
6246 			    DEVNAME(sc), err));
6247 			return err;
6248 		}
6249 		break;
6250 
6251 	case IEEE80211_S_ASSOC:
6252 		err = iwm_assoc(sc);
6253 		if (err) {
6254 			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
6255 			    err));
6256 			return err;
6257 		}
6258 		break;
6259 
6260 	case IEEE80211_S_RUN:
6261 		in = (struct iwm_node *)ic->ic_bss;
6262 
6263 		/* We have now been assigned an associd by the AP. */
6264 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
6265 		if (err) {
6266 			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
6267 			return err;
6268 		}
6269 
6270 		err = iwm_power_update_device(sc);
6271 		if (err) {
6272 			aprint_error_dev(sc->sc_dev,
6273 		    "could not send power command (error %d)\n", err);
6274 			return err;
6275 		}
6276 #ifdef notyet
6277 		/*
6278 		 * Disabled for now. Default beacon filter settings
6279 		 * prevent net80211 from getting ERP and HT protection
6280 		 * updates from beacons.
6281 		 */
6282 		err = iwm_enable_beacon_filter(sc, in);
6283 		if (err) {
6284 			aprint_error_dev(sc->sc_dev,
6285 			    "could not enable beacon filter\n");
6286 			return err;
6287 		}
6288 #endif
6289 		err = iwm_power_mac_update_mode(sc, in);
6290 		if (err) {
6291 			aprint_error_dev(sc->sc_dev,
6292 			    "could not update MAC power (error %d)\n", err);
6293 			return err;
6294 		}
6295 
6296 		err = iwm_update_quotas(sc, in);
6297 		if (err) {
6298 			aprint_error_dev(sc->sc_dev,
6299 			    "could not update quotas (error %d)\n", err);
6300 			return err;
6301 		}
6302 
6303 		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
6304 
6305 		/* Start at lowest available bit-rate, AMRR will raise. */
6306 		in->in_ni.ni_txrate = 0;
6307 #ifndef IEEE80211_NO_HT
6308 		in->in_ni.ni_txmcs = 0;
6309 		iwm_setrates(in);
6310 #endif
6311 
6312 		callout_schedule(&sc->sc_calib_to, mstohz(500));
6313 		iwm_led_enable(sc);
6314 		break;
6315 
6316 	default:
6317 		break;
6318 	}
6319 
6320 	return sc->sc_newstate(ic, nstate, arg);
6321 }
6322 
6323 static void
6324 iwm_newstate_cb(struct work *wk, void *v)
6325 {
6326 	struct iwm_softc *sc = v;
6327 	struct ieee80211com *ic = &sc->sc_ic;
6328 	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
6329 	enum ieee80211_state nstate = iwmns->ns_nstate;
6330 	int generation = iwmns->ns_generation;
6331 	int arg = iwmns->ns_arg;
6332 	int s;
6333 
6334 	kmem_intr_free(iwmns, sizeof(*iwmns));
6335 
6336 	s = splnet();
6337 
6338 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
6339 	if (sc->sc_generation != generation) {
6340 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
6341 		if (nstate == IEEE80211_S_INIT) {
6342 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
6343 			    "calling sc_newstate()\n"));
6344 			(void) sc->sc_newstate(ic, nstate, arg);
6345 		}
6346 	} else
6347 		(void) iwm_do_newstate(ic, nstate, arg);
6348 
6349 	splx(s);
6350 }
6351 
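/*
 * State transitions are deferred to the sc_nswq workqueue and carried
 * out by iwm_newstate_cb(); ns_generation lets the callback detect a
 * device reset that happened while the request was queued.
 */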
6352 static int
6353 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6354 {
6355 	struct iwm_newstate_state *iwmns;
6356 	struct ifnet *ifp = IC2IFP(ic);
6357 	struct iwm_softc *sc = ifp->if_softc;
6358 
6359 	callout_stop(&sc->sc_calib_to);
6360 
6361 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6362 	if (!iwmns) {
6363 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6364 		return ENOMEM;
6365 	}
6366 
6367 	iwmns->ns_nstate = nstate;
6368 	iwmns->ns_arg = arg;
6369 	iwmns->ns_generation = sc->sc_generation;
6370 
6371 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6372 
6373 	return 0;
6374 }
6375 
6376 static void
6377 iwm_endscan(struct iwm_softc *sc)
6378 {
6379 	struct ieee80211com *ic = &sc->sc_ic;
6380 	int s;
6381 
6382 	DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6383 
6384 	s = splnet();
6385 	if (ic->ic_state == IEEE80211_S_SCAN)
6386 		ieee80211_end_scan(ic);
6387 	splx(s);
6388 }
6389 
6390 /*
6391  * Aging and idle timeouts for the different possible scenarios
6392  * in default configuration
6393  */
6394 static const uint32_t
6395 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6396 	{
6397 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6398 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6399 	},
6400 	{
6401 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6402 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6403 	},
6404 	{
6405 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6406 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6407 	},
6408 	{
6409 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
6410 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6411 	},
6412 	{
6413 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6414 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6415 	},
6416 };
6417 
6418 /*
6419  * Aging and idle timeouts for the different possible scenarios
6420  * in single BSS MAC configuration.
6421  */
6422 static const uint32_t
6423 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6424 	{
6425 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6426 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6427 	},
6428 	{
6429 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6430 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6431 	},
6432 	{
6433 		htole32(IWM_SF_MCAST_AGING_TIMER),
6434 		htole32(IWM_SF_MCAST_IDLE_TIMER)
6435 	},
6436 	{
6437 		htole32(IWM_SF_BA_AGING_TIMER),
6438 		htole32(IWM_SF_BA_IDLE_TIMER)
6439 	},
6440 	{
6441 		htole32(IWM_SF_TX_RE_AGING_TIMER),
6442 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
6443 	},
6444 };
6445 
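/*
 * Fill the smart FIFO command: choose the full-on watermark based on
 * the peer's antenna configuration (or a default when unassociated)
 * and install the timeout tables defined above.
 */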
6446 static void
6447 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6448     struct ieee80211_node *ni)
6449 {
6450 	int i, j, watermark;
6451 
6452 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6453 
6454 	/*
6455 	 * If we are in the association flow, check the antenna configuration
6456 	 * capabilities of the AP station and choose the watermark accordingly.
6457 	 */
6458 	if (ni) {
6459 #ifndef IEEE80211_NO_HT
6460 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6461 #ifdef notyet
6462 			if (ni->ni_rxmcs[2] != 0)
6463 				watermark = IWM_SF_W_MARK_MIMO3;
6464 			else if (ni->ni_rxmcs[1] != 0)
6465 				watermark = IWM_SF_W_MARK_MIMO2;
6466 			else
6467 #endif
6468 				watermark = IWM_SF_W_MARK_SISO;
6469 		} else
6470 #endif
6471 			watermark = IWM_SF_W_MARK_LEGACY;
6472 	/* default watermark value for unassociated mode. */
6473 	} else {
6474 		watermark = IWM_SF_W_MARK_MIMO2;
6475 	}
6476 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6477 
6478 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6479 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6480 			sf_cmd->long_delay_timeouts[i][j] =
6481 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6482 		}
6483 	}
6484 
6485 	if (ni) {
6486 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6487 		       sizeof(iwm_sf_full_timeout));
6488 	} else {
6489 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6490 		       sizeof(iwm_sf_full_timeout_def));
6491 	}
6492 }
6493 
6494 static int
6495 iwm_sf_config(struct iwm_softc *sc, int new_state)
6496 {
6497 	struct ieee80211com *ic = &sc->sc_ic;
6498 	struct iwm_sf_cfg_cmd sf_cmd = {
6499 		.state = htole32(IWM_SF_FULL_ON),
6500 	};
6501 
6502 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6503 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6504 
6505 	switch (new_state) {
6506 	case IWM_SF_UNINIT:
6507 	case IWM_SF_INIT_OFF:
6508 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
6509 		break;
6510 	case IWM_SF_FULL_ON:
6511 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6512 		break;
6513 	default:
6514 		return EINVAL;
6515 	}
6516 
6517 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6518 	    sizeof(sf_cmd), &sf_cmd);
6519 }
6520 
6521 static int
6522 iwm_send_bt_init_conf(struct iwm_softc *sc)
6523 {
6524 	struct iwm_bt_coex_cmd bt_cmd;
6525 
6526 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6527 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6528 
6529 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6530 }
6531 
6532 static bool
6533 iwm_is_lar_supported(struct iwm_softc *sc)
6534 {
6535 	bool nvm_lar = sc->sc_nvm.lar_enabled;
6536 	bool tlv_lar = isset(sc->sc_enabled_capa,
6537 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6538 
6539 	if (iwm_lar_disable)
6540 		return false;
6541 
6542 	/*
6543 	 * Enable LAR only if it is supported by the FW (TLV) &&
6544 	 * enabled in the NVM
6545 	 */
6546 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6547 		return nvm_lar && tlv_lar;
6548 	else
6549 		return tlv_lar;
6550 }
6551 
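/*
 * Tell the firmware which regulatory domain ("mobile country code") to
 * use.  This is a no-op unless location-aware regulatory (LAR) support
 * is available and enabled.
 */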
6552 static int
6553 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6554 {
6555 	struct iwm_mcc_update_cmd mcc_cmd;
6556 	struct iwm_host_cmd hcmd = {
6557 		.id = IWM_MCC_UPDATE_CMD,
6558 		.flags = IWM_CMD_WANT_SKB,
6559 		.data = { &mcc_cmd },
6560 	};
6561 	int err;
6562 	int resp_v2 = isset(sc->sc_enabled_capa,
6563 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6564 
6565 	if (!iwm_is_lar_supported(sc)) {
6566 		DPRINTF(("%s: no LAR support\n", __func__));
6567 		return 0;
6568 	}
6569 
6570 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6571 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6572 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6573 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6574 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6575 	else
6576 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6577 
6578 	if (resp_v2)
6579 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6580 	else
6581 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6582 
6583 	err = iwm_send_cmd(sc, &hcmd);
6584 	if (err)
6585 		return err;
6586 
6587 	iwm_free_resp(sc, &hcmd);
6588 
6589 	return 0;
6590 }
6591 
6592 static void
6593 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6594 {
6595 	struct iwm_host_cmd cmd = {
6596 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6597 		.len = { sizeof(uint32_t), },
6598 		.data = { &backoff, },
6599 	};
6600 
6601 	iwm_send_cmd(sc, &cmd);
6602 }
6603 
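/*
 * Bring the device up: boot the INIT firmware image and run the initial
 * calibrations, then restart with the regular runtime image and send
 * the basic configuration (BT coex, antennas, PHY DB, PHY contexts,
 * regulatory domain, scan config, Tx queues, beacon filter).
 */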
6604 static int
6605 iwm_init_hw(struct iwm_softc *sc)
6606 {
6607 	struct ieee80211com *ic = &sc->sc_ic;
6608 	int err, i, ac;
6609 
6610 	err = iwm_start_hw(sc);
6611 	if (err) {
6612 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6613 		return err;
6614 	}
6615 
6616 	err = iwm_run_init_mvm_ucode(sc, 0);
6617 	if (err)
6618 		return err;
6619 
6620 	/* Should stop and start HW since INIT image just loaded. */
6621 	iwm_stop_device(sc);
6622 	err = iwm_start_hw(sc);
6623 	if (err) {
6624 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6625 		return err;
6626 	}
6627 
6628 	/* Restart, this time with the regular firmware */
6629 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6630 	if (err) {
6631 		aprint_error_dev(sc->sc_dev,
6632 		    "could not load firmware (error %d)\n", err);
6633 		goto err;
6634 	}
6635 
6636 	err = iwm_send_bt_init_conf(sc);
6637 	if (err) {
6638 		aprint_error_dev(sc->sc_dev,
6639 		    "could not init bt coex (error %d)\n", err);
6640 		goto err;
6641 	}
6642 
6643 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6644 	if (err) {
6645 		aprint_error_dev(sc->sc_dev,
6646 		    "could not init tx ant config (error %d)\n", err);
6647 		goto err;
6648 	}
6649 
6650 	/* Send PHY DB control command and then PHY DB calibration. */
6651 	err = iwm_send_phy_db_data(sc);
6652 	if (err) {
6653 		aprint_error_dev(sc->sc_dev,
6654 		    "could not init phy db (error %d)\n", err);
6655 		goto err;
6656 	}
6657 
6658 	err = iwm_send_phy_cfg_cmd(sc);
6659 	if (err) {
6660 		aprint_error_dev(sc->sc_dev,
6661 		    "could not send phy config (error %d)\n", err);
6662 		goto err;
6663 	}
6664 
6665 	/* Add auxiliary station for scanning */
6666 	err = iwm_add_aux_sta(sc);
6667 	if (err) {
6668 		aprint_error_dev(sc->sc_dev,
6669 		    "could not add aux station (error %d)\n", err);
6670 		goto err;
6671 	}
6672 
6673 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6674 		/*
6675 		 * The channel used here isn't relevant as it's
6676 		 * going to be overwritten in the other flows.
6677 		 * For now use the first channel we have.
6678 		 */
6679 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6680 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6681 		    IWM_FW_CTXT_ACTION_ADD, 0);
6682 		if (err) {
6683 			aprint_error_dev(sc->sc_dev,
6684 			    "could not add phy context %d (error %d)\n",
6685 			    i, err);
6686 			goto err;
6687 		}
6688 	}
6689 
6690 	/* Initialize tx backoffs to the minimum. */
6691 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6692 		iwm_tt_tx_backoff(sc, 0);
6693 
6694 	err = iwm_power_update_device(sc);
6695 	if (err) {
6696 		aprint_error_dev(sc->sc_dev,
6697 		    "could not send power command (error %d)\n", err);
6698 		goto err;
6699 	}
6700 
6701 	err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6702 	if (err) {
6703 		aprint_error_dev(sc->sc_dev,
6704 		    "could not init LAR (error %d)\n", err);
6705 		goto err;
6706 	}
6707 
6708 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6709 		err = iwm_config_umac_scan(sc);
6710 		if (err) {
6711 			aprint_error_dev(sc->sc_dev,
6712 			    "could not configure scan (error %d)\n", err);
6713 			goto err;
6714 		}
6715 	}
6716 
6717 	for (ac = 0; ac < WME_NUM_AC; ac++) {
6718 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6719 		    iwm_ac_to_tx_fifo[ac]);
6720 		if (err) {
6721 			aprint_error_dev(sc->sc_dev,
6722 			    "could not enable Tx queue %d (error %d)\n",
6723 			    ac, err);
6724 			goto err;
6725 		}
6726 	}
6727 
6728 	err = iwm_disable_beacon_filter(sc);
6729 	if (err) {
6730 		aprint_error_dev(sc->sc_dev,
6731 		    "could not disable beacon filter (error %d)\n", err);
6732 		goto err;
6733 	}
6734 
6735 	return 0;
6736 
6737  err:
6738 	iwm_stop_device(sc);
6739 	return err;
6740 }
6741 
6742 /* Allow multicast from our BSSID. */
6743 static int
6744 iwm_allow_mcast(struct iwm_softc *sc)
6745 {
6746 	struct ieee80211com *ic = &sc->sc_ic;
6747 	struct ieee80211_node *ni = ic->ic_bss;
6748 	struct iwm_mcast_filter_cmd *cmd;
6749 	size_t size;
6750 	int err;
6751 
6752 	size = roundup(sizeof(*cmd), 4);
6753 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6754 	if (cmd == NULL)
6755 		return ENOMEM;
6756 	cmd->filter_own = 1;
6757 	cmd->port_id = 0;
6758 	cmd->count = 0;
6759 	cmd->pass_all = 1;
6760 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6761 
6762 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6763 	kmem_intr_free(cmd, size);
6764 	return err;
6765 }
6766 
6767 static int
6768 iwm_init(struct ifnet *ifp)
6769 {
6770 	struct iwm_softc *sc = ifp->if_softc;
6771 	int err;
6772 
6773 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6774 		return 0;
6775 
6776 	sc->sc_generation++;
6777 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
6778 
6779 	err = iwm_init_hw(sc);
6780 	if (err) {
6781 		iwm_stop(ifp, 1);
6782 		return err;
6783 	}
6784 
6785 	ifp->if_flags &= ~IFF_OACTIVE;
6786 	ifp->if_flags |= IFF_RUNNING;
6787 
6788 	ieee80211_begin_scan(&sc->sc_ic, 0);
6789 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6790 
6791 	return 0;
6792 }
6793 
6794 static void
6795 iwm_start(struct ifnet *ifp)
6796 {
6797 	struct iwm_softc *sc = ifp->if_softc;
6798 	struct ieee80211com *ic = &sc->sc_ic;
6799 	struct ieee80211_node *ni;
6800 	struct ether_header *eh;
6801 	struct mbuf *m;
6802 	int ac;
6803 
6804 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6805 		return;
6806 
6807 	for (;;) {
6808 		/* why isn't this done per-queue? */
6809 		if (sc->qfullmsk != 0) {
6810 			ifp->if_flags |= IFF_OACTIVE;
6811 			break;
6812 		}
6813 
6814 		/* need to send management frames even if we're not RUNning */
6815 		IF_DEQUEUE(&ic->ic_mgtq, m);
6816 		if (m) {
6817 			ni = M_GETCTX(m, struct ieee80211_node *);
6818 			M_CLEARCTX(m);
6819 			ac = WME_AC_BE;
6820 			goto sendit;
6821 		}
6822 		if (ic->ic_state != IEEE80211_S_RUN) {
6823 			break;
6824 		}
6825 
6826 		IFQ_DEQUEUE(&ifp->if_snd, m);
6827 		if (m == NULL)
6828 			break;
6829 
6830 		if (m->m_len < sizeof (*eh) &&
6831 		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
6832 			if_statinc(ifp, if_oerrors);
6833 			continue;
6834 		}
6835 
6836 		eh = mtod(m, struct ether_header *);
6837 		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6838 		if (ni == NULL) {
6839 			m_freem(m);
6840 			if_statinc(ifp, if_oerrors);
6841 			continue;
6842 		}
6843 
6844 		/* classify mbuf so we can find which tx ring to use */
6845 		if (ieee80211_classify(ic, m, ni) != 0) {
6846 			m_freem(m);
6847 			ieee80211_free_node(ni);
6848 			if_statinc(ifp, if_oerrors);
6849 			continue;
6850 		}
6851 
6852 		/* No QoS encapsulation for EAPOL frames. */
6853 		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6854 		    M_WME_GETAC(m) : WME_AC_BE;
6855 
6856 		bpf_mtap(ifp, m, BPF_D_OUT);
6857 
6858 		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6859 			ieee80211_free_node(ni);
6860 			if_statinc(ifp, if_oerrors);
6861 			continue;
6862 		}
6863 
6864  sendit:
6865 		bpf_mtap3(ic->ic_rawbpf, m, BPF_D_OUT);
6866 
6867 		if (iwm_tx(sc, m, ni, ac) != 0) {
6868 			ieee80211_free_node(ni);
6869 			if_statinc(ifp, if_oerrors);
6870 			continue;
6871 		}
6872 
6873 		if (ifp->if_flags & IFF_UP) {
6874 			sc->sc_tx_timer = 15;
6875 			ifp->if_timer = 1;
6876 		}
6877 	}
6878 }
6879 
6880 static void
6881 iwm_stop(struct ifnet *ifp, int disable)
6882 {
6883 	struct iwm_softc *sc = ifp->if_softc;
6884 	struct ieee80211com *ic = &sc->sc_ic;
6885 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6886 
6887 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6888 	sc->sc_flags |= IWM_FLAG_STOPPED;
6889 	sc->sc_generation++;
6890 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6891 
6892 	if (in)
6893 		in->in_phyctxt = NULL;
6894 
6895 	if (ic->ic_state != IEEE80211_S_INIT)
6896 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6897 
6898 	callout_stop(&sc->sc_calib_to);
6899 	iwm_led_blink_stop(sc);
6900 	ifp->if_timer = sc->sc_tx_timer = 0;
6901 	iwm_stop_device(sc);
6902 }
6903 
6904 static void
6905 iwm_watchdog(struct ifnet *ifp)
6906 {
6907 	struct iwm_softc *sc = ifp->if_softc;
6908 
6909 	ifp->if_timer = 0;
6910 	if (sc->sc_tx_timer > 0) {
6911 		if (--sc->sc_tx_timer == 0) {
6912 			aprint_error_dev(sc->sc_dev, "device timeout\n");
6913 #ifdef IWM_DEBUG
6914 			iwm_nic_error(sc);
6915 #endif
6916 			ifp->if_flags &= ~IFF_UP;
6917 			iwm_stop(ifp, 1);
6918 			if_statinc(ifp, if_oerrors);
6919 			return;
6920 		}
6921 		ifp->if_timer = 1;
6922 	}
6923 
6924 	ieee80211_watchdog(&sc->sc_ic);
6925 }
6926 
6927 static int
6928 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6929 {
6930 	struct iwm_softc *sc = ifp->if_softc;
6931 	struct ieee80211com *ic = &sc->sc_ic;
6932 	const struct sockaddr *sa;
6933 	int s, err = 0;
6934 
6935 	s = splnet();
6936 
6937 	switch (cmd) {
6938 	case SIOCSIFADDR:
6939 		ifp->if_flags |= IFF_UP;
6940 		/* FALLTHROUGH */
6941 	case SIOCSIFFLAGS:
6942 		err = ifioctl_common(ifp, cmd, data);
6943 		if (err)
6944 			break;
6945 		if (ifp->if_flags & IFF_UP) {
6946 			if (!(ifp->if_flags & IFF_RUNNING)) {
6947 				err = iwm_init(ifp);
6948 				if (err)
6949 					ifp->if_flags &= ~IFF_UP;
6950 			}
6951 		} else {
6952 			if (ifp->if_flags & IFF_RUNNING)
6953 				iwm_stop(ifp, 1);
6954 		}
6955 		break;
6956 
6957 	case SIOCADDMULTI:
6958 	case SIOCDELMULTI:
6959 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6960 		err = (cmd == SIOCADDMULTI) ?
6961 		    ether_addmulti(sa, &sc->sc_ec) :
6962 		    ether_delmulti(sa, &sc->sc_ec);
6963 		if (err == ENETRESET)
6964 			err = 0;
6965 		break;
6966 
6967 	default:
6968 		err = ieee80211_ioctl(ic, cmd, data);
6969 		break;
6970 	}
6971 
6972 	if (err == ENETRESET) {
6973 		err = 0;
6974 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6975 		    (IFF_UP | IFF_RUNNING)) {
6976 			iwm_stop(ifp, 0);
6977 			err = iwm_init(ifp);
6978 		}
6979 	}
6980 
6981 	splx(s);
6982 	return err;
6983 }
6984 
6985 /*
6986  * Note: This structure is read from the device with IO accesses,
6987  * and the reading already does the endian conversion. As it is
6988  * read with uint32_t-sized accesses, any members with a different size
6989  * need to be ordered correctly though!
6990  */
6991 struct iwm_error_event_table {
6992 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6993 	uint32_t error_id;		/* type of error */
6994 	uint32_t trm_hw_status0;	/* TRM HW status */
6995 	uint32_t trm_hw_status1;	/* TRM HW status */
6996 	uint32_t blink2;		/* branch link */
6997 	uint32_t ilink1;		/* interrupt link */
6998 	uint32_t ilink2;		/* interrupt link */
6999 	uint32_t data1;		/* error-specific data */
7000 	uint32_t data2;		/* error-specific data */
7001 	uint32_t data3;		/* error-specific data */
7002 	uint32_t bcon_time;		/* beacon timer */
7003 	uint32_t tsf_low;		/* network timestamp function timer */
7004 	uint32_t tsf_hi;		/* network timestamp function timer */
7005 	uint32_t gp1;		/* GP1 timer register */
7006 	uint32_t gp2;		/* GP2 timer register */
7007 	uint32_t fw_rev_type;	/* firmware revision type */
7008 	uint32_t major;		/* uCode version major */
7009 	uint32_t minor;		/* uCode version minor */
7010 	uint32_t hw_ver;		/* HW Silicon version */
7011 	uint32_t brd_ver;		/* HW board version */
7012 	uint32_t log_pc;		/* log program counter */
7013 	uint32_t frame_ptr;		/* frame pointer */
7014 	uint32_t stack_ptr;		/* stack pointer */
7015 	uint32_t hcmd;		/* last host command header */
7016 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
7017 				 * rxtx_flag */
7018 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
7019 				 * host_flag */
7020 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
7021 				 * enc_flag */
7022 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
7023 				 * time_flag */
7024 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
7025 				 * wico interrupt */
7026 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
7027 	uint32_t wait_event;		/* wait event() caller address */
7028 	uint32_t l2p_control;	/* L2pControlField */
7029 	uint32_t l2p_duration;	/* L2pDurationField */
7030 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
7031 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
7032 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
7033 				 * (LMPM_PMG_SEL) */
7034 	uint32_t u_timestamp;	/* date and time of the firmware
7035 				 * compilation */
7036 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
7037 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
7038 
7039 /*
7040  * UMAC error struct - relevant starting from family 8000 chip.
7041  * Note: This structure is read from the device with IO accesses,
7042  * and the reading already does the endian conversion. As it is
7043  * read with u32-sized accesses, any members with a different size
7044  * need to be ordered correctly though!
7045  */
7046 struct iwm_umac_error_event_table {
7047 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
7048 	uint32_t error_id;	/* type of error */
7049 	uint32_t blink1;	/* branch link */
7050 	uint32_t blink2;	/* branch link */
7051 	uint32_t ilink1;	/* interrupt link */
7052 	uint32_t ilink2;	/* interrupt link */
7053 	uint32_t data1;		/* error-specific data */
7054 	uint32_t data2;		/* error-specific data */
7055 	uint32_t data3;		/* error-specific data */
7056 	uint32_t umac_major;
7057 	uint32_t umac_minor;
7058 	uint32_t frame_pointer;	/* core register 27 */
7059 	uint32_t stack_pointer;	/* core register 28 */
7060 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
7061 	uint32_t nic_isr_pref;	/* ISR status register */
7062 } __packed;
7063 
7064 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
7065 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
7066 
7067 #ifdef IWM_DEBUG
7068 static const struct {
7069 	const char *name;
7070 	uint8_t num;
7071 } advanced_lookup[] = {
7072 	{ "NMI_INTERRUPT_WDG", 0x34 },
7073 	{ "SYSASSERT", 0x35 },
7074 	{ "UCODE_VERSION_MISMATCH", 0x37 },
7075 	{ "BAD_COMMAND", 0x38 },
7076 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
7077 	{ "FATAL_ERROR", 0x3D },
7078 	{ "NMI_TRM_HW_ERR", 0x46 },
7079 	{ "NMI_INTERRUPT_TRM", 0x4C },
7080 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
7081 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
7082 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
7083 	{ "NMI_INTERRUPT_HOST", 0x66 },
7084 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
7085 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
7086 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
7087 	{ "ADVANCED_SYSASSERT", 0 },
7088 };
7089 
7090 static const char *
7091 iwm_desc_lookup(uint32_t num)
7092 {
7093 	int i;
7094 
7095 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
7096 		if (advanced_lookup[i].num == num)
7097 			return advanced_lookup[i].name;
7098 
7099 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7100 	return advanced_lookup[i].name;
7101 }
7102 
7103 /*
7104  * Support for dumping the error log seemed like a good idea ...
7105  * but it's mostly hex junk and the only sensible thing is the
7106  * hw/ucode revision (which we know anyway).  Since it's here,
7107  * I'll just leave it in, just in case e.g. the Intel guys want to
7108  * help us decipher some "ADVANCED_SYSASSERT" later.
7109  */
7110 static void
7111 iwm_nic_error(struct iwm_softc *sc)
7112 {
7113 	struct iwm_error_event_table t;
7114 	uint32_t base;
7115 
7116 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
7117 	base = sc->sc_uc.uc_error_event_table;
7118 	if (base < 0x800000) {
7119 		aprint_error_dev(sc->sc_dev,
7120 		    "Invalid error log pointer 0x%08x\n", base);
7121 		return;
7122 	}
7123 
7124 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7125 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7126 		return;
7127 	}
7128 
7129 	if (!t.valid) {
7130 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
7131 		return;
7132 	}
7133 
7134 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7135 		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
7136 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7137 		    sc->sc_flags, t.valid);
7138 	}
7139 
7140 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
7141 	    iwm_desc_lookup(t.error_id));
7142 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
7143 	    t.trm_hw_status0);
7144 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
7145 	    t.trm_hw_status1);
7146 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
7147 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
7148 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
7149 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
7150 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
7151 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
7152 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
7153 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
7154 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
7155 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
7156 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
7157 	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
7158 	    t.fw_rev_type);
7159 	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
7160 	    t.major);
7161 	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
7162 	    t.minor);
7163 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
7164 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
7165 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
7166 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
7167 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
7168 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
7169 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
7170 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
7171 	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
7172 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
7173 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
7174 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
7175 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
7176 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
7177 	    t.l2p_addr_match);
7178 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
7179 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
7180 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
7181 
7182 	if (sc->sc_uc.uc_umac_error_event_table)
7183 		iwm_nic_umac_error(sc);
7184 }
7185 
7186 static void
7187 iwm_nic_umac_error(struct iwm_softc *sc)
7188 {
7189 	struct iwm_umac_error_event_table t;
7190 	uint32_t base;
7191 
7192 	base = sc->sc_uc.uc_umac_error_event_table;
7193 
7194 	if (base < 0x800000) {
7195 		aprint_error_dev(sc->sc_dev,
7196 		    "Invalid error log pointer 0x%08x\n", base);
7197 		return;
7198 	}
7199 
7200 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7201 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7202 		return;
7203 	}
7204 
7205 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7206 		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
7207 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7208 		    sc->sc_flags, t.valid);
7209 	}
7210 
7211 	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
7212 		iwm_desc_lookup(t.error_id));
7213 	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
7214 	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
7215 	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
7216 	    t.ilink1);
7217 	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
7218 	    t.ilink2);
7219 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
7220 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
7221 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
7222 	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
7223 	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
7224 	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
7225 	    t.frame_pointer);
7226 	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
7227 	    t.stack_pointer);
7228 	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
7229 	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
7230 	    t.nic_isr_pref);
7231 }
7232 #endif
7233 
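/*
 * The firmware's response data follows the iwm_rx_packet header in the
 * RX buffer.  These helpers sync the payload area of the DMA map
 * (starting just past the header) for reading and point _var_/_ptr_ at
 * the payload.
 */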
7234 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
7235 do {									\
7236 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7237 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
7238 	_var_ = (void *)((_pkt_)+1);					\
7239 } while (/*CONSTCOND*/0)
7240 
7241 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
7242 do {									\
7243 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7244 	    (_len_), BUS_DMASYNC_POSTREAD);				\
7245 	_ptr_ = (void *)((_pkt_)+1);					\
7246 } while (/*CONSTCOND*/0)
7247 
7248 #define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
7249 
7250 static void
7251 iwm_notif_intr(struct iwm_softc *sc)
7252 {
7253 	uint16_t hw;
7254 
7255 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7256 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7257 
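	/*
	 * closed_rb_num is the index of the most recent receive buffer
	 * the firmware has filled; process ring entries until our read
	 * cursor catches up with it.
	 */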
7258 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7259 	while (sc->rxq.cur != hw) {
7260 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7261 		struct iwm_rx_packet *pkt;
7262 		struct iwm_cmd_response *cresp;
7263 		int orig_qid, qid, idx, code;
7264 
7265 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
7266 		    BUS_DMASYNC_POSTREAD);
7267 		pkt = mtod(data->m, struct iwm_rx_packet *);
7268 
7269 		orig_qid = pkt->hdr.qid;
7270 		qid = orig_qid & ~0x80;
7271 		idx = pkt->hdr.idx;
7272 
7273 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7274 
7275 		/*
7276 		 * We occasionally get these from the firmware, no idea why.
7277 		 * They at least seem harmless, so just ignore them for now.
7278 		 */
7279 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
7280 		    || pkt->len_n_flags == htole32(0x55550000))) {
7281 			ADVANCE_RXQ(sc);
7282 			continue;
7283 		}
7284 
7285 		switch (code) {
7286 		case IWM_REPLY_RX_PHY_CMD:
7287 			iwm_rx_rx_phy_cmd(sc, pkt, data);
7288 			break;
7289 
7290 		case IWM_REPLY_RX_MPDU_CMD:
7291 			iwm_rx_rx_mpdu(sc, pkt, data);
7292 			break;
7293 
7294 		case IWM_TX_CMD:
7295 			iwm_rx_tx_cmd(sc, pkt, data);
7296 			break;
7297 
7298 		case IWM_MISSED_BEACONS_NOTIFICATION:
7299 			iwm_rx_missed_beacons_notif(sc, pkt, data);
7300 			break;
7301 
7302 		case IWM_MFUART_LOAD_NOTIFICATION:
7303 			break;
7304 
7305 		case IWM_ALIVE: {
7306 			struct iwm_alive_resp_v1 *resp1;
7307 			struct iwm_alive_resp_v2 *resp2;
7308 			struct iwm_alive_resp_v3 *resp3;
7309 
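			/*
			 * Several versions of the alive response exist;
			 * they are distinguished by payload length.
			 */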
7310 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
7311 				SYNC_RESP_STRUCT(resp1, pkt);
7312 				sc->sc_uc.uc_error_event_table
7313 				    = le32toh(resp1->error_event_table_ptr);
7314 				sc->sc_uc.uc_log_event_table
7315 				    = le32toh(resp1->log_event_table_ptr);
7316 				sc->sched_base = le32toh(resp1->scd_base_ptr);
7317 				if (resp1->status == IWM_ALIVE_STATUS_OK)
7318 					sc->sc_uc.uc_ok = 1;
7319 				else
7320 					sc->sc_uc.uc_ok = 0;
7321 			}
7322 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
7323 				SYNC_RESP_STRUCT(resp2, pkt);
7324 				sc->sc_uc.uc_error_event_table
7325 				    = le32toh(resp2->error_event_table_ptr);
7326 				sc->sc_uc.uc_log_event_table
7327 				    = le32toh(resp2->log_event_table_ptr);
7328 				sc->sched_base = le32toh(resp2->scd_base_ptr);
7329 				sc->sc_uc.uc_umac_error_event_table
7330 				    = le32toh(resp2->error_info_addr);
7331 				if (resp2->status == IWM_ALIVE_STATUS_OK)
7332 					sc->sc_uc.uc_ok = 1;
7333 				else
7334 					sc->sc_uc.uc_ok = 0;
7335 			}
7336 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7337 				SYNC_RESP_STRUCT(resp3, pkt);
7338 				sc->sc_uc.uc_error_event_table
7339 				    = le32toh(resp3->error_event_table_ptr);
7340 				sc->sc_uc.uc_log_event_table
7341 				    = le32toh(resp3->log_event_table_ptr);
7342 				sc->sched_base = le32toh(resp3->scd_base_ptr);
7343 				sc->sc_uc.uc_umac_error_event_table
7344 				    = le32toh(resp3->error_info_addr);
7345 				if (resp3->status == IWM_ALIVE_STATUS_OK)
7346 					sc->sc_uc.uc_ok = 1;
7347 				else
7348 					sc->sc_uc.uc_ok = 0;
7349 			}
7350 
7351 			sc->sc_uc.uc_intr = 1;
7352 			wakeup(&sc->sc_uc);
7353 			break;
7354 		}
7355 
7356 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
7357 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
7358 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
7359 			uint16_t size = le16toh(phy_db_notif->length);
7360 			bus_dmamap_sync(sc->sc_dmat, data->map,
7361 			    sizeof(*pkt) + sizeof(*phy_db_notif),
7362 			    size, BUS_DMASYNC_POSTREAD);
7363 			iwm_phy_db_set_section(sc, phy_db_notif, size);
7364 			break;
7365 		}
7366 
7367 		case IWM_STATISTICS_NOTIFICATION: {
7368 			struct iwm_notif_statistics *stats;
7369 			SYNC_RESP_STRUCT(stats, pkt);
7370 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7371 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
7372 			break;
7373 		}
7374 
7375 		case IWM_NVM_ACCESS_CMD:
7376 		case IWM_MCC_UPDATE_CMD:
7377 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
7378 				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7379 				    sizeof(sc->sc_cmd_resp),
7380 				    BUS_DMASYNC_POSTREAD);
7381 				memcpy(sc->sc_cmd_resp,
7382 				    pkt, sizeof(sc->sc_cmd_resp));
7383 			}
7384 			break;
7385 
7386 		case IWM_MCC_CHUB_UPDATE_CMD: {
7387 			struct iwm_mcc_chub_notif *notif;
7388 			SYNC_RESP_STRUCT(notif, pkt);
7389 
7390 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
7391 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
7392 			sc->sc_fw_mcc[2] = '\0';
7393 			break;
7394 		}
7395 
7396 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
7397 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7398 		    IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7399 			struct iwm_dts_measurement_notif_v1 *notif1;
7400 			struct iwm_dts_measurement_notif_v2 *notif2;
7401 
7402 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7403 				SYNC_RESP_STRUCT(notif1, pkt);
7404 				DPRINTF(("%s: DTS temp=%d\n",
7405 				    DEVNAME(sc), notif1->temp));
7406 				break;
7407 			}
7408 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7409 				SYNC_RESP_STRUCT(notif2, pkt);
7410 				DPRINTF(("%s: DTS temp=%d\n",
7411 				    DEVNAME(sc), notif2->temp));
7412 				break;
7413 			}
7414 			break;
7415 		}
7416 
7417 		case IWM_PHY_CONFIGURATION_CMD:
7418 		case IWM_TX_ANT_CONFIGURATION_CMD:
7419 		case IWM_ADD_STA:
7420 		case IWM_MAC_CONTEXT_CMD:
7421 		case IWM_REPLY_SF_CFG_CMD:
7422 		case IWM_POWER_TABLE_CMD:
7423 		case IWM_PHY_CONTEXT_CMD:
7424 		case IWM_BINDING_CONTEXT_CMD:
7425 		case IWM_TIME_EVENT_CMD:
7426 		case IWM_SCAN_REQUEST_CMD:
7427 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7428 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7429 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7430 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7431 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
7432 		case IWM_REPLY_BEACON_FILTERING_CMD:
7433 		case IWM_MAC_PM_POWER_TABLE:
7434 		case IWM_TIME_QUOTA_CMD:
7435 		case IWM_REMOVE_STA:
7436 		case IWM_TXPATH_FLUSH:
7437 		case IWM_LQ_CMD:
7438 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_FW_PAGING_BLOCK_CMD):
7439 		case IWM_BT_CONFIG:
7440 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
7441 			SYNC_RESP_STRUCT(cresp, pkt);
7442 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
7443 				memcpy(sc->sc_cmd_resp,
7444 				    pkt, sizeof(*pkt) + sizeof(*cresp));
7445 			}
7446 			break;
7447 
7448 		/* ignore */
7449 		case IWM_PHY_DB_CMD:
7450 			break;
7451 
7452 		case IWM_INIT_COMPLETE_NOTIF:
7453 			sc->sc_init_complete = 1;
7454 			wakeup(&sc->sc_init_complete);
7455 			break;
7456 
7457 		case IWM_SCAN_OFFLOAD_COMPLETE: {
7458 			struct iwm_periodic_scan_complete *notif;
7459 			SYNC_RESP_STRUCT(notif, pkt);
7460 			break;
7461 		}
7462 
7463 		case IWM_SCAN_ITERATION_COMPLETE: {
7464 			struct iwm_lmac_scan_complete_notif *notif;
7465 			SYNC_RESP_STRUCT(notif, pkt);
7466 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7467 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7468 				iwm_endscan(sc);
7469 			}
7470 			break;
7471 		}
7472 
7473 		case IWM_SCAN_COMPLETE_UMAC: {
7474 			struct iwm_umac_scan_complete *notif;
7475 			SYNC_RESP_STRUCT(notif, pkt);
7476 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7477 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7478 				iwm_endscan(sc);
7479 			}
7480 			break;
7481 		}
7482 
7483 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7484 			struct iwm_umac_scan_iter_complete_notif *notif;
7485 			SYNC_RESP_STRUCT(notif, pkt);
7486 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7487 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7488 				iwm_endscan(sc);
7489 			}
7490 			break;
7491 		}
7492 
7493 		case IWM_REPLY_ERROR: {
7494 			struct iwm_error_resp *resp;
7495 			SYNC_RESP_STRUCT(resp, pkt);
7496 			aprint_error_dev(sc->sc_dev,
7497 			    "firmware error 0x%x, cmd 0x%x\n",
7498 			    le32toh(resp->error_type), resp->cmd_id);
7499 			break;
7500 		}
7501 
7502 		case IWM_TIME_EVENT_NOTIFICATION: {
7503 			struct iwm_time_event_notif *notif;
7504 			SYNC_RESP_STRUCT(notif, pkt);
7505 			break;
7506 		}
7507 
7508 		case IWM_DEBUG_LOG_MSG:
7509 			break;
7510 
7511 		case IWM_MCAST_FILTER_CMD:
7512 			break;
7513 
7514 		case IWM_SCD_QUEUE_CFG: {
7515 			struct iwm_scd_txq_cfg_rsp *rsp;
7516 			SYNC_RESP_STRUCT(rsp, pkt);
7517 			break;
7518 		}
7519 
7520 		default:
7521 			aprint_error_dev(sc->sc_dev,
7522 			    "unhandled firmware response 0x%x 0x%x/0x%x "
7523 			    "rx ring %d[%d]\n",
7524 			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
7525 			break;
7526 		}
7527 
7528 		/*
7529 		 * uCode sets bit 0x80 when it originates the notification,
7530 		 * i.e. when the notification is not a direct response to a
7531 		 * command sent by the driver.
7532 		 * For example, uCode issues IWM_REPLY_RX when it sends a
7533 		 * received frame to the driver.
7534 		 */
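		/* Thus a qid of 0x85 denotes a notification that arrived
		 * on queue 5 (0x85 & ~0x80). */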
7535 		if (!(orig_qid & (1 << 7))) {
7536 			iwm_cmd_done(sc, qid, idx);
7537 		}
7538 
7539 		ADVANCE_RXQ(sc);
7540 	}
7541 
7542 	/*
7543 	 * The hardware seems to get upset unless we align the write
	 * pointer to a multiple of 8.
7544 	 */
7545 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7546 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7547 }
7548 
7549 static int
7550 iwm_intr(void *arg)
7551 {
7552 	struct iwm_softc *sc = arg;
7553 
7554 	/* Disable interrupts */
7555 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7556 
7557 	softint_schedule(sc->sc_soft_ih);
7558 	return 1;
7559 }
7560 
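/*
 * Softint handler.  iwm_intr() has already masked the device's
 * interrupts and scheduled us; we acknowledge and dispatch the
 * interrupt causes here, and re-enable interrupts via
 * iwm_restore_interrupts() unless a fatal error was seen.
 */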
7561 static void
7562 iwm_softintr(void *arg)
7563 {
7564 	struct iwm_softc *sc = arg;
7565 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7566 	uint32_t r1, r2;
7567 	int isperiodic = 0, s;
7568 
7569 	if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
7570 		uint32_t *ict = sc->ict_dma.vaddr;
7571 		int tmp;
7572 
7573 		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7574 		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7575 		tmp = le32toh(ict[sc->ict_cur]);
7576 		if (tmp == 0)
7577 			goto out_ena;	/* Interrupt not for us. */
7578 
7579 		/*
7580 		 * Ok, there was something; keep reading until we have it all.
7581 		 */
7582 		r1 = r2 = 0;
7583 		while (tmp) {
7584 			r1 |= tmp;
7585 			ict[sc->ict_cur] = 0;	/* Acknowledge. */
7586 			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7587 			tmp = le32toh(ict[sc->ict_cur]);
7588 		}
7589 
7590 		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7591 		    0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);
7592 
7593 		/*
		 * An all-ones value is not a plausible interrupt mask;
		 * treat it as a bogus read (e.g. hardware gone).
		 */
7594 		if (r1 == 0xffffffff)
7595 			r1 = 0;
7596 
7597 		/*
		 * Hardware bug workaround, as described in the Linux
		 * iwlwifi driver: with interrupt coalescing, the Rx bit
		 * (bit 15 here, bit 31 after expansion) can read back as
		 * clear while bits 18 and 19 remain set; use those bits
		 * to reconstruct it.
		 */
7598 		if (r1 & 0xc0000)
7599 			r1 |= 0x8000;
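		/*
		 * Expand the compressed ICT value into IWM_CSR_INT layout:
		 * bits 0-7 stay in place, bits 8-15 move up to bits 24-31.
		 */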
7600 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7601 	} else {
7602 		r1 = IWM_READ(sc, IWM_CSR_INT);
7603 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7604 			return;	/* Hardware gone! */
7605 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7606 	}
7607 	if (r1 == 0 && r2 == 0) {
7608 		goto out_ena;	/* Interrupt not for us. */
7609 	}
7610 
7611 	/* Acknowledge interrupts. */
7612 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7613 	if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
7614 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);
7615 
7616 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7617 #ifdef IWM_DEBUG
7618 		int i;
7619 
7620 		iwm_nic_error(sc);
7621 
7622 		/* Dump driver status (TX and RX rings) while we're here. */
7623 		DPRINTF(("driver status:\n"));
7624 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
7625 			struct iwm_tx_ring *ring = &sc->txq[i];
7626 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
7627 			    "queued=%-3d\n",
7628 			    i, ring->qid, ring->cur, ring->queued));
7629 		}
7630 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
7631 		DPRINTF(("  802.11 state %s\n",
7632 		    ieee80211_state_name[sc->sc_ic.ic_state]));
7633 #endif
7634 
7635 		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7636  fatal:
7637 		s = splnet();
7638 		ifp->if_flags &= ~IFF_UP;
7639 		iwm_stop(ifp, 1);
7640 		splx(s);
7641 		/* Don't restore interrupt mask */
7642 		return;
7643 
7644 	}
7645 
7646 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7647 		aprint_error_dev(sc->sc_dev,
7648 		    "hardware error, stopping device\n");
7649 		goto fatal;
7650 	}
7651 
7652 	/* firmware chunk loaded */
7653 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7654 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7655 		sc->sc_fw_chunk_done = 1;
7656 		wakeup(&sc->sc_fw);
7657 	}
7658 
7659 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7660 		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
7661 			goto fatal;
7662 	}
7663 
7664 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7665 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7666 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7667 			IWM_WRITE_1(sc,
7668 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7669 		isperiodic = 1;
7670 	}
7671 
7672 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7673 	    isperiodic) {
7674 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7675 
7676 		iwm_notif_intr(sc);
7677 
7678 		/* enable periodic interrupt, see above */
7679 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7680 		    !isperiodic)
7681 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7682 			    IWM_CSR_INT_PERIODIC_ENA);
7683 	}
7684 
7685 out_ena:
7686 	iwm_restore_interrupts(sc);
7687 }
7688 
7689 /*
7690  * Autoconf glue-sniffing
7691  */
7692 
7693 static const pci_product_id_t iwm_devices[] = {
7694 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7695 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7696 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7697 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7698 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7699 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7700 	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7701 	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7702 	PCI_PRODUCT_INTEL_WIFI_LINK_3168,
7703 	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7704 	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7705 	PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7706 	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
7707 	PCI_PRODUCT_INTEL_WIFI_LINK_8265,
7708 };
7709 
7710 static int
7711 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7712 {
7713 	struct pci_attach_args *pa = aux;
7714 
7715 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7716 		return 0;
7717 
7718 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7719 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7720 			return 1;
7721 
7722 	return 0;
7723 }
7724 
7725 static int
7726 iwm_preinit(struct iwm_softc *sc)
7727 {
7728 	int err;
7729 
7730 	err = iwm_start_hw(sc);
7731 	if (err) {
7732 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7733 		return err;
7734 	}
7735 
7736 	err = iwm_run_init_mvm_ucode(sc, 1);
7737 	iwm_stop_device(sc);
7738 	if (err)
7739 		return err;
7740 
7741 	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7742 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7743 	    ether_sprintf(sc->sc_nvm.hw_addr));
7744 
7745 	return 0;
7746 }
7747 
7748 static void
7749 iwm_attach_hook(device_t dev)
7750 {
7751 	struct iwm_softc *sc = device_private(dev);
7752 
7753 	iwm_config_complete(sc);
7754 }
7755 
7756 static void
7757 iwm_attach(device_t parent, device_t self, void *aux)
7758 {
7759 	struct iwm_softc *sc = device_private(self);
7760 	struct pci_attach_args *pa = aux;
7761 	pcireg_t reg, memtype;
7762 	char intrbuf[PCI_INTRSTR_LEN];
7763 	const char *intrstr;
7764 	int err;
7765 	int txq_i;
7766 	const struct sysctlnode *node;
7767 
7768 	sc->sc_dev = self;
7769 	sc->sc_pct = pa->pa_pc;
7770 	sc->sc_pcitag = pa->pa_tag;
7771 	sc->sc_dmat = pa->pa_dmat;
7772 	sc->sc_pciid = pa->pa_id;
7773 
7774 	pci_aprint_devinfo(pa, NULL);
7775 
7776 	if (workqueue_create(&sc->sc_nswq, "iwmns",
7777 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7778 		panic("%s: could not create workqueue: newstate",
7779 		    device_xname(self));
7780 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
7781 	if (sc->sc_soft_ih == NULL)
7782 		panic("%s: could not establish softint", device_xname(self));
7783 
7784 	/*
7785 	 * Get the offset of the PCI Express Capability Structure in PCI
7786 	 * Configuration Space.
7787 	 */
7788 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7789 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7790 	if (err == 0) {
7791 		aprint_error_dev(self,
7792 		    "PCIe capability structure not found!\n");
7793 		return;
7794 	}
7795 
7796 	/* Clear device-specific "PCI retry timeout" register (41h). */
7797 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7798 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7799 
7800 	/* Enable bus-mastering */
7801 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7802 	reg |= PCI_COMMAND_MASTER_ENABLE;
7803 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7804 
7805 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7806 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7807 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7808 	if (err) {
7809 		aprint_error_dev(self, "can't map mem space\n");
7810 		return;
7811 	}
7812 
7813 	/* Install interrupt handler. */
7814 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7815 	if (err) {
7816 		aprint_error_dev(self, "can't allocate interrupt\n");
7817 		return;
7818 	}
7819 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7820 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
7821 		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7822 	else
7823 		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7824 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7825 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7826 	    sizeof(intrbuf));
7827 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
7828 	    IPL_NET, iwm_intr, sc, device_xname(self));
7829 	if (sc->sc_ih == NULL) {
7830 		aprint_error_dev(self, "can't establish interrupt");
7831 		if (intrstr != NULL)
7832 			aprint_error(" at %s", intrstr);
7833 		aprint_error("\n");
7834 		return;
7835 	}
7836 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7837 
7838 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7839 
7840 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7841 	switch (PCI_PRODUCT(sc->sc_pciid)) {
7842 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7843 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7844 		sc->sc_fwname = "iwlwifi-3160-17.ucode";
7845 		sc->host_interrupt_operation_mode = 1;
7846 		sc->apmg_wake_up_wa = 1;
7847 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7848 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7849 		break;
7850 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7851 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7852 		sc->sc_fwname = "iwlwifi-7265D-22.ucode";
7853 		sc->host_interrupt_operation_mode = 0;
7854 		sc->apmg_wake_up_wa = 1;
7855 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7856 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7857 		break;
7858 	case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
7859 		sc->sc_fwname = "iwlwifi-3168-22.ucode";
7860 		sc->host_interrupt_operation_mode = 0;
7861 		sc->apmg_wake_up_wa = 1;
7862 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7863 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7864 		break;
7865 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7866 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7867 		sc->sc_fwname = "iwlwifi-7260-17.ucode";
7868 		sc->host_interrupt_operation_mode = 1;
7869 		sc->apmg_wake_up_wa = 1;
7870 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7871 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7872 		break;
7873 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7874 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7875 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7876 		    IWM_CSR_HW_REV_TYPE_7265D ?
7877 		    "iwlwifi-7265D-22.ucode": "iwlwifi-7265-17.ucode";
7878 		sc->host_interrupt_operation_mode = 0;
7879 		sc->apmg_wake_up_wa = 1;
7880 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7881 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7882 		break;
7883 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7884 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7885 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
7886 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
7887 		sc->sc_fwname = "iwlwifi-8000C-22.ucode";
7888 		sc->host_interrupt_operation_mode = 0;
7889 		sc->apmg_wake_up_wa = 0;
7890 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7891 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7892 		break;
7893 	case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
7894 		sc->sc_fwname = "iwlwifi-8265-22.ucode";
7895 		sc->host_interrupt_operation_mode = 0;
7896 		sc->apmg_wake_up_wa = 0;
7897 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7898 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7899 		break;
7900 	default:
7901 		aprint_error_dev(self, "unknown product %#x\n",
7902 		    PCI_PRODUCT(sc->sc_pciid));
7903 		return;
7904 	}
7905 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7906 
7907 	/*
7908 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
7909 	 * changed: the revision step now also includes bits 0-1 (there is no
7910 	 * more "dash" value). To keep hw_rev backwards compatible, we store
7911 	 * it in the old format.
7912 	 */
7913 
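	/*
	 * A sketch of the conversion (assuming IWM_CSR_HW_REV_STEP
	 * extracts bits 2-3): the step moves from bits 0-1 of the new
	 * format into the old "dash" position at bits 2-3, i.e.
	 * new = (old & 0xfff0) | ((old & 0x3) << 2).
	 */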
7914 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7915 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7916 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7917 
7918 	if (iwm_prepare_card_hw(sc) != 0) {
7919 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7920 		return;
7921 	}
7922 
7923 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7924 		uint32_t hw_step;
7925 
7926 		/*
7927 		 * In order to recognize C step the driver should read the
7928 		 * chip version id located at the AUX bus MISC address.
7929 		 */
7930 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7931 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7932 		DELAY(2);
7933 
7934 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7935 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7936 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7937 				   25000);
7938 		if (!err) {
7939 			aprint_error_dev(sc->sc_dev,
7940 			    "failed to wake up the nic\n");
7941 			return;
7942 		}
7943 
7944 		if (iwm_nic_lock(sc)) {
7945 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7946 			hw_step |= IWM_ENABLE_WFPM;
7947 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7948 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7949 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7950 			if (hw_step == 0x3)
7951 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7952 				    (IWM_SILICON_C_STEP << 2);
7953 			iwm_nic_unlock(sc);
7954 		} else {
7955 			aprint_error_dev(sc->sc_dev,
7956 			    "failed to lock the nic\n");
7957 			return;
7958 		}
7959 	}
7960 
7961 	/*
7962 	 * Allocate DMA memory for firmware transfers.
7963 	 * Must be aligned on a 16-byte boundary.
7964 	 */
7965 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
7966 	    16);
7967 	if (err) {
7968 		aprint_error_dev(sc->sc_dev,
7969 		    "could not allocate memory for firmware\n");
7970 		return;
7971 	}
7972 
7973 	/* Allocate "Keep Warm" page, used internally by the card. */
7974 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7975 	if (err) {
7976 		aprint_error_dev(sc->sc_dev,
7977 		    "could not allocate keep warm page\n");
7978 		goto fail1;
7979 	}
7980 
7981 	/* Allocate interrupt cause table (ICT).*/
7982 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
7983 	    1 << IWM_ICT_PADDR_SHIFT);
7984 	if (err) {
7985 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
7986 		goto fail2;
7987 	}
7988 
7989 	/* TX scheduler rings must be aligned on a 1KB boundary. */
7990 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7991 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7992 	if (err) {
7993 		aprint_error_dev(sc->sc_dev,
7994 		    "could not allocate TX scheduler rings\n");
7995 		goto fail3;
7996 	}
7997 
7998 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
7999 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8000 		if (err) {
8001 			aprint_error_dev(sc->sc_dev,
8002 			    "could not allocate TX ring %d\n", txq_i);
8003 			goto fail4;
8004 		}
8005 	}
8006 
8007 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
8008 	if (err) {
8009 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
8010 		goto fail5;
8011 	}
8012 
8013 	/* Clear pending interrupts. */
8014 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
8015 
8016 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8017 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
8018 	    SYSCTL_DESCR("iwm per-controller controls"),
8019 	    NULL, 0, NULL, 0,
8020 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
8021 	    CTL_EOL)) != 0) {
8022 		aprint_normal_dev(sc->sc_dev,
8023 		    "couldn't create iwm per-controller sysctl node\n");
8024 	}
8025 	if (err == 0) {
8026 		int iwm_nodenum = node->sysctl_num;
8027 
8028 		/* Reload firmware sysctl node */
8029 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8030 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
8031 		    SYSCTL_DESCR("Reload firmware"),
8032 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
8033 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
8034 		    CTL_EOL)) != 0) {
8035 			aprint_normal_dev(sc->sc_dev,
8036 			    "couldn't create load_fw sysctl node\n");
8037 		}
8038 	}
8039 
8040 	callout_init(&sc->sc_calib_to, 0);
8041 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
8042 	callout_init(&sc->sc_led_blink_to, 0);
8043 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
8044 #ifndef IEEE80211_NO_HT
8045 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
8046 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
8047 		panic("%s: could not create workqueue: setrates",
8048 		    device_xname(self));
8049 	if (workqueue_create(&sc->sc_bawq, "iwmba",
8050 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
8051 		panic("%s: could not create workqueue: blockack",
8052 		    device_xname(self));
8053 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
8054 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
8055 		panic("%s: could not create workqueue: htprot",
8056 		    device_xname(self));
8057 #endif
8058 
8059 	/*
8060 	 * We can't do normal attach before the file system is mounted
8061 	 * because we cannot read the MAC address without loading the
8062 	 * firmware from disk.  So we postpone until mountroot is done.
8063 	 * Notably, this will require a full driver unload/load cycle
8064 	 * (or reboot) if the firmware is not present when the
8065 	 * hook runs.
8066 	 */
8067 	config_mountroot(self, iwm_attach_hook);
8068 
8069 	return;
8070 
8071 fail5:	while (--txq_i >= 0)
8072 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
8073 fail4:	iwm_dma_contig_free(&sc->sched_dma);
8074 fail3:	if (sc->ict_dma.vaddr != NULL)
8075 		iwm_dma_contig_free(&sc->ict_dma);
8076 fail2:	iwm_dma_contig_free(&sc->kw_dma);
8077 fail1:	iwm_dma_contig_free(&sc->fw_dma);
8078 }
8079 
8080 static int
8081 iwm_config_complete(struct iwm_softc *sc)
8082 {
8083 	device_t self = sc->sc_dev;
8084 	struct ieee80211com *ic = &sc->sc_ic;
8085 	struct ifnet *ifp = &sc->sc_ec.ec_if;
8086 	int err;
8087 
8088 	KASSERT(!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED));
8089 
8090 	err = iwm_preinit(sc);
8091 	if (err)
8092 		return err;
8093 
8094 	/*
8095 	 * Attach interface
8096 	 */
8097 	ic->ic_ifp = ifp;
8098 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
8099 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
8100 	ic->ic_state = IEEE80211_S_INIT;
8101 
8102 	/* Set device capabilities. */
8103 	ic->ic_caps =
8104 	    IEEE80211_C_WEP |		/* WEP */
8105 	    IEEE80211_C_WPA |		/* 802.11i */
8106 #ifdef notyet
8107 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
8108 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
8109 #endif
8110 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
8111 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
8112 
8113 #ifndef IEEE80211_NO_HT
8114 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8115 	ic->ic_htxcaps = 0;
8116 	ic->ic_txbfcaps = 0;
8117 	ic->ic_aselcaps = 0;
8118 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8119 #endif
8120 
8121 	/* all hardware can do 2.4GHz band */
8122 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8123 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8124 
8125 	/* not all hardware can do 5GHz band */
8126 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
8127 		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
8128 
8129 #ifndef IEEE80211_NO_HT
8130 	if (sc->sc_nvm.sku_cap_11n_enable)
8131 		iwm_setup_ht_rates(sc);
8132 #endif
8133 
8134 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
8135 		sc->sc_phyctxt[i].id = i;
8136 	}
8137 
8138 	sc->sc_amrr.amrr_min_success_threshold =  1;
8139 	sc->sc_amrr.amrr_max_success_threshold = 15;
8140 
8141 	/* IBSS channel undefined for now. */
8142 	ic->ic_ibss_chan = &ic->ic_channels[1];
8143 
8144 #if 0
8145 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
8146 #endif
8147 
8148 	ifp->if_softc = sc;
8149 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8150 	ifp->if_init = iwm_init;
8151 	ifp->if_stop = iwm_stop;
8152 	ifp->if_ioctl = iwm_ioctl;
8153 	ifp->if_start = iwm_start;
8154 	ifp->if_watchdog = iwm_watchdog;
8155 	IFQ_SET_READY(&ifp->if_snd);
8156 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8157 
8158 	if_initialize(ifp);
8159 	ieee80211_ifattach(ic);
8160 	/* Use common softint-based if_input */
8161 	ifp->if_percpuq = if_percpuq_create(ifp);
8162 	if_register(ifp);
8163 
8164 	ic->ic_node_alloc = iwm_node_alloc;
8165 
8166 	/* Override 802.11 state transition machine. */
8167 	sc->sc_newstate = ic->ic_newstate;
8168 	ic->ic_newstate = iwm_newstate;
8169 
8170 	/* XXX media locking needs revisiting */
8171 	mutex_init(&sc->sc_media_mtx, MUTEX_DEFAULT, IPL_SOFTNET);
8172 	ieee80211_media_init_with_lock(ic,
8173 	    iwm_media_change, ieee80211_media_status, &sc->sc_media_mtx);
8174 
8175 	ieee80211_announce(ic);
8176 
8177 	iwm_radiotap_attach(sc);
8178 
8179 	if (pmf_device_register(self, NULL, NULL))
8180 		pmf_class_network_register(self, ifp);
8181 	else
8182 		aprint_error_dev(self, "couldn't establish power handler\n");
8183 
8184 	sc->sc_flags |= IWM_FLAG_ATTACHED;
8185 
8186 	return 0;
8187 }
8188 
8189 void
8190 iwm_radiotap_attach(struct iwm_softc *sc)
8191 {
8192 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8193 
8194 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
8195 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
8196 	    &sc->sc_drvbpf);
8197 
8198 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8199 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8200 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
8201 
8202 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
8203 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8204 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
8205 }
8206 
8207 #if 0
8208 static void
8209 iwm_init_task(void *arg)
8210 {
8211 	struct iwm_softc *sc = arg;
8212 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8213 	int s;
8214 
8215 	rw_enter_write(&sc->ioctl_rwl);
8216 	s = splnet();
8217 
8218 	iwm_stop(ifp, 0);
8219 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
8220 		iwm_init(ifp);
8221 
8222 	splx(s);
8223 	rw_exit(&sc->ioctl_rwl);
8224 }
8225 
8226 static void
8227 iwm_wakeup(struct iwm_softc *sc)
8228 {
8229 	pcireg_t reg;
8230 
8231 	/* Clear device-specific "PCI retry timeout" register (41h). */
8232 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8233 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8234 
8235 	iwm_init_task(sc);
8236 }
8237 
8238 static int
8239 iwm_activate(device_t self, enum devact act)
8240 {
8241 	struct iwm_softc *sc = device_private(self);
8242 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8243 
8244 	switch (act) {
8245 	case DVACT_DEACTIVATE:
8246 		if (ifp->if_flags & IFF_RUNNING)
8247 			iwm_stop(ifp, 0);
8248 		return 0;
8249 	default:
8250 		return EOPNOTSUPP;
8251 	}
8252 }
8253 #endif
8254 
8255 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
8256 	NULL, NULL);
8257 
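/*
 * hw.iwm.<unit>.fw_loaded: reports whether firmware is currently
 * loaded; writing 0 clears IWM_FLAG_FW_LOADED so the firmware will be
 * loaded again on the next init.  For example (assuming the
 * per-controller node is named after the device unit, as created in
 * iwm_attach()):
 *	sysctl -w hw.iwm.iwm0.fw_loaded=0
 */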
8258 static int
8259 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
8260 {
8261 	struct sysctlnode node;
8262 	struct iwm_softc *sc;
8263 	int err, t;
8264 
8265 	node = *rnode;
8266 	sc = node.sysctl_data;
8267 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
8268 	node.sysctl_data = &t;
8269 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
8270 	if (err || newp == NULL)
8271 		return err;
8272 
8273 	if (t == 0)
8274 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
8275 	return 0;
8276 }
8277 
8278 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
8279 {
8280 	const struct sysctlnode *rnode;
8281 #ifdef IWM_DEBUG
8282 	const struct sysctlnode *cnode;
8283 #endif /* IWM_DEBUG */
8284 	int rc;
8285 
8286 	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
8287 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
8288 	    SYSCTL_DESCR("iwm global controls"),
8289 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
8290 		goto err;
8291 
8292 	iwm_sysctl_root_num = rnode->sysctl_num;
8293 
8294 #ifdef IWM_DEBUG
8295 	/* control debugging printfs */
8296 	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
8297 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
8298 	    "debug", SYSCTL_DESCR("Enable debugging output"),
8299 	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
8300 		goto err;
8301 #endif /* IWM_DEBUG */
8302 
8303 	return;
8304 
8305  err:
8306 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
8307 }
8308