xref: /openbsd-src/sys/dev/pci/if_iwm.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: if_iwm.c,v 1.132 2016/09/12 10:18:26 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*-
22  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
23  * which were used as the reference documentation for this implementation.
24  *
25  ***********************************************************************
26  *
27  * This file is provided under a dual BSD/GPLv2 license.  When using or
28  * redistributing this file, you may do so under either license.
29  *
30  * GPL LICENSE SUMMARY
31  *
32  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
33  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
34  * Copyright(c) 2016 Intel Deutschland GmbH
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
62  * Copyright(c) 2016 Intel Deutschland GmbH
63  * All rights reserved.
64  *
65  * Redistribution and use in source and binary forms, with or without
66  * modification, are permitted provided that the following conditions
67  * are met:
68  *
69  *  * Redistributions of source code must retain the above copyright
70  *    notice, this list of conditions and the following disclaimer.
71  *  * Redistributions in binary form must reproduce the above copyright
72  *    notice, this list of conditions and the following disclaimer in
73  *    the documentation and/or other materials provided with the
74  *    distribution.
75  *  * Neither the name Intel Corporation nor the names of its
76  *    contributors may be used to endorse or promote products derived
77  *    from this software without specific prior written permission.
78  *
79  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
80  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
81  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
82  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
83  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
84  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
85  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
86  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
87  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
88  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
89  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90  */
91 
92 /*-
93  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
94  *
95  * Permission to use, copy, modify, and distribute this software for any
96  * purpose with or without fee is hereby granted, provided that the above
97  * copyright notice and this permission notice appear in all copies.
98  *
99  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
100  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
101  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
102  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
103  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
104  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
105  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106  */
107 
108 #include "bpfilter.h"
109 
110 #include <sys/param.h>
111 #include <sys/conf.h>
112 #include <sys/kernel.h>
113 #include <sys/malloc.h>
114 #include <sys/mbuf.h>
115 #include <sys/mutex.h>
116 #include <sys/proc.h>
117 #include <sys/rwlock.h>
118 #include <sys/socket.h>
119 #include <sys/sockio.h>
120 #include <sys/systm.h>
121 
122 #include <sys/task.h>
123 #include <machine/bus.h>
124 #include <machine/endian.h>
125 #include <machine/intr.h>
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #if NBPFILTER > 0
132 #include <net/bpf.h>
133 #endif
134 #include <net/if.h>
135 #include <net/if_dl.h>
136 #include <net/if_media.h>
137 
138 #include <netinet/in.h>
139 #include <netinet/if_ether.h>
140 
141 #include <net80211/ieee80211_var.h>
142 #include <net80211/ieee80211_amrr.h>
143 #include <net80211/ieee80211_radiotap.h>
144 
145 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
146 
147 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
148 
149 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
150 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
151 
152 #ifdef IWM_DEBUG
153 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
154 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
155 int iwm_debug = 1;
156 #else
157 #define DPRINTF(x)	do { ; } while (0)
158 #define DPRINTFN(n, x)	do { ; } while (0)
159 #endif
160 
161 #include <dev/pci/if_iwmreg.h>
162 #include <dev/pci/if_iwmvar.h>
163 
/*
 * Channel numbers which may appear in the device's NVM channel list
 * (used for devices other than the 8000 series; see
 * iwm_nvm_channels_8000 below). Passed to iwm_init_channel_map().
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
172 
/*
 * Channel numbers which may appear in the NVM channel list of
 * 8000-series devices; a superset of iwm_nvm_channels above.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
181 
182 #define IWM_NUM_2GHZ_CHANNELS	14
183 
/*
 * Rate table mapping net80211 rates to the hardware's PLCP codes.
 * 'rate' is in units of 500 kbit/s (so 2 == 1 Mbit/s, 108 == 54 Mbit/s;
 * the final entry, 128, is an HT MCS 7 placeholder with no legacy
 * equivalent). 'plcp' is the legacy PLCP value and 'ht_plcp' the
 * corresponding HT SISO MCS PLCP value, or the _INV_ marker where no
 * HT mapping exists (CCK rates and 9 Mbit/s).
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
204 #define IWM_RIDX_CCK	0
205 #define IWM_RIDX_OFDM	4
206 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
207 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
208 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
209 
/*
 * Convert an MCS index into an iwm_rates[] index.
 * Indexed by HT MCS number (0-7); yields the row of iwm_rates[]
 * whose ht_plcp carries that MCS.
 */
const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
221 
/* One section of device non-volatile memory, as read by iwm_nvm_read_section(). */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
226 
227 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
228 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
229 	    uint8_t *, size_t);
230 int	iwm_set_default_calib(struct iwm_softc *, const void *);
231 void	iwm_fw_info_free(struct iwm_fw_info *);
232 int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
233 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
234 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
235 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
236 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
237 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
238 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
239 int	iwm_nic_lock(struct iwm_softc *);
240 void	iwm_nic_unlock(struct iwm_softc *);
241 void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
242 	    uint32_t);
243 void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
244 void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
245 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
246 	    bus_size_t);
247 void	iwm_dma_contig_free(struct iwm_dma_info *);
248 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
249 void	iwm_disable_rx_dma(struct iwm_softc *);
250 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
251 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
253 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
254 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
255 void	iwm_enable_rfkill_int(struct iwm_softc *);
256 int	iwm_check_rfkill(struct iwm_softc *);
257 void	iwm_enable_interrupts(struct iwm_softc *);
258 void	iwm_restore_interrupts(struct iwm_softc *);
259 void	iwm_disable_interrupts(struct iwm_softc *);
260 void	iwm_ict_reset(struct iwm_softc *);
261 int	iwm_set_hw_ready(struct iwm_softc *);
262 int	iwm_prepare_card_hw(struct iwm_softc *);
263 void	iwm_apm_config(struct iwm_softc *);
264 int	iwm_apm_init(struct iwm_softc *);
265 void	iwm_apm_stop(struct iwm_softc *);
266 int	iwm_allow_mcast(struct iwm_softc *);
267 int	iwm_start_hw(struct iwm_softc *);
268 void	iwm_stop_device(struct iwm_softc *);
269 void	iwm_nic_config(struct iwm_softc *);
270 int	iwm_nic_rx_init(struct iwm_softc *);
271 int	iwm_nic_tx_init(struct iwm_softc *);
272 int	iwm_nic_init(struct iwm_softc *);
273 int	iwm_enable_txq(struct iwm_softc *, int, int, int);
274 int	iwm_post_alive(struct iwm_softc *);
275 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
276 	    uint16_t);
277 int	iwm_phy_db_set_section(struct iwm_softc *,
278 	    struct iwm_calib_res_notif_phy_db *);
279 int	iwm_is_valid_channel(uint16_t);
280 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
281 uint16_t iwm_channel_id_to_papd(uint16_t);
282 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
283 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
284 	    uint16_t *, uint16_t);
285 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
286 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
287 	    uint8_t);
288 int	iwm_send_phy_db_data(struct iwm_softc *);
289 void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
290 	    struct iwm_time_event_cmd_v1 *);
291 int	iwm_send_time_event_cmd(struct iwm_softc *,
292 	    const struct iwm_time_event_cmd_v2 *);
293 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
294 	    uint32_t);
295 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
296 	    uint8_t *, uint16_t *);
297 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
298 	    uint16_t *, size_t);
299 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
300 	    const uint8_t *nvm_channels, size_t nchan);
301 void	iwm_setup_ht_rates(struct iwm_softc *);
302 void	iwm_htprot_task(void *);
303 void	iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
304 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
305 	    uint8_t);
306 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
307 	    uint8_t);
308 void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
309 	    uint16_t, int);
310 #ifdef notyet
311 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
312 	    uint8_t);
313 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
314 	    uint8_t);
315 #endif
316 void	iwm_ba_task(void *);
317 
318 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
319 	    const uint16_t *, const uint16_t *,
320 	    const uint16_t *, const uint16_t *,
321 	    const uint16_t *);
322 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
323 	    const uint16_t *, const uint16_t *);
324 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
325 int	iwm_nvm_init(struct iwm_softc *);
326 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
327 	    uint32_t);
328 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
329 	    uint32_t);
330 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
331 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
332 	    int , int *);
333 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
334 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
335 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
336 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
337 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
338 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
339 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
340 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
341 int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
342 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
343 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
344 	    struct iwm_rx_data *);
345 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
346 void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
347 	    struct iwm_rx_data *);
348 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
349 	    struct iwm_node *);
350 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
351 	    struct iwm_rx_data *);
352 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
353 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
354 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
355 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
356 	    struct ieee80211_channel *, uint8_t, uint8_t);
357 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
358 	    uint8_t, uint32_t, uint32_t);
359 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
360 int	iwm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t, uint16_t,
361 	    const void *);
362 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
363 	    uint32_t *);
364 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint8_t, uint16_t,
365 	    const void *, uint32_t *);
366 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
367 void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
368 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
369 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
370 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
371 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
372 void	iwm_led_enable(struct iwm_softc *);
373 void	iwm_led_disable(struct iwm_softc *);
374 int	iwm_led_is_enabled(struct iwm_softc *);
375 void	iwm_led_blink_timeout(void *);
376 void	iwm_led_blink_start(struct iwm_softc *);
377 void	iwm_led_blink_stop(struct iwm_softc *);
378 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
379 	    struct iwm_beacon_filter_cmd *);
380 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
381 	    struct iwm_beacon_filter_cmd *);
382 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
383 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
384 	    struct iwm_mac_power_cmd *);
385 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
386 int	iwm_power_update_device(struct iwm_softc *);
387 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
388 int	iwm_disable_beacon_filter(struct iwm_softc *);
389 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
390 int	iwm_add_aux_sta(struct iwm_softc *);
391 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
392 uint32_t iwm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
393 uint32_t iwm_scan_suspend_time(struct iwm_softc *, int);
394 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
395 uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
396 uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
397 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
398 	    struct iwm_scan_channel_cfg_lmac *, int);
399 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
400 int	iwm_lmac_scan(struct iwm_softc *);
401 int	iwm_config_umac_scan(struct iwm_softc *);
402 int	iwm_umac_scan(struct iwm_softc *);
403 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
404 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
405 	    struct iwm_mac_ctx_cmd *, uint32_t);
406 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
407 	    struct iwm_mac_data_sta *, int);
408 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
409 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
410 int	iwm_auth(struct iwm_softc *);
411 int	iwm_assoc(struct iwm_softc *);
412 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
413 void	iwm_calib_timeout(void *);
414 void	iwm_setrates(struct iwm_node *);
415 int	iwm_media_change(struct ifnet *);
416 void	iwm_newstate_task(void *);
417 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
418 void	iwm_endscan_cb(void *);
419 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
420 	    struct ieee80211_node *);
421 int	iwm_sf_config(struct iwm_softc *, int);
422 int	iwm_send_bt_init_conf(struct iwm_softc *);
423 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
424 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
425 int	iwm_init_hw(struct iwm_softc *);
426 int	iwm_init(struct ifnet *);
427 void	iwm_start(struct ifnet *);
428 void	iwm_stop(struct ifnet *, int);
429 void	iwm_watchdog(struct ifnet *);
430 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
431 #ifdef IWM_DEBUG
432 const char *iwm_desc_lookup(uint32_t);
433 void	iwm_nic_error(struct iwm_softc *);
434 void	iwm_nic_umac_error(struct iwm_softc *);
435 #endif
436 void	iwm_notif_intr(struct iwm_softc *);
437 int	iwm_intr(void *);
438 int	iwm_match(struct device *, void *, void *);
439 int	iwm_preinit(struct iwm_softc *);
440 void	iwm_attach_hook(struct device *);
441 void	iwm_attach(struct device *, struct device *, void *);
442 void	iwm_init_task(void *);
443 int	iwm_activate(struct device *, int);
444 void	iwm_wakeup(struct iwm_softc *);
445 
446 #if NBPFILTER > 0
447 void	iwm_radiotap_attach(struct iwm_softc *);
448 #endif
449 
450 int
451 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
452 {
453 	struct iwm_fw_cscheme_list *l = (void *)data;
454 
455 	if (dlen < sizeof(*l) ||
456 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
457 		return EINVAL;
458 
459 	/* we don't actually store anything for now, always use s/w crypto */
460 
461 	return 0;
462 }
463 
464 int
465 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
466     uint8_t *data, size_t dlen)
467 {
468 	struct iwm_fw_sects *fws;
469 	struct iwm_fw_onesect *fwone;
470 
471 	if (type >= IWM_UCODE_TYPE_MAX)
472 		return EINVAL;
473 	if (dlen < sizeof(uint32_t))
474 		return EINVAL;
475 
476 	fws = &sc->sc_fw.fw_sects[type];
477 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
478 		return EINVAL;
479 
480 	fwone = &fws->fw_sect[fws->fw_count];
481 
482 	/* first 32bit are device load offset */
483 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
484 
485 	/* rest is data */
486 	fwone->fws_data = data + sizeof(uint32_t);
487 	fwone->fws_len = dlen - sizeof(uint32_t);
488 
489 	fws->fw_count++;
490 	fws->fw_totlen += fwone->fws_len;
491 
492 	return 0;
493 }
494 
/*
 * On-image layout of an IWM_UCODE_TLV_DEF_CALIB TLV: default
 * calibration triggers for one ucode image type. All fields are
 * little-endian as stored in the firmware file.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* enum iwm_ucode_type value */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
499 
500 int
501 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
502 {
503 	const struct iwm_tlv_calib_data *def_calib = data;
504 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
505 
506 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
507 		return EINVAL;
508 
509 	sc->sc_default_calib[ucode_type].flow_trigger =
510 	    def_calib->calib.flow_trigger;
511 	sc->sc_default_calib[ucode_type].event_trigger =
512 	    def_calib->calib.event_trigger;
513 
514 	return 0;
515 }
516 
/*
 * Release the raw firmware image and forget all parsed sections.
 * fw->fw_status is deliberately left untouched: the caller
 * (iwm_read_firmware) owns that state machine.
 */
void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	/* Section entries pointed into fw_rawdata and are now stale. */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
526 
/*
 * Load the firmware image with loadfirmware(9) and parse its TLV
 * stream into sc->sc_fw (code sections) and the softc capability
 * fields (sc_capaflags, sc_enabled_capa, sc_capa_*, sc_fwver, ...).
 *
 * Concurrent callers are serialized through fw->fw_status; waiters
 * sleep on &sc->sc_fw and are woken when parsing finishes. A
 * previously loaded image is kept unless an INIT image is requested.
 *
 * Returns 0 on success or an errno value; on failure the raw image
 * is freed and fw_status is reset to NONE.
 */
int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	/* Already loaded; only an INIT request forces a re-read. */
	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_TYPE_INIT)
		return 0;

	/* Wait for any other thread currently loading the firmware. */
	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	/* Reset capabilities to defaults before parsing the TLVs. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/* A valid image begins with a zero word followed by the magic. */
	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	/* Walk the TLV stream; each entry is padded to 4 bytes. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			/* PAN capability TLV carries no payload. */
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Only 1 or 2 CPUs are valid; value is not stored. */
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				/*
				 * NOTE(review): jumps out with err == 0, so
				 * any remaining TLVs are skipped rather than
				 * this one alone being ignored -- confirm
				 * intentional.
				 */
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				/* See NOTE above: also skips remaining TLVs. */
				goto parse_out;
			}
			/* Record each set capability bit of this 32-bit word. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			/* Overrides the version derived from uhdr->ver. */
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			/* Unknown TLV types are treated as fatal. */
			err = EINVAL;
			goto parse_out;
		}

		/* Advance past the payload, keeping 4-byte alignment. */
		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

	/* The driver requires firmware with modern power-ops support. */
	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		printf("%s: device uses unsupported power ops\n", DEVNAME(sc));
		err = ENOTSUP;
	}

 out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	/* Wake any thread sleeping in the INPROGRESS loop above. */
	wakeup(&sc->sc_fw);

	return err;
}
792 
/*
 * Read a periphery register via the indirect HBUS access window:
 * write the target address (low 20 bits, plus control bits in the
 * top byte) to PRPH_RADDR, then read the value from PRPH_RDAT.
 * Caller must hold the NIC lock (iwm_nic_lock).
 */
uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
801 
/*
 * Write a periphery register via the indirect HBUS access window;
 * mirror image of iwm_read_prph(). Caller must hold the NIC lock.
 */
void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
810 
811 int
812 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
813 {
814 	int offs, err = 0;
815 	uint32_t *vals = buf;
816 
817 	if (iwm_nic_lock(sc)) {
818 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
819 		for (offs = 0; offs < dwords; offs++)
820 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
821 		iwm_nic_unlock(sc);
822 	} else {
823 		err = EBUSY;
824 	}
825 	return err;
826 }
827 
828 int
829 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
830 {
831 	int offs;
832 	const uint32_t *vals = buf;
833 
834 	if (iwm_nic_lock(sc)) {
835 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
836 		/* WADDR auto-increments */
837 		for (offs = 0; offs < dwords; offs++) {
838 			uint32_t val = vals ? vals[offs] : 0;
839 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
840 		}
841 		iwm_nic_unlock(sc);
842 	} else {
843 		return EBUSY;
844 	}
845 	return 0;
846 }
847 
/* Convenience wrapper: write a single 32-bit word of device memory. */
int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
853 
/*
 * Poll register 'reg' until (value & mask) == (bits & mask), waiting
 * 10 microseconds between reads. 'timo' is the total timeout in
 * microseconds. Returns 1 when the condition is met, 0 on timeout.
 */
int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	uint32_t want = bits & mask;

	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == want)
			return 1;
		if (timo < 10)
			return 0;
		timo -= 10;
		DELAY(10);
	}
}
869 
/*
 * Request MAC access and wait for the device clocks to become ready so
 * that CSR/PRPH/SRAM accesses are safe.  Returns 1 on success, 0 on
 * timeout.  Pair with iwm_nic_unlock().
 */
int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	/* Ask the device to stay awake while we access it. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* NOTE(review): presumably a settle delay the 8000 family needs. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
	    	rv = 1;
	} else {
		/* Timed out; force an NMI in the hope of recovering. */
		printf("%s: device timeout\n", DEVNAME(sc));
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
893 
/* Drop the MAC access request taken by iwm_nic_lock(). */
void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
900 
/*
 * Read-modify-write a PRPH register: keep only the bits in 'mask',
 * then OR in 'bits'.  Silently does nothing if the NIC lock cannot
 * be obtained.
 */
void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t v;

	/* XXX: no error path? */
	if (!iwm_nic_lock(sc))
		return;
	v = (iwm_read_prph(sc, reg) & mask) | bits;
	iwm_write_prph(sc, reg, v);
	iwm_nic_unlock(sc);
}
915 
/* Set 'bits' in PRPH register 'reg', preserving all other bits. */
void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
921 
/* Clear 'bits' in PRPH register 'reg', preserving all other bits. */
void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
927 
/*
 * Allocate a physically contiguous, 'alignment'-aligned DMA buffer of
 * 'size' bytes, map it into kernel virtual memory, and zero it.  On
 * failure, all partially acquired resources are released via
 * iwm_dma_contig_free().
 */
int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	/* One segment only: the buffer must be physically contiguous. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Zero the buffer and make the zeroes visible to the device. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
968 
/*
 * Release a buffer allocated by iwm_dma_contig_alloc().  Safe to call
 * on a partially initialized iwm_dma_info (NULL map and/or vaddr).
 */
void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
985 
/*
 * Allocate the RX descriptor ring, the RX status area, and one DMA map
 * plus receive buffer per ring slot.  On any failure everything is
 * torn down via iwm_free_rx_ring().
 */
int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create a DMA map per slot and attach a receive buffer to it. */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1036 
/*
 * Disable the RX DMA channel and wait (up to ~10ms) for it to report
 * idle.  Silently does nothing if the NIC lock cannot be obtained.
 */
void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}
1053 
/* Rewind the RX ring and clear the shared RX status area. */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	/* Sync around the memset so the cleared status reaches the device. */
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);

}
1065 
/*
 * Free all RX ring resources: descriptor/status DMA memory, and each
 * slot's mbuf and DMA map.  Tolerates partially allocated rings.
 */
void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
1088 
/*
 * Allocate TX ring 'qid': the TFD descriptor array, and — for rings up
 * to and including the command queue — per-slot device command slots
 * and DMA maps.  On any failure everything is torn down via
 * iwm_free_tx_ring().
 */
int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute each slot's command and scratch physical addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	/* The loop must have walked exactly over the cmd DMA area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1156 
/*
 * Drop all queued frames from a TX ring, clear its descriptors, and
 * rewind the ring state.  Also clears the queue-full bit for this qid.
 */
void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1181 
/*
 * Free all TX ring resources: descriptor and command DMA memory, and
 * each slot's mbuf and DMA map.  Tolerates partially allocated rings.
 */
void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
1204 
/* Mask all interrupts except the RF-kill switch interrupt. */
void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1211 
1212 int
1213 iwm_check_rfkill(struct iwm_softc *sc)
1214 {
1215 	uint32_t v;
1216 	int s;
1217 	int rv;
1218 
1219 	s = splnet();
1220 
1221 	/*
1222 	 * "documentation" is not really helpful here:
1223 	 *  27:	HW_RF_KILL_SW
1224 	 *	Indicates state of (platform's) hardware RF-Kill switch
1225 	 *
1226 	 * But apparently when it's off, it's on ...
1227 	 */
1228 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1229 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1230 	if (rv) {
1231 		sc->sc_flags |= IWM_FLAG_RFKILL;
1232 	} else {
1233 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1234 	}
1235 
1236 	splx(s);
1237 	return rv;
1238 }
1239 
/* Enable the default interrupt set and remember it in sc_intmask. */
void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1246 
/* Re-apply the last interrupt mask stored in sc_intmask. */
void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1252 
/* Mask all device interrupts and acknowledge any pending ones. */
void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1266 
/*
 * Reset the interrupt cause table (ICT): clear its DMA area, point the
 * device at it, and switch the driver to ICT interrupt mode with
 * interrupts re-enabled.
 */
void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending before re-enabling. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1288 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Tell the device the driver is ready and poll for the NIC_READY bit.
 * On success, also set the "OS alive" mailbox bit.  Returns nonzero
 * when the hardware reported ready within the timeout.
 */
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT
1309 
1310 int
1311 iwm_prepare_card_hw(struct iwm_softc *sc)
1312 {
1313 	int t = 0;
1314 
1315 	if (iwm_set_hw_ready(sc))
1316 		return 0;
1317 
1318 	DELAY(100);
1319 
1320 	/* If HW is not ready, prepare the conditions to check again */
1321 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1322 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1323 
1324 	do {
1325 		if (iwm_set_hw_ready(sc))
1326 			return 0;
1327 		DELAY(200);
1328 		t += 200;
1329 	} while (t < 150000);
1330 
1331 	return ETIMEDOUT;
1332 }
1333 
/*
 * Configure L0S power-saving according to the PCIe link's ASPM L1
 * capability, mirroring what the reference (Linux) driver does.
 */
void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	if (reg & PCI_PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1351 
1352 /*
1353  * Start up NIC's basic functionality after it has been reset
1354  * e.g. after platform boot or shutdown.
1355  * NOTE:  This does not load uCode nor start the embedded processor
1356  */
int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	/* Nothing to undo on failure; just report it. */
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
1460 
/* Stop the device's busmaster DMA and wait for it to confirm. */
void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));
}
1472 
/*
 * Basic hardware bring-up: prepare the card, soft-reset it, run APM
 * init, then arm the RF-kill interrupt and sample the switch state.
 */
int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1495 
1496 
/*
 * Shut the device down: mask interrupts, stop the TX scheduler and all
 * DMA channels, drain the rings, power down DMA clocks, and reset the
 * on-board processor.  The RF-kill interrupt stays armed afterwards.
 */
void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait (up to ~4ms) for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1559 
/*
 * Program the hardware interface configuration register with the MAC
 * revision (step/dash) and the radio configuration extracted from the
 * firmware's PHY config.
 */
void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack radio type/step/dash from the firmware PHY config word. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1595 
/*
 * Program the RX DMA engine: point it at the descriptor ring and
 * status area, configure buffer size and interrupt behavior, and set
 * the initial write pointer.  Returns EBUSY if the NIC lock cannot be
 * obtained.
 */
int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	iwm_disable_rx_dma(sc);
	/* Reset all RX pointers before re-enabling the channel. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1644 
/*
 * Program the TX side: deactivate the scheduler, install the "keep
 * warm" page, and point the device at each TX ring's descriptor array.
 * Returns EBUSY if the NIC lock cannot be obtained.
 */
int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1673 
1674 int
1675 iwm_nic_init(struct iwm_softc *sc)
1676 {
1677 	int err;
1678 
1679 	iwm_apm_init(sc);
1680 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1681 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1682 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1683 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1684 
1685 	iwm_nic_config(sc);
1686 
1687 	err = iwm_nic_rx_init(sc);
1688 	if (err)
1689 		return err;
1690 
1691 	err = iwm_nic_tx_init(sc);
1692 	if (err)
1693 		return err;
1694 
1695 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1696 
1697 	return 0;
1698 }
1699 
/*
 * Per-access-category TX FIFO numbers; indexed by EDCA access category
 * (entry names suggest VO, VI, BE, BK order — confirm against callers).
 */
const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
1706 
/*
 * Activate TX queue 'qid' and bind it to TX FIFO 'fifo'.  The command
 * queue is configured directly via scheduler registers; other queues
 * are configured by sending an IWM_SCD_QUEUE_CFG command to the
 * firmware.  Returns 0 on success or an errno.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the queue while reconfiguring it. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT
		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Re-enable the queue, bound to the requested FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int err;

		/* Must drop the NIC lock across the firmware command. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
		    sizeof(cmd), &cmd);
		if (err)
			return err;

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the raw queue number into SCD_EN_CTRL,
	 * while other scheduler registers above use (1 << qid) masks;
	 * for qid 0 this is a no-op.  Looks suspicious — confirm against
	 * the reference (Linux iwlwifi) driver before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	return 0;
}
1772 
/*
 * Finish bring-up after the firmware's "alive" notification: reset the
 * ICT table, clear the TX scheduler state in SRAM, install the
 * scheduler ring base, enable the command queue and all TX DMA
 * channels.  Returns 0 on success or an errno.
 */
int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* Drop the lock: iwm_enable_txq() manages locking itself. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return err;
}
1834 
1835 struct iwm_phy_db_entry *
1836 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
1837 {
1838 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1839 
1840 	if (type >= IWM_PHY_DB_MAX)
1841 		return NULL;
1842 
1843 	switch (type) {
1844 	case IWM_PHY_DB_CFG:
1845 		return &phy_db->cfg;
1846 	case IWM_PHY_DB_CALIB_NCH:
1847 		return &phy_db->calib_nch;
1848 	case IWM_PHY_DB_CALIB_CHG_PAPD:
1849 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1850 			return NULL;
1851 		return &phy_db->calib_ch_group_papd[chg_id];
1852 	case IWM_PHY_DB_CALIB_CHG_TXP:
1853 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1854 			return NULL;
1855 		return &phy_db->calib_ch_group_txp[chg_id];
1856 	default:
1857 		return NULL;
1858 	}
1859 	return NULL;
1860 }
1861 
/*
 * Store a calibration-result section received from the firmware into
 * the phy db, replacing any previously stored copy.  Returns EINVAL
 * for an unknown section, ENOMEM if allocation fails.
 */
int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif)
{
	uint16_t type = le16toh(phy_db_notif->type);
	uint16_t size  = le16toh(phy_db_notif->length);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/*
	 * Per-channel-group sections carry the group index as the first
	 * 16-bit word of the payload.  NOTE(review): the cast assumes
	 * 'data' is suitably aligned for uint16_t — confirm.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previously stored copy of this section. */
	if (entry->data)
		free(entry->data, M_DEVBUF, entry->size);
	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	return 0;
}
1891 
/*
 * Return nonzero if 'ch_id' is a channel number the phy db knows
 * about: 2GHz channels up to 14, or the 5GHz channel sets 36-64 and
 * 100-140 (multiples of 4) and 145-165 (4n+1).
 */
int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64 && (ch_id % 4) == 0)
		return 1;
	if (ch_id >= 100 && ch_id <= 140 && (ch_id % 4) == 0)
		return 1;
	if (ch_id >= 145 && ch_id <= 165 && (ch_id % 4) == 1)
		return 1;
	return 0;
}
1902 
/*
 * Map a channel number to its index in the phy db channel table, or
 * 0xff if the channel number is not valid.
 */
uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	/* 2GHz channels come first; each 5GHz band follows contiguously. */
	if (ch_id <= 14)
		idx = ch_id - 1;
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;
	else
		idx = (ch_id - 13) / 4;

	return idx;
}
1917 
1918 
/*
 * Map a channel number to its PAPD calibration channel group, or 0xff
 * if the channel number is not valid.
 */
uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	/* Group 0: 2GHz; 1: 36-64; 2: 100-140; 3: everything above. */
	if (ch_id >= 1 && ch_id <= 14)
		return 0;
	if (ch_id >= 36 && ch_id <= 64)
		return 1;
	if (ch_id >= 100 && ch_id <= 140)
		return 2;
	return 3;
}
1933 
/*
 * Map a channel number to its TX-power calibration channel group by
 * scanning the stored TXP groups for the first one whose maximum
 * channel index covers it.  Returns 0xff if the channel is invalid or
 * no group matches.
 */
uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group the max channel
		 * of which is higher than the requested channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
1958 
1959 int
1960 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
1961     uint16_t *size, uint16_t ch_id)
1962 {
1963 	struct iwm_phy_db_entry *entry;
1964 	uint16_t ch_group_id = 0;
1965 
1966 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
1967 		ch_group_id = iwm_channel_id_to_papd(ch_id);
1968 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
1969 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
1970 
1971 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
1972 	if (!entry)
1973 		return EINVAL;
1974 
1975 	*data = entry->data;
1976 	*size = entry->size;
1977 
1978 	return 0;
1979 }
1980 
1981 int
1982 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
1983     void *data)
1984 {
1985 	struct iwm_phy_db_cmd phy_db_cmd;
1986 	struct iwm_host_cmd cmd = {
1987 		.id = IWM_PHY_DB_CMD,
1988 		.flags = IWM_CMD_ASYNC,
1989 	};
1990 
1991 	phy_db_cmd.type = le16toh(type);
1992 	phy_db_cmd.length = le16toh(length);
1993 
1994 	cmd.data[0] = &phy_db_cmd;
1995 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
1996 	cmd.data[1] = data;
1997 	cmd.len[1] = length;
1998 
1999 	return iwm_send_cmd(sc, &cmd);
2000 }
2001 
2002 int
2003 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2004     uint8_t max_ch_groups)
2005 {
2006 	uint16_t i;
2007 	int err;
2008 	struct iwm_phy_db_entry *entry;
2009 
2010 	for (i = 0; i < max_ch_groups; i++) {
2011 		entry = iwm_phy_db_get_section(sc, type, i);
2012 		if (!entry)
2013 			return EINVAL;
2014 
2015 		if (!entry->size)
2016 			continue;
2017 
2018 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2019 		if (err)
2020 			return err;
2021 
2022 		DELAY(1000);
2023 	}
2024 
2025 	return 0;
2026 }
2027 
2028 int
2029 iwm_send_phy_db_data(struct iwm_softc *sc)
2030 {
2031 	uint8_t *data = NULL;
2032 	uint16_t size = 0;
2033 	int err;
2034 
2035 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2036 	if (err)
2037 		return err;
2038 
2039 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2040 	if (err)
2041 		return err;
2042 
2043 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2044 	    &data, &size, 0);
2045 	if (err)
2046 		return err;
2047 
2048 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2049 	if (err)
2050 		return err;
2051 
2052 	err = iwm_phy_db_send_all_channel_groups(sc,
2053 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2054 	if (err)
2055 		return err;
2056 
2057 	err = iwm_phy_db_send_all_channel_groups(sc,
2058 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2059 	if (err)
2060 		return err;
2061 
2062 	return 0;
2063 }
2064 
2065 /*
2066  * For the high priority TE use a time event type that has similar priority to
2067  * the FW's action scan priority.
2068  */
2069 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2070 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2071 
2072 /* used to convert from time event API v2 to v1 */
2073 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2074 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
2075 static inline uint16_t
2076 iwm_te_v2_get_notify(uint16_t policy)
2077 {
2078 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2079 }
2080 
2081 static inline uint16_t
2082 iwm_te_v2_get_dep_policy(uint16_t policy)
2083 {
2084 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2085 		IWM_TE_V2_PLACEMENT_POS;
2086 }
2087 
2088 static inline uint16_t
2089 iwm_te_v2_get_absence(uint16_t policy)
2090 {
2091 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2092 }
2093 
/*
 * Convert a version 2 time event command into the version 1 layout,
 * for firmware lacking the v2 time event API.  Fields common to both
 * versions are copied verbatim (already little-endian).
 */
void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 uses a different magic value for "repeat endlessly". */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* The packed v2 policy field is split into separate v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2117 
2118 int
2119 iwm_send_time_event_cmd(struct iwm_softc *sc,
2120     const struct iwm_time_event_cmd_v2 *cmd)
2121 {
2122 	struct iwm_time_event_cmd_v1 cmd_v1;
2123 
2124 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2125 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
2126 		    0, sizeof(*cmd), cmd);
2127 
2128 	iwm_te_v2_to_v1(cmd, &cmd_v1);
2129 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0,
2130 	    sizeof(cmd_v1), &cmd_v1);
2131 }
2132 
2133 void
2134 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2135     uint32_t duration, uint32_t max_delay)
2136 {
2137 	struct iwm_time_event_cmd_v2 time_cmd;
2138 
2139 	memset(&time_cmd, 0, sizeof(time_cmd));
2140 
2141 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2142 	time_cmd.id_and_color =
2143 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2144 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2145 
2146 	time_cmd.apply_time = htole32(0);
2147 
2148 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2149 	time_cmd.max_delay = htole32(max_delay);
2150 	/* TODO: why do we need to interval = bi if it is not periodic? */
2151 	time_cmd.interval = htole32(1);
2152 	time_cmd.duration = htole32(duration);
2153 	time_cmd.repeat = 1;
2154 	time_cmd.policy
2155 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2156 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2157 		IWM_T2_V2_START_IMMEDIATELY);
2158 
2159 	iwm_send_time_event_cmd(sc, &time_cmd);
2160 }
2161 
2162 /*
2163  * NVM read access and content parsing.  We do not support
2164  * external NVM or writing NVM.
2165  */
2166 
2167 /* list of NVM sections we are allowed/need to read */
const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,	/* used by the 8000 family only */
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
2178 
2179 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2180 #define IWM_MAX_NVM_SECTION_SIZE	8192
2181 
2182 #define IWM_NVM_WRITE_OPCODE 1
2183 #define IWM_NVM_READ_OPCODE 0
2184 
2185 int
2186 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2187     uint16_t length, uint8_t *data, uint16_t *len)
2188 {
2189 	offset = 0;
2190 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2191 		.offset = htole16(offset),
2192 		.length = htole16(length),
2193 		.type = htole16(section),
2194 		.op_code = IWM_NVM_READ_OPCODE,
2195 	};
2196 	struct iwm_nvm_access_resp *nvm_resp;
2197 	struct iwm_rx_packet *pkt;
2198 	struct iwm_host_cmd cmd = {
2199 		.id = IWM_NVM_ACCESS_CMD,
2200 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2201 		.data = { &nvm_access_cmd, },
2202 	};
2203 	int err, offset_read;
2204 	size_t bytes_read;
2205 	uint8_t *resp_data;
2206 
2207 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2208 
2209 	err = iwm_send_cmd(sc, &cmd);
2210 	if (err)
2211 		return err;
2212 
2213 	pkt = cmd.resp_pkt;
2214 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2215 		err = EIO;
2216 		goto exit;
2217 	}
2218 
2219 	/* Extract NVM response */
2220 	nvm_resp = (void *)pkt->data;
2221 
2222 	err = le16toh(nvm_resp->status);
2223 	bytes_read = le16toh(nvm_resp->length);
2224 	offset_read = le16toh(nvm_resp->offset);
2225 	resp_data = nvm_resp->data;
2226 	if (err) {
2227 		err = EINVAL;
2228 		goto exit;
2229 	}
2230 
2231 	if (offset_read != offset) {
2232 		err = EINVAL;
2233 		goto exit;
2234 	}
2235 
2236 	if (bytes_read > length) {
2237 		err = EINVAL;
2238 		goto exit;
2239 	}
2240 
2241 	memcpy(data + offset, resp_data, bytes_read);
2242 	*len = bytes_read;
2243 
2244  exit:
2245 	iwm_free_resp(sc, &cmd);
2246 	return err;
2247 }
2248 
2249 /*
2250  * Reads an NVM section completely.
2251  * NICs prior to 7000 family doesn't have a real NVM, but just read
2252  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2253  * by uCode, we need to manually check in this case that we don't
2254  * overflow and try to read more than the EEPROM size.
2255  */
int
iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
    uint16_t *len, size_t max_len)
{
	uint16_t chunklen, seglen;
	int err = 0;

	/* seglen starts equal to chunklen so the loop runs at least once. */
	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	/* *len accumulates the bytes read and doubles as the next offset. */
	*len = 0;

	/* Read NVM chunks until exhausted (reading less than requested) */
	while (seglen == chunklen && *len < max_len) {
		err = iwm_nvm_read_chunk(sc,
		    section, *len, chunklen, data, &seglen);
		if (err)
			return err;

		*len += seglen;
	}

	return err;
}
2278 
2279 uint8_t
2280 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2281 {
2282 	uint8_t tx_ant;
2283 
2284 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2285 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2286 
2287 	if (sc->sc_nvm.valid_tx_ant)
2288 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2289 
2290 	return tx_ant;
2291 }
2292 
2293 uint8_t
2294 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2295 {
2296 	uint8_t rx_ant;
2297 
2298 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2299 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2300 
2301 	if (sc->sc_nvm.valid_rx_ant)
2302 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2303 
2304 	return rx_ant;
2305 }
2306 
/*
 * Populate ic->ic_channels[] from NVM channel data.  nvm_ch_flags
 * holds one little-endian flag word per channel; nvm_channels maps
 * each index to an IEEE channel number; nchan is the number of
 * entries in both arrays.
 */
void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, size_t nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Treat 5GHz channels as invalid if the SKU disables 5GHz. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
			continue;

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		/* Indices beyond the 2GHz block are 5GHz channels. */
		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not marked active may only be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
	}
}
2354 
/*
 * Announce the HT (802.11n) MCS rates this device can receive.
 */
void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	/* MIMO rates are disabled for now; see sku_cap_mimo_disable. */
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
2375 
2376 #define IWM_MAX_RX_BA_SESSIONS 16
2377 
/*
 * Start (start=1) or tear down (start=0) an Rx block ack session for
 * TID 'tid' by modifying the firmware's station entry.  'ssn' is the
 * starting sequence number, used only when starting a session.  On
 * firmware success the driver's session counter is updated.
 */
void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (void *)ni;
	int err, s;
	uint32_t status;

	/* Refuse to start a session beyond the supported maximum. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);
	if (err)
		return;

	/* Keep the driver's session count in sync with the firmware. */
	if (status == IWM_ADD_STA_SUCCESS) {
		s = splnet();
		if (start)
			sc->sc_rx_ba_sessions++;
		else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
		splx(s);
	}
}
2421 
/*
 * Task context for pushing updated HT protection settings to the
 * firmware; scheduled by iwm_update_htprot().
 */
void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err;

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
	if (err)
		printf("%s: could not change HT protection: error %d\n",
		    DEVNAME(sc), err);
}
2436 
2437 /*
2438  * This function is called by upper layer when HT protection settings in
2439  * beacons have changed.
2440  */
void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer the firmware update to task context. */
	task_add(systq, &sc->htprot_task);
}
2449 
2450 void
2451 iwm_ba_task(void *arg)
2452 {
2453 	struct iwm_softc *sc = arg;
2454 	struct ieee80211com *ic = &sc->sc_ic;
2455 	struct ieee80211_node *ni = ic->ic_bss;
2456 
2457 	if (sc->ba_start)
2458 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2459 	else
2460 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2461 }
2462 
2463 /*
2464  * This function is called by upper layer when an ADDBA request is received
2465  * from another STA and before the ADDBA response is sent.
2466  */
2467 int
2468 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2469     uint8_t tid)
2470 {
2471 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2472 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2473 
2474 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2475 		return ENOSPC;
2476 
2477 	sc->ba_start = 1;
2478 	sc->ba_tid = tid;
2479 	sc->ba_ssn = htole16(ba->ba_winstart);
2480 	task_add(systq, &sc->ba_task);
2481 
2482 	return 0; /* XXX firmware may still fail to add BA agreement... */
2483 }
2484 
2485 /*
2486  * This function is called by upper layer on teardown of an HT-immediate
2487  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2488  */
2489 void
2490 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2491     uint8_t tid)
2492 {
2493 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2494 
2495 	sc->ba_start = 0;
2496 	sc->ba_tid = tid;
2497 	task_add(systq, &sc->ba_task);
2498 }
2499 
/*
 * Determine the MAC address of an 8000-series device.  Prefer the
 * address from the MAC_OVERRIDE NVM section if it looks sane,
 * otherwise fall back to the address stored in the WFMP registers.
 * If neither source is available, the address is zeroed.
 */
void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address some NVMs carry instead of a real one. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The register words hold the address bytes in reverse. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	printf("%s: mac address not found\n", DEVNAME(sc));
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2556 
/*
 * Parse raw NVM sections into sc->sc_nvm.  All multi-byte values in
 * the NVM are little-endian.  Section pointers not used by the device
 * family at hand may be NULL.
 */
int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000 family: radio config and SKU live in the SW section. */
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000 family: 32-bit fields in the PHY_SKU section. */
		uint32_t radio_cfg =
		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/* The byte order is little endian 16 bit, meaning 214365 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	/* Channel lists differ per family; pick the matching table. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, nitems(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, nitems(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2624 
/*
 * Check that all NVM sections required by this device family are
 * present and pass them to iwm_parse_nvm_data().
 */
int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
	const uint16_t *regulatory = NULL;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			return ENOENT;
		}

		regulatory = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
		mac_override =
			(const uint16_t *)
			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
		phy_sku = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	/* The calibration section is optional; may be NULL here. */
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2676 
2677 int
2678 iwm_nvm_init(struct iwm_softc *sc)
2679 {
2680 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2681 	int i, section, err;
2682 	uint16_t len;
2683 	uint8_t *buf;
2684 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2685 
2686 	memset(nvm_sections, 0, sizeof(nvm_sections));
2687 
2688 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
2689 	if (buf == NULL)
2690 		return ENOMEM;
2691 
2692 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
2693 		section = iwm_nvm_to_read[i];
2694 		KASSERT(section <= nitems(nvm_sections));
2695 
2696 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2697 		if (err) {
2698 			err = 0;
2699 			continue;
2700 		}
2701 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
2702 		if (nvm_sections[section].data == NULL) {
2703 			err = ENOMEM;
2704 			break;
2705 		}
2706 		memcpy(nvm_sections[section].data, buf, len);
2707 		nvm_sections[section].length = len;
2708 	}
2709 	free(buf, M_DEVBUF, bufsz);
2710 	if (err == 0)
2711 		err = iwm_parse_nvm_sections(sc, nvm_sections);
2712 
2713 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2714 		if (nvm_sections[i].data != NULL)
2715 			free(nvm_sections[i].data, M_DEVBUF,
2716 			    nvm_sections[i].length);
2717 	}
2718 
2719 	return err;
2720 }
2721 
2722 int
2723 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2724     const uint8_t *section, uint32_t byte_cnt)
2725 {
2726 	int err = EINVAL;
2727 	uint32_t chunk_sz, offset;
2728 
2729 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2730 
2731 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2732 		uint32_t addr, len;
2733 		const uint8_t *data;
2734 
2735 		addr = dst_addr + offset;
2736 		len = MIN(chunk_sz, byte_cnt - offset);
2737 		data = section + offset;
2738 
2739 		err = iwm_firmware_load_chunk(sc, addr, data, len);
2740 		if (err)
2741 			break;
2742 	}
2743 
2744 	return err;
2745 }
2746 
/*
 * Upload one firmware chunk to SRAM at dst_addr via the firmware DMA
 * service channel and wait for the transfer-complete interrupt.
 */
int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *chunk, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, chunk, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat,
	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);

	/* Destinations in the extended range need an extra chicken bit. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

	/* Cleared here; set by the interrupt handler when DMA completes. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Program the service channel: pause it, point it at the chunk's
	 * DMA address and destination, then re-enable it to start the
	 * transfer.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		err = tsleep(&sc->sc_fw, 0, "iwmfw", hz);
		if (err)
			break;
	}

	if (!sc->sc_fw_chunk_done)
		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
		    DEVNAME(sc), dst_addr, byte_cnt);

	/* Undo the extended-address chicken bit, if we set it above. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		iwm_nic_unlock(sc);
	}

	return err;
}
2810 
2811 int
2812 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2813 {
2814 	struct iwm_fw_sects *fws;
2815 	int err, i;
2816 	void *data;
2817 	uint32_t dlen;
2818 	uint32_t offset;
2819 
2820 	sc->sc_uc.uc_intr = 0;
2821 
2822 	fws = &sc->sc_fw.fw_sects[ucode_type];
2823 	for (i = 0; i < fws->fw_count; i++) {
2824 		data = fws->fw_sect[i].fws_data;
2825 		dlen = fws->fw_sect[i].fws_len;
2826 		offset = fws->fw_sect[i].fws_devoff;
2827 		if (dlen > sc->sc_fwdmasegsz) {
2828 			err = EFBIG;
2829 		} else
2830 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
2831 		if (err) {
2832 			printf("%s: could not load firmware chunk %u of %u\n",
2833 			    DEVNAME(sc), i, fws->fw_count);
2834 			return err;
2835 		}
2836 	}
2837 
2838 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2839 
2840 	return 0;
2841 }
2842 
/*
 * Load the firmware sections belonging to one CPU (1 or 2) of an
 * 8000-family device.  *first_ucode_section is the section index to
 * start from and is updated to the last index read, so the CPU2 call
 * continues where the CPU1 call left off.
 */
int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	/* CPU1 status bits live in the low half-word, CPU2 in the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		/* Sections must fit into the firmware DMA segment. */
		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			printf("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Next section sets the next status bit as well. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Mark this CPU's sections as fully loaded. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2915 
2916 int
2917 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2918 {
2919 	struct iwm_fw_sects *fws;
2920 	int err = 0;
2921 	int first_ucode_section;
2922 
2923 	fws = &sc->sc_fw.fw_sects[ucode_type];
2924 
2925 	/* configure the ucode to be ready to get the secured image */
2926 	/* release CPU reset */
2927 	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2928 
2929 	/* load to FW the binary Secured sections of CPU1 */
2930 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2931 	if (err)
2932 		return err;
2933 
2934 	/* load to FW the binary sections of CPU2 */
2935 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2936 }
2937 
/*
 * Upload the requested ucode image to the device and wait for the
 * firmware's "alive" notification.
 */
int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err, w;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		err = iwm_load_firmware_8000(sc, ucode_type);
	else
		err = iwm_load_firmware_7000(sc, ucode_type);

	if (err)
		return err;

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
		err = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
	}
	if (err || !sc->sc_uc.uc_ok)
		printf("%s: could not load firmware\n", DEVNAME(sc));

	/*
	 * Give the firmware some time to initialize.
	 * Accessing it too early causes errors.
	 */
	tsleep(&w, PCATCH, "iwmfwinit", hz);

	return err;
}
2966 
/*
 * Initialize the NIC, enable interrupts, and kick off the firmware
 * load for the given ucode image.
 */
int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Acknowledge any pending interrupts before starting. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
2996 
2997 int
2998 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2999 {
3000 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3001 		.valid = htole32(valid_tx_ant),
3002 	};
3003 
3004 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
3005 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3006 }
3007 
3008 int
3009 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3010 {
3011 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3012 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3013 
3014 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3015 	phy_cfg_cmd.calib_control.event_trigger =
3016 	    sc->sc_default_calib[ucode_type].event_trigger;
3017 	phy_cfg_cmd.calib_control.flow_trigger =
3018 	    sc->sc_default_calib[ucode_type].flow_trigger;
3019 
3020 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3021 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3022 }
3023 
3024 int
3025 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
3026 	enum iwm_ucode_type ucode_type)
3027 {
3028 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3029 	int err;
3030 
3031 	err = iwm_read_firmware(sc, ucode_type);
3032 	if (err)
3033 		return err;
3034 
3035 	sc->sc_uc_current = ucode_type;
3036 	err = iwm_start_fw(sc, ucode_type);
3037 	if (err) {
3038 		sc->sc_uc_current = old_type;
3039 		return err;
3040 	}
3041 
3042 	return iwm_post_alive(sc);
3043 }
3044 
/*
 * Run the INIT firmware image.
 * With 'justnvm' set, only load firmware far enough to read the NVM
 * and derive the MAC address.  Otherwise also send the bluetooth-coex
 * and SF configuration, configure TX antennas, trigger the internal
 * PHY calibrations, and wait for the init-complete notification.
 * Returns 0 on success or an errno-style error.
 */
int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	/* NVM access still works under rfkill, full init does not. */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		printf("%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	}

	if (justnvm) {
		err = iwm_nvm_init(sc);
		if (err) {
			printf("%s: failed to read nvm\n", DEVNAME(sc));
			return err;
		}

		/* If no MAC address was set yet, take the one from NVM. */
		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
			    sc->sc_nvm.hw_addr);

		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	/* Send TX valid antennas before triggering calibrations */
	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", 2*hz);
		if (err)
			break;
	}

	return err;
}
3110 
/*
 * Allocate a new RX buffer mbuf of 'size' bytes, map it for DMA, and
 * install it in the RX ring slot 'idx', updating that slot's RX
 * descriptor.  Returns 0 on success or ENOBUFS/errno on failure.
 */
int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;	/* set once the slot's old mapping is torn down */

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Regular cluster if the size fits, large RX buffer otherwise. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If this slot already holds a buffer, its mapping is destroyed
	 * here.  Failing to load a replacement below would leave the
	 * slot unusable, hence the panic on that path.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/*
	 * NOTE(review): the descriptor stores the DMA address >> 8;
	 * presumably the hardware requires 256-byte aligned buffers --
	 * confirm against the device documentation.
	 */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3159 
3160 #define IWM_RSSI_OFFSET 50
3161 int
3162 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3163 {
3164 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3165 	uint32_t agc_a, agc_b;
3166 	uint32_t val;
3167 
3168 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3169 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3170 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3171 
3172 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3173 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3174 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3175 
3176 	/*
3177 	 * dBm = rssi dB - agc dB - constant.
3178 	 * Higher AGC (higher radio gain) means lower signal.
3179 	 */
3180 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3181 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3182 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3183 
3184 	return max_rssi_dbm;
3185 }
3186 
3187 /*
3188  * RSSI values are reported by the FW as positive values - need to negate
3189  * to obtain their dBM.  Account for missing antennas by replacing 0
3190  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3191  */
3192 int
3193 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3194 {
3195 	int energy_a, energy_b, energy_c, max_energy;
3196 	uint32_t val;
3197 
3198 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3199 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3200 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3201 	energy_a = energy_a ? -energy_a : -256;
3202 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3203 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3204 	energy_b = energy_b ? -energy_b : -256;
3205 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3206 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3207 	energy_c = energy_c ? -energy_c : -256;
3208 	max_energy = MAX(energy_a, energy_b);
3209 	max_energy = MAX(max_energy, energy_c);
3210 
3211 	return max_energy;
3212 }
3213 
3214 void
3215 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3216     struct iwm_rx_data *data)
3217 {
3218 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3219 
3220 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3221 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3222 
3223 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3224 }
3225 
3226 /*
3227  * Retrieve the average noise (in dBm) among receivers.
3228  */
3229 int
3230 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3231 {
3232 	int i, total, nbant, noise;
3233 
3234 	total = nbant = noise = 0;
3235 	for (i = 0; i < 3; i++) {
3236 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3237 		if (noise) {
3238 			total += noise;
3239 			nbant++;
3240 		}
3241 	}
3242 
3243 	/* There should be at least one antenna but check anyway. */
3244 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3245 }
3246 
/*
 * Process a received frame (RX MPDU notification).
 * The PHY info for this frame was cached in sc_last_phy_info by a
 * preceding RX PHY notification (see iwm_rx_rx_phy_cmd()).  Frames
 * with bad CRC/overrun status are dropped.  On success the RX ring
 * slot is replenished, radiotap is fed if active, and the frame is
 * handed to net80211.
 */
void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct ieee80211_rxinfo rxi;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Point the ring's mbuf at the 802.11 frame within the packet. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	/* Presumably a corrupt PHY info; drop the frame. */
	if (__predict_false(phy_info->cfg_phy_cnt > 20))
		return;

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))
		return; /* drop */

	device_timestamp = le32toh(phy_info->system_timestamp);

	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */

	/*
	 * Install a fresh buffer in this ring slot before handing the
	 * current mbuf to the stack; on failure, drop this frame and
	 * keep the current mbuf in the ring.
	 */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	if (le32toh(phy_info->channel) < nitems(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = device_timestamp;
	ni = ieee80211_find_rxnode(ic, wh);
	if (c)
		ni->ni_chan = c;

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
#ifdef notyet
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
#endif
			/* XXX need a way to pass current MCS in 11n mode */
			tap->wr_rate = 0;
		} else {
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			/* Map hardware PLCP rate codes to 500kb/s units. */
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_rxtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
	}
#endif
	ieee80211_input(IC2IFP(ic), m, ni, &rxi);
	ieee80211_release_node(ic, ni);
}
3366 
3367 void
3368 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3369     struct iwm_node *in)
3370 {
3371 	struct ieee80211com *ic = &sc->sc_ic;
3372 	struct ifnet *ifp = IC2IFP(ic);
3373 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3374 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3375 	int failack = tx_resp->failure_frame;
3376 
3377 	KASSERT(tx_resp->frame_count == 1);
3378 
3379 	/* Update rate control statistics. */
3380 	in->in_amn.amn_txcnt++;
3381 	if (failack > 0) {
3382 		in->in_amn.amn_retrycnt++;
3383 	}
3384 
3385 	if (status != IWM_TX_STATUS_SUCCESS &&
3386 	    status != IWM_TX_STATUS_DIRECT_DONE)
3387 		ifp->if_oerrors++;
3388 	else
3389 		ifp->if_opackets++;
3390 }
3391 
/*
 * Handle a TX completion notification: update statistics via
 * iwm_rx_tx_cmd_single(), unmap and free the transmitted mbuf,
 * release the node reference, and unblock the transmit queue if the
 * ring has drained below the low watermark.
 */
void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Ignore notifications for frames already marked done. */
	if (txd->done)
		return;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Done with the frame's DMA mapping and mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	ieee80211_release_node(ic, &in->in_ni);

	/* Restart output if this ring was the last one flow-controlled. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3441 
3442 int
3443 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3444 {
3445 	struct iwm_binding_cmd cmd;
3446 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3447 	int i, err;
3448 	uint32_t status;
3449 
3450 	memset(&cmd, 0, sizeof(cmd));
3451 
3452 	cmd.id_and_color
3453 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3454 	cmd.action = htole32(action);
3455 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3456 
3457 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3458 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3459 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3460 
3461 	status = 0;
3462 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3463 	    sizeof(cmd), &cmd, &status);
3464 	if (err == 0 && status != 0)
3465 		err = EIO;
3466 
3467 	return err;
3468 }
3469 
3470 void
3471 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3472     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3473 {
3474 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3475 
3476 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3477 	    ctxt->color));
3478 	cmd->action = htole32(action);
3479 	cmd->apply_time = htole32(apply_time);
3480 }
3481 
3482 void
3483 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3484     struct ieee80211_channel *chan, uint8_t chains_static,
3485     uint8_t chains_dynamic)
3486 {
3487 	struct ieee80211com *ic = &sc->sc_ic;
3488 	uint8_t active_cnt, idle_cnt;
3489 
3490 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3491 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3492 
3493 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3494 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3495 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3496 
3497 	/* Set rx the chains */
3498 	idle_cnt = chains_static;
3499 	active_cnt = chains_dynamic;
3500 
3501 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3502 					IWM_PHY_RX_CHAIN_VALID_POS);
3503 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3504 	cmd->rxchain_info |= htole32(active_cnt <<
3505 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3506 
3507 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3508 }
3509 
3510 int
3511 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3512     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3513     uint32_t apply_time)
3514 {
3515 	struct iwm_phy_context_cmd cmd;
3516 
3517 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3518 
3519 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3520 	    chains_static, chains_dynamic);
3521 
3522 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3523 	    sizeof(struct iwm_phy_context_cmd), &cmd);
3524 }
3525 
/*
 * Send a host command to the firmware via the command TX ring.
 *
 * Commands too large for the pre-allocated slot buffer are copied
 * into a freshly allocated mbuf cluster which is freed later in
 * iwm_cmd_done().  With IWM_CMD_WANT_SKB set (synchronous only, not
 * combinable with IWM_CMD_ASYNC) the caller receives the response
 * packet via hcmd->resp_pkt and must release it with iwm_free_resp().
 * Returns 0 on success or an errno-style error.
 */
int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all data fragments. */
	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Commands in a non-zero group use the wide header format. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			printf("%s: firmware command too long (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = EINVAL;
			goto out;
		}
		m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
		if (m == NULL) {
			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	/* Fill in the (wide or regular) command header. */
	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Concatenate the payload fragments after the header. */
	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	/* Sync whichever buffer actually holds the command. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/* Wake the NIC (MAC access request) before kicking the ring. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		printf("%s: acquiring device failed\n", DEVNAME(sc));
		err = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Synchronous commands sleep until iwm_cmd_done() wakes us. */
	if (!async) {
		int generation = sc->sc_generation;
		err = tsleep(desc, PCATCH, "iwmcmd", hz);
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On error, release the response slot we claimed above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
3693 
3694 int
3695 iwm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id, uint32_t flags, uint16_t len,
3696     const void *data)
3697 {
3698 	struct iwm_host_cmd cmd = {
3699 		.id = id,
3700 		.len = { len, },
3701 		.data = { data, },
3702 		.flags = flags,
3703 	};
3704 
3705 	return iwm_send_cmd(sc, &cmd);
3706 }
3707 
/*
 * Send a command synchronously and extract the 32-bit status word
 * from its response.  Returns 0 with *status set on success; a NULL
 * response packet (possible when RFKILL is asserted) also returns 0
 * but leaves *status untouched.  The response is always released
 * before returning.
 */
int
iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
    uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int err, resp_len;

	/* We force a synchronous, response-carrying send here. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_WANT_SKB;

	err = iwm_send_cmd(sc, cmd);
	if (err)
		return err;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		err = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		err = EIO;
		goto out_free_resp;
	}

	/* The response must be exactly one status word. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	iwm_free_resp(sc, cmd);
	return err;
}
3747 
3748 int
3749 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id, uint16_t len,
3750     const void *data, uint32_t *status)
3751 {
3752 	struct iwm_host_cmd cmd = {
3753 		.id = id,
3754 		.len = { len, },
3755 		.data = { data, },
3756 	};
3757 
3758 	return iwm_send_cmd_status(sc, &cmd, status);
3759 }
3760 
/*
 * Release the single-slot synchronous command response buffer and
 * wake any thread sleeping in iwm_send_cmd() waiting to claim it.
 */
void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB)) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
3769 
/*
 * Handle completion of a firmware command: tear down the oversized
 * command buffer mapping for the slot (if one was used, see
 * iwm_send_cmd()) and wake the sender sleeping on the TX descriptor.
 */
void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* data->m is only set for commands too large for the slot. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	wakeup(&ring->desc[pkt->hdr.idx]);
}
3791 
#if 0
/*
 * Update the TX scheduler's byte-count table entry for a queue slot
 * (necessary only for block ack mode).  Entries below
 * IWM_TFD_QUEUE_SIZE_BC_DUP are duplicated past the end of the table.
 *
 * Fix: the bus_dmamap_sync() offset expressions referenced an
 * undeclared identifier 'w'; keep a pointer to the entry being
 * written so the sync offset can be derived from it.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w = &scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx];
		*w = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
3827 
3828 /*
3829  * Fill in various bit for management frames, and leave them
3830  * unfilled for data frames (firmware takes care of that).
3831  * Return the selected TX rate.
3832  */
3833 const struct iwm_rate *
3834 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3835     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3836 {
3837 	struct ieee80211com *ic = &sc->sc_ic;
3838 	struct ieee80211_node *ni = &in->in_ni;
3839 	const struct iwm_rate *rinfo;
3840 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3841 	int ridx, rate_flags, i;
3842 	int nrates = ni->ni_rates.rs_nrates;
3843 
3844 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3845 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3846 
3847 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3848 	    type != IEEE80211_FC0_TYPE_DATA) {
3849 		/* for non-data, use the lowest supported rate */
3850 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
3851 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
3852 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
3853 	} else if (ic->ic_fixed_mcs != -1) {
3854 		ridx = sc->sc_fixed_ridx;
3855 	} else if (ic->ic_fixed_rate != -1) {
3856 		ridx = sc->sc_fixed_ridx;
3857 	} else {
3858 		/* for data frames, use RS table */
3859 		tx->initial_rate_index = 0;
3860 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3861 		if (ni->ni_flags & IEEE80211_NODE_HT) {
3862 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
3863 			return &iwm_rates[ridx];
3864 		}
3865 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
3866 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
3867 		for (i = 0; i < nrates; i++) {
3868 			if (iwm_rates[i].rate == (ni->ni_txrate &
3869 			    IEEE80211_RATE_VAL)) {
3870 				ridx = i;
3871 				break;
3872 			}
3873 		}
3874 		return &iwm_rates[ridx];
3875 	}
3876 
3877 	rinfo = &iwm_rates[ridx];
3878 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3879 	if (IWM_RIDX_IS_CCK(ridx))
3880 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3881 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3882 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
3883 		rate_flags |= IWM_RATE_MCS_HT_MSK;
3884 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
3885 	} else
3886 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3887 
3888 	return rinfo;
3889 }
3890 
3891 #define TB0_SIZE 16
3892 int
3893 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3894 {
3895 	struct ieee80211com *ic = &sc->sc_ic;
3896 	struct iwm_node *in = (void *)ni;
3897 	struct iwm_tx_ring *ring;
3898 	struct iwm_tx_data *data;
3899 	struct iwm_tfd *desc;
3900 	struct iwm_device_cmd *cmd;
3901 	struct iwm_tx_cmd *tx;
3902 	struct ieee80211_frame *wh;
3903 	struct ieee80211_key *k = NULL;
3904 	const struct iwm_rate *rinfo;
3905 	uint32_t flags;
3906 	u_int hdrlen;
3907 	bus_dma_segment_t *seg;
3908 	uint8_t tid, type;
3909 	int i, totlen, err, pad;
3910 	int hdrlen2;
3911 
3912 	wh = mtod(m, struct ieee80211_frame *);
3913 	hdrlen = ieee80211_get_hdrlen(wh);
3914 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3915 
3916 	hdrlen2 = (ieee80211_has_qos(wh)) ?
3917 	    sizeof (struct ieee80211_qosframe) :
3918 	    sizeof (struct ieee80211_frame);
3919 
3920 	tid = 0;
3921 
3922 	ring = &sc->txq[ac];
3923 	desc = &ring->desc[ring->cur];
3924 	memset(desc, 0, sizeof(*desc));
3925 	data = &ring->data[ring->cur];
3926 
3927 	cmd = &ring->cmd[ring->cur];
3928 	cmd->hdr.code = IWM_TX_CMD;
3929 	cmd->hdr.flags = 0;
3930 	cmd->hdr.qid = ring->qid;
3931 	cmd->hdr.idx = ring->cur;
3932 
3933 	tx = (void *)cmd->data;
3934 	memset(tx, 0, sizeof(*tx));
3935 
3936 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3937 
3938 #if NBPFILTER > 0
3939 	if (sc->sc_drvbpf != NULL) {
3940 		struct mbuf mb;
3941 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3942 
3943 		tap->wt_flags = 0;
3944 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3945 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3946 		if (rinfo->plcp == IWM_RATE_INVM_PLCP) {
3947 			/* XXX need a way to pass current MCS in 11n mode */
3948 			tap->wt_rate = 0;
3949 		} else
3950 			tap->wt_rate = rinfo->rate;
3951 		tap->wt_hwqueue = ac;
3952 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
3953 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
3954 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3955 
3956 		mb.m_data = (caddr_t)tap;
3957 		mb.m_len = sc->sc_txtap_len;
3958 		mb.m_next = m;
3959 		mb.m_nextpkt = NULL;
3960 		mb.m_type = 0;
3961 		mb.m_flags = 0;
3962 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
3963 	}
3964 #endif
3965 
3966 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3967                 k = ieee80211_get_txkey(ic, wh, ni);
3968 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
3969 			return ENOBUFS;
3970 		/* 802.11 header may have moved. */
3971 		wh = mtod(m, struct ieee80211_frame *);
3972 	}
3973 	totlen = m->m_pkthdr.len;
3974 
3975 	flags = 0;
3976 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3977 		flags |= IWM_TX_CMD_FLG_ACK;
3978 	}
3979 
3980 	if (type == IEEE80211_FC0_TYPE_DATA &&
3981 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3982 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
3983 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
3984 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3985 
3986 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3987 	    type != IEEE80211_FC0_TYPE_DATA)
3988 		tx->sta_id = IWM_AUX_STA_ID;
3989 	else
3990 		tx->sta_id = IWM_STATION_ID;
3991 
3992 	if (type == IEEE80211_FC0_TYPE_MGT) {
3993 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3994 
3995 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3996 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3997 			tx->pm_frame_timeout = htole16(3);
3998 		else
3999 			tx->pm_frame_timeout = htole16(2);
4000 	} else {
4001 		tx->pm_frame_timeout = htole16(0);
4002 	}
4003 
4004 	if (hdrlen & 3) {
4005 		/* First segment length must be a multiple of 4. */
4006 		flags |= IWM_TX_CMD_FLG_MH_PAD;
4007 		pad = 4 - (hdrlen & 3);
4008 	} else
4009 		pad = 0;
4010 
4011 	tx->driver_txop = 0;
4012 	tx->next_frame_len = 0;
4013 
4014 	tx->len = htole16(totlen);
4015 	tx->tid_tspec = tid;
4016 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4017 
4018 	/* Set physical address of "scratch area". */
4019 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4020 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4021 
4022 	/* Copy 802.11 header in TX command. */
4023 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
4024 
4025 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4026 
4027 	tx->sec_ctl = 0;
4028 	tx->tx_flags |= htole32(flags);
4029 
4030 	/* Trim 802.11 header. */
4031 	m_adj(m, hdrlen);
4032 
4033 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4034 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4035 	if (err && err != EFBIG) {
4036 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
4037 		m_freem(m);
4038 		return err;
4039 	}
4040 	if (err) {
4041 		/* Too many DMA segments, linearize mbuf. */
4042 		if (m_defrag(m, M_DONTWAIT)) {
4043 			m_freem(m);
4044 			return ENOBUFS;
4045 		}
4046 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4047 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4048 		if (err) {
4049 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
4050 			    err);
4051 			m_freem(m);
4052 			return err;
4053 		}
4054 	}
4055 	data->m = m;
4056 	data->in = in;
4057 	data->done = 0;
4058 
4059 	/* Fill TX descriptor. */
4060 	desc->num_tbs = 2 + data->map->dm_nsegs;
4061 
4062 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4063 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4064 	    (TB0_SIZE << 4);
4065 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4066 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4067 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4068 	      + hdrlen + pad - TB0_SIZE) << 4);
4069 
4070 	/* Other DMA segments are for data payload. */
4071 	seg = data->map->dm_segs;
4072 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4073 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
4074 		desc->tbs[i+2].hi_n_len = \
4075 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4076 		    | ((seg->ds_len) << 4);
4077 	}
4078 
4079 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4080 	    BUS_DMASYNC_PREWRITE);
4081 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4082 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4083 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
4084 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4085 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4086 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
4087 
4088 #if 0
4089 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
4090 #endif
4091 
4092 	/* Kick TX ring. */
4093 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4094 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4095 
4096 	/* Mark TX ring as full if we reach a certain threshold. */
4097 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4098 		sc->qfullmsk |= 1 << ring->qid;
4099 	}
4100 
4101 	return 0;
4102 }
4103 
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush frames pending on the Tx paths selected by
 * tfd_msk.  Compiled out; kept for reference.
 */
int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	/* Wait for command completion only when 'sync' is requested. */
	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
                printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
	return err;
}
#endif
4123 
/* Turn the device LED on via the CSR LED register. */
void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
4129 
/* Turn the device LED off via the CSR LED register. */
void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
4135 
/*
 * Return non-zero if the LED register currently reads back as "on".
 * Used by the blink timeout to decide which way to toggle.
 */
int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
4141 
4142 void
4143 iwm_led_blink_timeout(void *arg)
4144 {
4145 	struct iwm_softc *sc = arg;
4146 
4147 	if (iwm_led_is_enabled(sc))
4148 		iwm_led_disable(sc);
4149 	else
4150 		iwm_led_enable(sc);
4151 
4152 	timeout_add_msec(&sc->sc_led_blink_to, 200);
4153 }
4154 
/* Start LED blinking by firing the blink timeout immediately. */
void
iwm_led_blink_start(struct iwm_softc *sc)
{
	timeout_add(&sc->sc_led_blink_to, 0);
}
4160 
/* Stop LED blinking and leave the LED turned off. */
void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	timeout_del(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
4167 
4168 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4169 
4170 int
4171 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4172     struct iwm_beacon_filter_cmd *cmd)
4173 {
4174 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4175 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4176 }
4177 
/*
 * Fill in the connection-quality related field(s) of a beacon filter
 * command.  Currently this only propagates the cached beacon-abort
 * enable state; 'in' is unused.
 */
void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4184 
/*
 * Enable or disable the firmware's beacon-abort feature.  Beacon abort
 * is configured through the beacon filtering command, so this is a
 * no-op unless beacon filtering is currently enabled.
 */
int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	/* Cache the new state; set_cqm_params reads it back into cmd. */
	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
4201 
4202 void
4203 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4204     struct iwm_mac_power_cmd *cmd)
4205 {
4206 	struct ieee80211com *ic = &sc->sc_ic;
4207 	struct ieee80211_node *ni = &in->in_ni;
4208 	int dtimper, dtimper_msec;
4209 	int keep_alive;
4210 
4211 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4212 	    in->in_color));
4213 	dtimper = ic->ic_dtim_period ?: 1;
4214 
4215 	/*
4216 	 * Regardless of power management state the driver must set
4217 	 * keep alive period. FW will use it for sending keep alive NDPs
4218 	 * immediately after association. Check that keep alive period
4219 	 * is at least 3 * DTIM.
4220 	 */
4221 	dtimper_msec = dtimper * ni->ni_intval;
4222 	keep_alive
4223 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4224 	keep_alive = roundup(keep_alive, 1000) / 1000;
4225 	cmd->keep_alive_seconds = htole16(keep_alive);
4226 }
4227 
4228 int
4229 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4230 {
4231 	int err;
4232 	int ba_enable;
4233 	struct iwm_mac_power_cmd cmd;
4234 
4235 	memset(&cmd, 0, sizeof(cmd));
4236 
4237 	iwm_power_build_cmd(sc, in, &cmd);
4238 
4239 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4240 	    sizeof(cmd), &cmd);
4241 	if (err != 0)
4242 		return err;
4243 
4244 	ba_enable = !!(cmd.flags &
4245 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4246 	return iwm_update_beacon_abort(sc, in, ba_enable);
4247 }
4248 
4249 int
4250 iwm_power_update_device(struct iwm_softc *sc)
4251 {
4252 	struct iwm_device_power_cmd cmd = {
4253 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4254 	};
4255 
4256 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4257 		return 0;
4258 
4259 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4260 
4261 	return iwm_send_cmd_pdu(sc,
4262 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4263 }
4264 
4265 int
4266 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4267 {
4268 	struct iwm_beacon_filter_cmd cmd = {
4269 		IWM_BF_CMD_CONFIG_DEFAULTS,
4270 		.bf_enable_beacon_filter = htole32(1),
4271 	};
4272 	int err;
4273 
4274 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4275 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4276 
4277 	if (err == 0)
4278 		sc->sc_bf.bf_enabled = 1;
4279 
4280 	return err;
4281 }
4282 
4283 int
4284 iwm_disable_beacon_filter(struct iwm_softc *sc)
4285 {
4286 	struct iwm_beacon_filter_cmd cmd;
4287 	int err;
4288 
4289 	memset(&cmd, 0, sizeof(cmd));
4290 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4291 		return 0;
4292 
4293 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4294 	if (err == 0)
4295 		sc->sc_bf.bf_enabled = 0;
4296 
4297 	return err;
4298 }
4299 
/*
 * Add (update == 0) or update (update != 0) the firmware's station
 * table entry for our BSS station via the IWM_ADD_STA command.
 * Returns 0 on success, EIO if the firmware rejected the command,
 * or another errno value from the command path.
 */
int
iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_add_sta_cmd_v7 add_sta_cmd;
	int err;
	uint32_t status;
	struct ieee80211com *ic = &sc->sc_ic;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	if (!update) {
		int ac;
		/* On initial add, claim a Tx queue per EDCA access class. */
		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
			add_sta_cmd.tfd_queue_msk |=
			    htole32(1 << iwm_ac_to_tx_fifo[ac]);
		}
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	/* Tx on all TIDs disabled (per the tid_disable_tx bitmask). */
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);

		add_sta_cmd.station_flags
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
		/* Translate the A-MPDU density (start spacing) setting. */
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
4365 
/*
 * Add the firmware's auxiliary station (IWM_AUX_STA_ID), which is used
 * for frames not tied to our BSS station, e.g. scan probe requests.
 * The AUX Tx queue is enabled first.
 */
int
iwm_add_aux_sta(struct iwm_softc *sc)
{
	struct iwm_add_sta_cmd_v7 cmd;
	int err;
	uint32_t status;

	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
	if (err)
		return err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = IWM_AUX_STA_ID;
	cmd.mac_id_n_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
	/* Tx on all TIDs disabled (per the tid_disable_tx bitmask). */
	cmd.tid_disable_tx = htole16(0xffff);

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);
	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
4392 
4393 #define IWM_PLCP_QUIET_THRESH 1
4394 #define IWM_ACTIVE_QUIET_TIME 10
4395 #define LONG_OUT_TIME_PERIOD 600
4396 #define SHORT_OUT_TIME_PERIOD 200
4397 #define SUSPEND_TIME_PERIOD 100
4398 
4399 uint16_t
4400 iwm_scan_rx_chain(struct iwm_softc *sc)
4401 {
4402 	uint16_t rx_chain;
4403 	uint8_t rx_ant;
4404 
4405 	rx_ant = iwm_fw_valid_rx_ant(sc);
4406 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4407 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4408 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4409 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4410 	return htole16(rx_chain);
4411 }
4412 
4413 #define ieee80211_tu_to_usec(a) (1024*(a))
4414 
4415 uint32_t
4416 iwm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
4417 {
4418 	if (!is_assoc)
4419 		return 0;
4420 	if (flags & 0x1)
4421 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
4422 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
4423 }
4424 
4425 uint32_t
4426 iwm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
4427 {
4428 	if (!is_assoc)
4429 		return 0;
4430 	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
4431 }
4432 
/*
 * Choose the Tx rate and antenna for scan frames: 1 Mbit/s CCK on
 * 2GHz unless CCK is disallowed, otherwise 6 Mbit/s OFDM.  The Tx
 * antenna is rotated between calls via sc->sc_scan_last_antenna.
 */
uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next antenna the firmware declares valid. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
				   tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4455 
4456 /*
4457  * If req->n_ssids > 0, it means we should do an active scan.
4458  * In case of active scan w/o directed scan, we receive a zero-length SSID
4459  * just to notify that this scan is active and not passive.
4460  * In order to notify the FW of the number of SSIDs we wish to scan (including
4461  * the zero-length one), we need to set the corresponding bits in chan->type,
4462  * one for each SSID, and set the active bit (first). If the first SSID is
4463  * already included in the probe template, so we need to set only
4464  * req->n_ssids - 1 bits in addition to the first bit.
4465  */
4466 uint16_t
4467 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4468 {
4469 	if (flags & IEEE80211_CHAN_2GHZ)
4470 		return 30  + 3 * (n_ssids + 1);
4471 	return 20  + 2 * (n_ssids + 1);
4472 }
4473 
4474 uint16_t
4475 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
4476 {
4477 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4478 }
4479 
/*
 * Fill an LMAC scan request's channel array with every configured
 * channel, capped at the firmware's scan channel limit.  Returns the
 * number of channel entries written.  n_ssids is only used by the
 * disabled (#if 0) directed-scan code below.
 */
uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Channel index 0 is unused; iteration starts at ic_channels[1]. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* Skip channel slots not configured for this device. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
4509 
/*
 * Fill a UMAC scan request's channel array with every configured
 * channel, capped at the firmware's scan channel limit.  Returns the
 * number of channel entries written.  Unlike the LMAC variant the
 * channel number and iteration count are single-byte fields here.
 * n_ssids is only used by the disabled (#if 0) code below.
 */
uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Channel index 0 is unused; iteration starts at ic_channels[1]. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* Skip channel slots not configured for this device. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
4538 
/*
 * Build the probe request template transmitted by the firmware during
 * active scans: an 802.11 header plus SSID element, then per-band rate
 * IEs, then IEs common to both bands (HT caps).  The offset and length
 * of each part is recorded in 'preq' so the firmware can patch and
 * reassemble the frame per channel.  Returns ENOBUFS if the template
 * buffer is too small.
 */
int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	/* Need room for the header plus an SSID element (2 + esslen). */
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates plus extended rates: two 2-byte element headers. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		/*
		 * Append an empty DS Params element; channel number is 0.
		 * NOTE(review): presumably filled in per channel by the
		 * firmware (per the capability name) -- confirm.
		 */
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
4631 
/*
 * Start a one-shot hardware scan using the older LMAC scan API.
 * The request contains dwell times, scan flags, Tx parameters for
 * both bands, the channel list, and a probe request template.
 * Returns 0 on success or an errno value.
 */
int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	/* Request layout: fixed header, channel configs, probe template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = malloc(req_len, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* Passive scan when we have no SSID to probe for. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |=
		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	/* Scan 2GHz always; 5GHz only if the SKU enables it. */
	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe template follows the channel configs in req->data. */
	err = iwm_fill_probe_req(sc,
			    (struct iwm_scan_probe_req *)(req->data +
			    (sizeof(struct iwm_scan_channel_cfg_lmac) *
			    sc->sc_capa_n_scan_channels)));
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}
4736 
/*
 * Send the global UMAC scan configuration (IWM_SCAN_CFG_CMD): legacy
 * rates, antenna chains, dwell times, our MAC address, and the full
 * channel list.  Must be done before issuing UMAC scan requests.
 * Returns 0 on success or an errno value.
 */
int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One extra byte per scan channel follows the fixed struct. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* Channel index 0 is unused; skip unconfigured channel slots. */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	free(scan_config, M_DEVBUF, cmd_size);
	return err;
}
4811 
/*
 * Start a one-shot hardware scan using the newer UMAC scan API.
 * The request consists of a fixed header, per-channel configs, and a
 * "tail" holding the directed-scan SSIDs, the scan schedule, and the
 * probe request template.  Returns 0 on success or an errno value.
 */
int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags =0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = malloc(req_len, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail follows the channel configs in req->data. */
	tail = (void *)&req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}
4893 
/*
 * Compute the CCK and OFDM basic-rate bitmaps to program into the
 * firmware's MAC context.  All CCK rates (2GHz only) and all non-HT
 * OFDM rates are treated as basic, then mandatory lower rates are
 * added per the 802.11-2007 9.6 rules quoted below.
 */
void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates apply on 2GHz (or when no channel is known yet). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		int adj = i - IWM_FIRST_OFDM_RATE;
		ofdm |= (1 << adj);
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
4975 
/*
 * Fill the fields of a MAC context command that are common to all
 * actions (add/modify/remove): identity, BSSID, ACK rates, slot and
 * preamble flags, per-AC EDCA parameters and protection flags.
 */
void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* Use the broadcast BSSID until we are associated. */
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
	}
	/* Tell the firmware which rates may be used for ACK frames. */
	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Default EDCA parameters; one entry per firmware TX fifo. */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			/*
			 * FALLTHROUGH -- NOTE(review): no break here, so
			 * non-member/mixed mode also picks up FAT_PROT
			 * below; confirm this is intentional rather than
			 * a missing break.
			 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
	/* ERP protection (RTS/CTS or CTS-to-self) requested by net80211. */
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
5045 
/*
 * Fill the station-mode specific portion of a MAC context command:
 * beacon/DTIM timing and association parameters.
 *
 * NOTE(review): 'force_assoc_off' is currently unused and is_assoc is
 * unconditionally 0 -- confirm whether association state should be
 * reported to the firmware when associated (cf. Linux iwlwifi's
 * mac context handling of force_assoc_off).
 */
void
iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = &sc->sc_ic;

	ctxt_sta->is_assoc = htole32(0);
	/* Beacon interval plus the reciprocal form the firmware wants. */
	ctxt_sta->bi = htole32(ni->ni_intval);
	ctxt_sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
	/* DTIM interval in TU = beacon interval * DTIM period. */
	ctxt_sta->dtim_interval = htole32(ni->ni_intval * ic->ic_dtim_period);
	ctxt_sta->dtim_reciprocal =
	    htole32(iwm_reciprocal(ni->ni_intval * ic->ic_dtim_period));

	/* 10 = CONN_MAX_LISTEN_INTERVAL */
	ctxt_sta->listen_interval = htole32(10);
	ctxt_sta->assoc_id = htole32(ni->ni_associd);
}
5064 
5065 int
5066 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
5067 {
5068 	struct iwm_mac_ctx_cmd cmd;
5069 
5070 	memset(&cmd, 0, sizeof(cmd));
5071 
5072 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
5073 
5074 	/* Allow beacons to pass through as long as we are not associated or we
5075 	 * do not have dtim period information */
5076 	if (!in->in_assoc || !sc->sc_ic.ic_dtim_period)
5077 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5078 	else
5079 		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
5080 
5081 	/* Fill the data specific for station mode */
5082 	iwm_mac_ctxt_cmd_fill_sta(sc, in,
5083 	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
5084 
5085 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd),
5086 	    &cmd);
5087 }
5088 
5089 int
5090 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5091 {
5092 	struct iwm_time_quota_cmd cmd;
5093 	int i, idx, num_active_macs, quota, quota_rem;
5094 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5095 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
5096 	uint16_t id;
5097 
5098 	memset(&cmd, 0, sizeof(cmd));
5099 
5100 	/* currently, PHY ID == binding ID */
5101 	if (in) {
5102 		id = in->in_phyctxt->id;
5103 		KASSERT(id < IWM_MAX_BINDINGS);
5104 		colors[id] = in->in_phyctxt->color;
5105 
5106 		if (1)
5107 			n_ifs[id] = 1;
5108 	}
5109 
5110 	/*
5111 	 * The FW's scheduling session consists of
5112 	 * IWM_MAX_QUOTA fragments. Divide these fragments
5113 	 * equally between all the bindings that require quota
5114 	 */
5115 	num_active_macs = 0;
5116 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5117 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5118 		num_active_macs += n_ifs[i];
5119 	}
5120 
5121 	quota = 0;
5122 	quota_rem = 0;
5123 	if (num_active_macs) {
5124 		quota = IWM_MAX_QUOTA / num_active_macs;
5125 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
5126 	}
5127 
5128 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5129 		if (colors[i] < 0)
5130 			continue;
5131 
5132 		cmd.quotas[idx].id_and_color =
5133 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5134 
5135 		if (n_ifs[i] <= 0) {
5136 			cmd.quotas[idx].quota = htole32(0);
5137 			cmd.quotas[idx].max_duration = htole32(0);
5138 		} else {
5139 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5140 			cmd.quotas[idx].max_duration = htole32(0);
5141 		}
5142 		idx++;
5143 	}
5144 
5145 	/* Give the remainder of the session to the first binding */
5146 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5147 
5148 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
5149 	    sizeof(cmd), &cmd);
5150 }
5151 
/*
 * Prepare the firmware for authentication: enable the full-on station
 * filter, set up multicast filtering, tune PHY context 0 to the AP's
 * channel, bind it to the MAC, add the AP's station entry, and update
 * the MAC context.  Finally "protect" the session so the firmware
 * stays on channel while the AUTH/ASSOC exchange is in flight.
 * Returns 0 on success or an errno from the failing firmware command.
 */
int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	uint32_t duration;
	int err;

	in->in_assoc = 0;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Move PHY context 0 onto the AP's channel. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	/* Bind the MAC context to the PHY context. */
	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_protect_session(sc, in, duration, 500 /* XXX magic number */);
	DELAY(100);

	return 0;
}
5202 
5203 int
5204 iwm_assoc(struct iwm_softc *sc)
5205 {
5206 	struct ieee80211com *ic = &sc->sc_ic;
5207 	struct iwm_node *in = (void *)ic->ic_bss;
5208 	int err;
5209 
5210 	err = iwm_add_sta_cmd(sc, in, 1);
5211 	if (err)
5212 		return err;
5213 
5214 	in->in_assoc = 1;
5215 
5216 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
5217 	if (err) {
5218 		printf("%s: failed to update MAC\n", DEVNAME(sc));
5219 		return err;
5220 	}
5221 
5222 	return 0;
5223 }
5224 
5225 struct ieee80211_node *
5226 iwm_node_alloc(struct ieee80211com *ic)
5227 {
5228 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
5229 }
5230 
5231 void
5232 iwm_calib_timeout(void *arg)
5233 {
5234 	struct iwm_softc *sc = arg;
5235 	struct ieee80211com *ic = &sc->sc_ic;
5236 	struct iwm_node *in = (void *)ic->ic_bss;
5237 	struct ieee80211_node *ni = &in->in_ni;
5238 	int s, otxrate;
5239 
5240 	s = splnet();
5241 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
5242 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
5243 		if (ni->ni_flags & IEEE80211_NODE_HT)
5244 			otxrate = ni->ni_txmcs;
5245 		else
5246 			otxrate = ni->ni_txrate;
5247 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5248 
5249 		/*
5250 		 * If AMRR has chosen a new TX rate we must update
5251 		 * the firwmare's LQ rate table from process context.
5252 		 */
5253 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5254 		    otxrate != ni->ni_txmcs)
5255 			task_add(systq, &sc->setrates_task);
5256 		else if (otxrate != ni->ni_txrate)
5257 			task_add(systq, &sc->setrates_task);
5258 	}
5259 	splx(s);
5260 
5261 	timeout_add_msec(&sc->sc_calib_to, 500);
5262 }
5263 
5264 void
5265 iwm_setrates_task(void *arg)
5266 {
5267 	struct iwm_softc *sc = arg;
5268 	struct ieee80211com *ic = &sc->sc_ic;
5269 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5270 
5271 	/* Update rates table based on new TX rate determined by AMRR. */
5272 	iwm_setrates(in);
5273 }
5274 
/*
 * Build and send the firmware link quality (LQ) command for a node.
 * The LQ table lists TX rates in descending order of preference,
 * starting at the node's current AMRR-selected rate, mixing HT and
 * legacy rates as appropriate for the channel and node capabilities.
 */
void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, ridx, ridx_min, j, sgi_ok, tab = 0;
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
		.flags = 0,
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, enable RTS/CTS, and SGI (if supported). */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
		sgi_ok = (ni->ni_htcaps & IEEE80211_HTCAP_SGI20);
	} else
		sgi_ok = 0;

	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	/* On 5 GHz there are no CCK rates to fall back to. */
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= nitems(lq->rs_table))
			break;
		tab = 0;
		/* Prefer an HT rate if one maps to this rate index. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
		/* Otherwise try a legacy rate the node supports. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Select TX antenna and tag CCK entries for the firmware. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < nitems(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	iwm_send_cmd(sc, &cmd);
}
5368 
/*
 * ifmedia change callback: record any fixed rate/MCS selected by the
 * user and restart the interface so the new setting takes effect.
 */
int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		/*
		 * NOTE(review): if no entry matches, ridx ends up as
		 * IWM_RIDX_MAX+1 -- confirm callers can never select a
		 * rate absent from iwm_rates.
		 */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface so the new media setting is applied. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
5400 
/*
 * Process-context handler for net80211 state transitions queued by
 * iwm_newstate().  Performs the firmware commands the new state needs
 * and finally calls the stack's own newstate handler.
 */
void
iwm_newstate_task(void *psc)
{
	struct iwm_softc *sc = (struct iwm_softc *)psc;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	struct iwm_node *in;
	int arg = sc->ns_arg;
	int err;

	DPRINTF(("switching state %s->%s\n",
	    ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		in = (struct iwm_node *)ic->ic_bss;
		if (in)
			in->in_assoc = 0;

		iwm_stop_device(sc);
		iwm_init_hw(sc);

		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* Don't start a second scan while one is in progress. */
		if (ic->ic_state == nstate &&
		    (sc->sc_flags & IWM_FLAG_SCANNING))
			return;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			printf("%s: could not initiate scan\n", DEVNAME(sc));
			return;
		}
		sc->sc_flags |= IWM_FLAG_SCANNING;
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/* sc_newstate() is called from iwm_endscan_cb(). */
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err)
			return;

		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err)
			return;
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;
		iwm_power_mac_update_mode(sc, in);
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		iwm_enable_beacon_filter(sc, in);
#endif
		iwm_update_quotas(sc, in);

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);

		/* Kick off the periodic rate-adaptation timer. */
		timeout_add_msec(&sc->sc_calib_to, 500);
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5510 
5511 int
5512 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5513 {
5514 	struct ifnet *ifp = IC2IFP(ic);
5515 	struct iwm_softc *sc = ifp->if_softc;
5516 
5517 	timeout_del(&sc->sc_calib_to);
5518 
5519 	sc->ns_nstate = nstate;
5520 	sc->ns_arg = arg;
5521 
5522 	task_add(sc->sc_nswq, &sc->newstate_task);
5523 
5524 	return 0;
5525 }
5526 
5527 void
5528 iwm_endscan_cb(void *arg)
5529 {
5530 	struct iwm_softc *sc = arg;
5531 	struct ieee80211com *ic = &sc->sc_ic;
5532 
5533 	sc->sc_flags &= ~IWM_FLAG_SCANNING;
5534 	ieee80211_end_scan(&ic->ic_if);
5535 }
5536 
5537 /*
5538  * Aging and idle timeouts for the different possible scenarios
5539  * in default configuration
5540  */
5541 static const uint32_t
5542 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5543 	{
5544 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
5545 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
5546 	},
5547 	{
5548 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
5549 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
5550 	},
5551 	{
5552 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
5553 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
5554 	},
5555 	{
5556 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
5557 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
5558 	},
5559 	{
5560 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
5561 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
5562 	},
5563 };
5564 
5565 /*
5566  * Aging and idle timeouts for the different possible scenarios
5567  * in single BSS MAC configuration.
5568  */
5569 static const uint32_t
5570 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5571 	{
5572 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
5573 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
5574 	},
5575 	{
5576 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
5577 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
5578 	},
5579 	{
5580 		htole32(IWM_SF_MCAST_AGING_TIMER),
5581 		htole32(IWM_SF_MCAST_IDLE_TIMER)
5582 	},
5583 	{
5584 		htole32(IWM_SF_BA_AGING_TIMER),
5585 		htole32(IWM_SF_BA_IDLE_TIMER)
5586 	},
5587 	{
5588 		htole32(IWM_SF_TX_RE_AGING_TIMER),
5589 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
5590 	},
5591 };
5592 
/*
 * Fill a smart-fifo configuration command.  With a node, pick a
 * watermark based on the AP's antenna/HT capabilities and use the
 * single-BSS timeout table; without one (unassociated), use defaults.
 */
void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermarks, pending rx MCS set support. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* All scenarios share the same long-delay aging timeout. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts depend on whether we have an associated node. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}

}
5640 
5641 int
5642 iwm_sf_config(struct iwm_softc *sc, int new_state)
5643 {
5644 	struct ieee80211com *ic = &sc->sc_ic;
5645 	struct iwm_sf_cfg_cmd sf_cmd = {
5646 		.state = htole32(IWM_SF_FULL_ON),
5647 	};
5648 	int err = 0;
5649 
5650 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5651 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
5652 
5653 	switch (new_state) {
5654 	case IWM_SF_UNINIT:
5655 	case IWM_SF_INIT_OFF:
5656 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
5657 		break;
5658 	case IWM_SF_FULL_ON:
5659 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
5660 		break;
5661 	default:
5662 		return EINVAL;
5663 	}
5664 
5665 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
5666 				   sizeof(sf_cmd), &sf_cmd);
5667 	return err;
5668 }
5669 
5670 int
5671 iwm_send_bt_init_conf(struct iwm_softc *sc)
5672 {
5673 	struct iwm_bt_coex_cmd bt_cmd;
5674 
5675 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
5676 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
5677 
5678 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
5679 	    &bt_cmd);
5680 }
5681 
/*
 * Send an MCC (mobile country code) update for location-aware
 * regulatory (LAR).  'alpha2' is a two-letter country code; "ZZ"
 * requests the firmware's world-wide default.  The response buffer is
 * requested but currently discarded.
 */
int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_SKB,
		.data = { &mcc_cmd },
	};
	int err;
	/* Newer firmware replies with the v2 response layout. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code packed as two big-endian ASCII bytes. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	/* Response requested via IWM_CMD_WANT_SKB; free it unused. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
5716 
5717 void
5718 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
5719 {
5720 	struct iwm_host_cmd cmd = {
5721 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
5722 		.len = { sizeof(uint32_t), },
5723 		.data = { &backoff, },
5724 	};
5725 
5726 	iwm_send_cmd(sc, &cmd);
5727 }
5728 
/*
 * Bring the hardware and firmware up from scratch: run the INIT
 * firmware image for calibration, restart with the regular image, and
 * issue the full sequence of initial configuration commands (BT coex,
 * antennas, PHY db/config, aux station, PHY contexts, power, LAR,
 * scan config, TX queues, MAC context, beacon filter).  On any
 * failure the device is stopped again.
 */
int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err, i, ac;

	err = iwm_preinit(sc);
	if (err)
		return err;

	err = iwm_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	/* Run the INIT image to obtain calibration results. */
	err = iwm_run_init_mvm_ucode(sc, 0);
	if (err)
		return err;

	/* Should stop and start HW since INIT image just loaded. */
	iwm_stop_device(sc);
	err = iwm_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	/* Restart, this time with the regular firmware */
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (err) {
		printf("%s: could not load firmware\n", DEVNAME(sc));
		goto err;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err) {
		printf("%s: could not init bt coex (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err) {
		printf("%s: could not init tx ant config (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	/* Upload calibration data obtained from the INIT image. */
	err = iwm_send_phy_db_data(sc);
	if (err) {
		printf("%s: could not init phy db (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	err = iwm_send_phy_cfg_cmd(sc);
	if (err) {
		printf("%s: could not send phy config (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	/* Add auxiliary station for scanning */
	err = iwm_add_aux_sta(sc);
	if (err) {
		printf("%s: could not add aux station (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
		    IWM_FW_CTXT_ACTION_ADD, 0);
		if (err) {
			printf("%s: could not add phy context %d (error %d)\n",
			    DEVNAME(sc), i, err);
			goto err;
		}
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_tt_tx_backoff(sc, 0);

	err = iwm_power_update_device(sc);
	if (err) {
		printf("%s: could send power update command (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	/* Location-aware regulatory: start with the world-wide domain. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		err = iwm_send_update_mcc_cmd(sc, "ZZ");
		if (err) {
			printf("%s: could not init LAR (error %d)\n",
			    DEVNAME(sc), err);
			goto err;
		}
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		err = iwm_config_umac_scan(sc);
		if (err) {
			printf("%s: could not configure scan (error %d)\n",
			    DEVNAME(sc), err);
			goto err;
		}
	}

	/* Enable one TX queue per EDCA access category. */
	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_ac_to_tx_fifo[ac]);
		if (err) {
			printf("%s: could not enable Tx queue %d (error %d)\n",
			    DEVNAME(sc), ac, err);
			goto err;
		}
	}

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
 	}

	err = iwm_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	return 0;

 err:
	iwm_stop_device(sc);
	return err;
}
5876 
5877 /* Allow multicast from our BSSID. */
5878 int
5879 iwm_allow_mcast(struct iwm_softc *sc)
5880 {
5881 	struct ieee80211com *ic = &sc->sc_ic;
5882 	struct ieee80211_node *ni = ic->ic_bss;
5883 	struct iwm_mcast_filter_cmd *cmd;
5884 	size_t size;
5885 	int err;
5886 
5887 	size = roundup(sizeof(*cmd), 4);
5888 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
5889 	if (cmd == NULL)
5890 		return ENOMEM;
5891 	cmd->filter_own = 1;
5892 	cmd->port_id = 0;
5893 	cmd->count = 0;
5894 	cmd->pass_all = 1;
5895 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5896 
5897 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5898 	    0, size, cmd);
5899 	free(cmd, M_DEVBUF, size);
5900 	return err;
5901 }
5902 
5903 int
5904 iwm_init(struct ifnet *ifp)
5905 {
5906 	struct iwm_softc *sc = ifp->if_softc;
5907 	int err;
5908 
5909 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5910 		return 0;
5911 	}
5912 	sc->sc_generation++;
5913 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5914 
5915 	err = iwm_init_hw(sc);
5916 	if (err) {
5917 		iwm_stop(ifp, 1);
5918 		return err;
5919 	}
5920 
5921 	ifq_clr_oactive(&ifp->if_snd);
5922 	ifp->if_flags |= IFF_RUNNING;
5923 
5924 	ieee80211_begin_scan(ifp);
5925 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5926 
5927 	return 0;
5928 }
5929 
/*
 * Interface start (TX) routine: drain net80211's management queue and
 * the interface send queue, encapsulate data frames, and hand each
 * frame to iwm_tx().  Stops when the hardware TX rings fill up.
 */
void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac = EDCA_AC_BE; /* XXX */

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* Destination node was stashed by net80211. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}
		/* Data frames only once associated and running. */
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Need a contiguous Ethernet header for encapsulation. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		/* Convert the Ethernet frame into an 802.11 frame. */
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the TX watchdog while frames are outstanding. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
5996 
/*
 * Stop the interface: mark it down, cancel all pending driver tasks
 * and timeouts, drop net80211 back to INIT state and power the device
 * off.  Ordering matters: tasks are cancelled before the state change
 * so none run against a half-torn-down device.
 */
void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	in->in_phyctxt = NULL;
	in->in_assoc = 0;

	/* Cancel any driver work still queued. */
	task_del(systq, &sc->init_task);
	task_del(sc->sc_nswq, &sc->newstate_task);
	task_del(sc->sc_eswq, &sc->sc_eswk);
	task_del(systq, &sc->setrates_task);
	task_del(systq, &sc->ba_task);
	task_del(systq, &sc->htprot_task);

	/* Bypass the deferred newstate_task; go to INIT directly. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);

	timeout_del(&sc->sc_calib_to);
	iwm_led_blink_stop(sc);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
6028 
6029 void
6030 iwm_watchdog(struct ifnet *ifp)
6031 {
6032 	struct iwm_softc *sc = ifp->if_softc;
6033 
6034 	ifp->if_timer = 0;
6035 	if (sc->sc_tx_timer > 0) {
6036 		if (--sc->sc_tx_timer == 0) {
6037 			printf("%s: device timeout\n", DEVNAME(sc));
6038 #ifdef IWM_DEBUG
6039 			iwm_nic_error(sc);
6040 #endif
6041 			task_add(systq, &sc->init_task);
6042 			ifp->if_oerrors++;
6043 			return;
6044 		}
6045 		ifp->if_timer = 1;
6046 	}
6047 
6048 	ieee80211_watchdog(ifp);
6049 }
6050 
/*
 * Interface ioctl handler.  Serialized with a write lock because some
 * paths (init/stop) sleep waiting for firmware; runs at splnet.
 * Unhandled commands are passed on to net80211.
 */
int
iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifreq *ifr;
	int s, err = 0;


	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	if (err)
		return err;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		/* Bring the interface in line with the UP flag. */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
				if (err)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ifr = (struct ifreq *)data;
		err = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &ic->ic_ac) :
		    ether_delmulti(ifr, &ic->ic_ac);
		/* Multicast filter changes need no device restart. */
		if (err == ENETRESET)
			err = 0;
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* ENETRESET from net80211 means: restart if currently running. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
6115 
6116 #ifdef IWM_DEBUG
6117 /*
6118  * Note: This structure is read from the device with IO accesses,
6119  * and the reading already does the endian conversion. As it is
6120  * read with uint32_t-sized accesses, any members with a different size
6121  * need to be ordered correctly though!
6122  */
/* Layout must match the firmware's error log exactly; do not reorder. */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6170 
6171 /*
6172  * UMAC error struct - relevant starting from family 8000 chip.
6173  * Note: This structure is read from the device with IO accesses,
6174  * and the reading already does the endian conversion. As it is
6175  * read with u32-sized accesses, any members with a different size
6176  * need to be ordered correctly though!
6177  */
/* Layout must match the UMAC firmware's error log exactly; do not reorder. */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;
	uint32_t umac_minor;
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
6195 
/* Layout constants for the firmware's on-device error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6198 
/*
 * Read the UMAC error event table from device memory and dump it to
 * the console.  Only meaningful on family-8000 and later chips.
 */
void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	/* Table address was reported by firmware in the ALIVE response. */
	base = sc->sc_uc.uc_umac_error_event_table;

	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	/*
	 * NOTE(review): odd-looking condition inherited from the
	 * reference driver -- presumably a sanity check on the entry
	 * count; confirm against iwlwifi.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
			sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
		iwm_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
6243 
/*
 * Firmware error id -> human-readable name, used by iwm_desc_lookup().
 * The last entry is the catch-all for unknown ids and must stay last.
 * File-local and immutable, hence static const.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6265 
6266 const char *
6267 iwm_desc_lookup(uint32_t num)
6268 {
6269 	int i;
6270 
6271 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
6272 		if (advanced_lookup[i].num == num)
6273 			return advanced_lookup[i].name;
6274 
6275 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6276 	return advanced_lookup[i].name;
6277 }
6278 
6279 /*
6280  * Support for dumping the error log seemed like a good idea ...
6281  * but it's mostly hex junk and the only sensible thing is the
6282  * hw/ucode revision (which we know anyway).  Since it's here,
6283  * I'll just leave it in, just in case e.g. the Intel guys want to
6284  * help us decipher some "ADVANCED_SYSASSERT" later.
6285  */
/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway).  Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 */
void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	printf("%s: dumping device error log\n", DEVNAME(sc));
	/* Table address was reported by firmware in the ALIVE response. */
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (!table.valid) {
		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
		return;
	}

	/*
	 * NOTE(review): odd-looking condition inherited from the
	 * reference driver; confirm against iwlwifi.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
	    iwm_desc_lookup(table.error_id));
	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
	    table.trm_hw_status0);
	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
	    table.trm_hw_status1);
	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
	    table.fw_rev_type);
	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
	    table.major);
	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
	    table.minor);
	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);

	/* UMAC error table only exists on family-8000 and later firmware. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
6360 #endif
6361 
/*
 * Sync the response payload (the bytes following the iwm_rx_packet
 * header) for CPU reads, then point _var_ at it.  Expects `sc' and
 * `data' (the RX ring slot) in the caller's scope.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Like SYNC_RESP_STRUCT, but for a payload of explicit length _len_.
 * Fixed to actually sync _len_ bytes; the original used sizeof(len),
 * which referenced a caller-scope variable and synced only the size
 * of that variable rather than the requested payload length.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring cursor, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6377 
/*
 * Process all pending entries on the RX ring: firmware notifications,
 * command responses, and received frames.  Called from interrupt
 * context via iwm_intr().
 */
void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Pull in the shared status page to learn the firmware's write ptr. */
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		/* Sync just the packet header first; payload synced per-case. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 marks firmware-originated notifications. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			/* OpenBSD does not provide ieee80211_beacon_miss() */
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_ALIVE: {
			/*
			 * Firmware boot response.  The payload size tells the
			 * response version apart; record the error/log table
			 * addresses and scheduler base for later use.
			 */
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake anyone sleeping in iwm_load_ucode_wait_alive(). */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			iwm_phy_db_set_section(sc, phy_db_notif);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Copy the full response for the waiting command. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Record the firmware's country code as a string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			/*
			 * NOTE(review): no break here -- control falls
			 * through into the IWM_DTS_MEASUREMENT_NOTIFICATION
			 * case below, which only breaks.  Harmless, but a
			 * break was presumably intended; confirm.
			 */
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			/* Generic command responses: hand back to the waiter. */
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			/* Wake anyone sleeping in iwm_run_init_mvm_ucode(). */
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			/* Defer end-of-scan processing to the task queue. */
			task_add(sc->sc_eswq, &sc->sc_eswk);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			task_add(sc->sc_eswq, &sc->sc_eswk);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			task_add(sc->sc_eswq, &sc->sc_eswk);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);

			break;
		}

		default:
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), pkt->hdr.code, pkt->len_n_flags, qid,
			    idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
6645 
/*
 * Primary interrupt handler.  Gathers interrupt causes either from the
 * in-memory interrupt cause table (ICT) or directly from CSR registers,
 * then dispatches: firmware/hardware errors, firmware-load completion,
 * rfkill changes, and RX notifications.  Returns 1 if the interrupt was
 * ours, 0 otherwise.
 */
int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we figure out what happened. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear each consumed ICT slot as we go. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* All-ones / 0xa5a5a5a* reads mean the device is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		/* Firmware asserted; stop and schedule a reinit. */
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		iwm_stop(ifp, 1);
		task_add(systq, &sc->init_task);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		/* Hardware error: take the interface down permanently. */
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Wake the firmware-load path waiting in iwm_load_firmware. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		/* Radio switch flipped off: take the interface down. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic RX only if no real RX cause is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	rv = 1;

 out_ena:
	/* Re-enable the interrupts masked at entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
6782 
/* Opaque match-argument type for the autoconf match function. */
typedef void *iwm_match_t;

/* PCI vendor/product IDs of supported adapters. */
static const struct pci_matchid iwm_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
};
6797 
6798 int
6799 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
6800 {
6801 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
6802 	    nitems(iwm_devices));
6803 }
6804 
/*
 * One-time late initialization: load the init firmware once to read
 * NVM contents (MAC address, supported bands/channels), then finish
 * net80211 setup.  On subsequent calls only refreshes the MAC address.
 * Returns 0 on success or an errno value.
 */
int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;
	/* Set once the first firmware load has succeeded. */
	static int attached;

	err = iwm_prepare_card_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	if (attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
		return 0;
	}

	err = iwm_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	/* Run the init firmware just long enough to read the NVM. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	/* Print version info and MAC address on first successful fw load. */
	attached = 1;
	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	/* Configure channel information obtained from firmware. */
	ieee80211_channel_init(ifp);

	/* Configure MAC address. */
	err = if_setlladdr(ifp, ic->ic_myaddr);
	if (err)
		printf("%s: could not set MAC address (error %d)\n",
		    DEVNAME(sc), err);

	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

	return 0;
}
6864 
/*
 * Deferred attach hook, run via config_mountroot() once the root
 * filesystem is available so the firmware image can be loaded.
 */
void
iwm_attach_hook(struct device *self)
{
	struct iwm_softc *sc = (void *)self;

	/* Must not run during autoconf; firmware load needs the fs. */
	KASSERT(!cold);

	iwm_preinit(sc);
}
6874 
6875 void
6876 iwm_attach(struct device *parent, struct device *self, void *aux)
6877 {
6878 	struct iwm_softc *sc = (void *)self;
6879 	struct pci_attach_args *pa = aux;
6880 	pci_intr_handle_t ih;
6881 	pcireg_t reg, memtype;
6882 	struct ieee80211com *ic = &sc->sc_ic;
6883 	struct ifnet *ifp = &ic->ic_if;
6884 	const char *intrstr;
6885 	int err;
6886 	int txq_i, i;
6887 
6888 	sc->sc_pct = pa->pa_pc;
6889 	sc->sc_pcitag = pa->pa_tag;
6890 	sc->sc_dmat = pa->pa_dmat;
6891 
6892 	task_set(&sc->sc_eswk, iwm_endscan_cb, sc);
6893 	rw_init(&sc->ioctl_rwl, "iwmioctl");
6894 
6895 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
6896 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
6897 	if (err == 0) {
6898 		printf("%s: PCIe capability structure not found!\n",
6899 		    DEVNAME(sc));
6900 		return;
6901 	}
6902 
6903 	/* Clear device-specific "PCI retry timeout" register (41h). */
6904 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
6905 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
6906 
6907 	/* Enable bus-mastering and hardware bug workaround. */
6908 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
6909 	reg |= PCI_COMMAND_MASTER_ENABLE;
6910 	/* if !MSI */
6911 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
6912 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
6913 	}
6914 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
6915 
6916 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
6917 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
6918 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
6919 	if (err) {
6920 		printf("%s: can't map mem space\n", DEVNAME(sc));
6921 		return;
6922 	}
6923 
6924 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
6925 		printf("%s: can't map interrupt\n", DEVNAME(sc));
6926 		return;
6927 	}
6928 
6929 	intrstr = pci_intr_string(sc->sc_pct, ih);
6930 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc,
6931 	    DEVNAME(sc));
6932 
6933 	if (sc->sc_ih == NULL) {
6934 		printf("\n");
6935 		printf("%s: can't establish interrupt", DEVNAME(sc));
6936 		if (intrstr != NULL)
6937 			printf(" at %s", intrstr);
6938 		printf("\n");
6939 		return;
6940 	}
6941 	printf(", %s\n", intrstr);
6942 
6943 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
6944 
6945 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6946 	switch (PCI_PRODUCT(pa->pa_id)) {
6947 	case PCI_PRODUCT_INTEL_WL_3160_1:
6948 	case PCI_PRODUCT_INTEL_WL_3160_2:
6949 		sc->sc_fwname = "iwm-3160-16";
6950 		sc->host_interrupt_operation_mode = 1;
6951 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
6952 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
6953 		break;
6954 	case PCI_PRODUCT_INTEL_WL_3165_1:
6955 	case PCI_PRODUCT_INTEL_WL_3165_2:
6956 		sc->sc_fwname = "iwm-7265-16";
6957 		sc->host_interrupt_operation_mode = 0;
6958 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
6959 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
6960 		break;
6961 	case PCI_PRODUCT_INTEL_WL_7260_1:
6962 	case PCI_PRODUCT_INTEL_WL_7260_2:
6963 		sc->sc_fwname = "iwm-7260-16";
6964 		sc->host_interrupt_operation_mode = 1;
6965 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
6966 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
6967 		break;
6968 	case PCI_PRODUCT_INTEL_WL_7265_1:
6969 	case PCI_PRODUCT_INTEL_WL_7265_2:
6970 		sc->sc_fwname = "iwm-7265-16";
6971 		sc->host_interrupt_operation_mode = 0;
6972 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
6973 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
6974 		break;
6975 	case PCI_PRODUCT_INTEL_WL_8260_1:
6976 	case PCI_PRODUCT_INTEL_WL_8260_2:
6977 		sc->sc_fwname = "iwm-8000C-16";
6978 		sc->host_interrupt_operation_mode = 0;
6979 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
6980 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
6981 		break;
6982 	default:
6983 		printf("%s: unknown adapter type\n", DEVNAME(sc));
6984 		return;
6985 	}
6986 
6987 	/*
6988 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6989 	 * changed, and now the revision step also includes bit 0-1 (no more
6990 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6991 	 * in the old format.
6992 	 */
6993 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6994 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6995 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6996 
6997 	if (iwm_prepare_card_hw(sc) != 0) {
6998 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
6999 		return;
7000 	}
7001 
7002 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7003 		uint32_t hw_step;
7004 
7005 		/*
7006 		 * In order to recognize C step the driver should read the
7007 		 * chip version id located at the AUX bus MISC address.
7008 		 */
7009 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7010 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7011 		DELAY(2);
7012 
7013 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7014 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7015 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7016 				   25000);
7017 		if (!err) {
7018 			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
7019 			return;
7020 		}
7021 
7022 		if (iwm_nic_lock(sc)) {
7023 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7024 			hw_step |= IWM_ENABLE_WFPM;
7025 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7026 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7027 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7028 			if (hw_step == 0x3)
7029 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7030 						(IWM_SILICON_C_STEP << 2);
7031 			iwm_nic_unlock(sc);
7032 		} else {
7033 			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
7034 			return;
7035 		}
7036 	}
7037 
7038 	/*
7039 	 * Allocate DMA memory for firmware transfers.
7040 	 * Must be aligned on a 16-byte boundary.
7041 	 */
7042 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
7043 	    sc->sc_fwdmasegsz, 16);
7044 	if (err) {
7045 		printf("%s: could not allocate memory for firmware\n",
7046 		    DEVNAME(sc));
7047 		return;
7048 	}
7049 
7050 	/* Allocate "Keep Warm" page, used internally by the card. */
7051 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7052 	if (err) {
7053 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
7054 		goto fail1;
7055 	}
7056 
7057 	/* Allocate interrupt cause table (ICT).*/
7058 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
7059 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
7060 	if (err) {
7061 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
7062 		goto fail2;
7063 	}
7064 
7065 	/* TX scheduler rings must be aligned on a 1KB boundary. */
7066 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7067 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7068 	if (err) {
7069 		printf("%s: could not allocate TX scheduler rings\n",
7070 		    DEVNAME(sc));
7071 		goto fail3;
7072 	}
7073 
7074 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
7075 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
7076 		if (err) {
7077 			printf("%s: could not allocate TX ring %d\n",
7078 			    DEVNAME(sc), txq_i);
7079 			goto fail4;
7080 		}
7081 	}
7082 
7083 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
7084 	if (err) {
7085 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
7086 		goto fail4;
7087 	}
7088 
7089 	sc->sc_eswq = taskq_create("iwmes", 1, IPL_NET, 0);
7090 	if (sc->sc_eswq == NULL)
7091 		goto fail4;
7092 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
7093 	if (sc->sc_nswq == NULL)
7094 		goto fail4;
7095 
7096 	/* Clear pending interrupts. */
7097 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
7098 
7099 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
7100 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
7101 	ic->ic_state = IEEE80211_S_INIT;
7102 
7103 	/* Set device capabilities. */
7104 	ic->ic_caps =
7105 	    IEEE80211_C_WEP |		/* WEP */
7106 	    IEEE80211_C_RSN |		/* WPA/RSN */
7107 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
7108 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
7109 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
7110 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
7111 
7112 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
7113 	ic->ic_htxcaps = 0;
7114 	ic->ic_txbfcaps = 0;
7115 	ic->ic_aselcaps = 0;
7116 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
7117 
7118 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7119 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
7120 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
7121 
7122 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
7123 		sc->sc_phyctxt[i].id = i;
7124 	}
7125 
7126 	sc->sc_amrr.amrr_min_success_threshold =  1;
7127 	sc->sc_amrr.amrr_max_success_threshold = 15;
7128 
7129 	/* IBSS channel undefined for now. */
7130 	ic->ic_ibss_chan = &ic->ic_channels[1];
7131 
7132 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
7133 
7134 	ifp->if_softc = sc;
7135 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
7136 	ifp->if_ioctl = iwm_ioctl;
7137 	ifp->if_start = iwm_start;
7138 	ifp->if_watchdog = iwm_watchdog;
7139 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
7140 
7141 	if_attach(ifp);
7142 	ieee80211_ifattach(ifp);
7143 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
7144 
7145 #if NBPFILTER > 0
7146 	iwm_radiotap_attach(sc);
7147 #endif
7148 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
7149 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
7150 	task_set(&sc->init_task, iwm_init_task, sc);
7151 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
7152 	task_set(&sc->setrates_task, iwm_setrates_task, sc);
7153 	task_set(&sc->ba_task, iwm_ba_task, sc);
7154 	task_set(&sc->htprot_task, iwm_htprot_task, sc);
7155 
7156 	ic->ic_node_alloc = iwm_node_alloc;
7157 
7158 	/* Override 802.11 state transition machine. */
7159 	sc->sc_newstate = ic->ic_newstate;
7160 	ic->ic_newstate = iwm_newstate;
7161 	ic->ic_update_htprot = iwm_update_htprot;
7162 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
7163 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
7164 #ifdef notyet
7165 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
7166 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
7167 #endif
7168 	/*
7169 	 * We cannot read the MAC address without loading the
7170 	 * firmware from disk. Postpone until mountroot is done.
7171 	 */
7172 	config_mountroot(self, iwm_attach_hook);
7173 
7174 	return;
7175 
7176 fail4:	while (--txq_i >= 0)
7177 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
7178 	iwm_free_rx_ring(sc, &sc->rxq);
7179 	iwm_dma_contig_free(&sc->sched_dma);
7180 fail3:	if (sc->ict_dma.vaddr != NULL)
7181 		iwm_dma_contig_free(&sc->ict_dma);
7182 
7183 fail2:	iwm_dma_contig_free(&sc->kw_dma);
7184 fail1:	iwm_dma_contig_free(&sc->fw_dma);
7185 	return;
7186 }
7187 
#if NBPFILTER > 0
/*
 * Attach the BPF tap used for radiotap capture and pre-fill the
 * constant parts of the TX and RX radiotap headers.
 */
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	/* Transmit tap header: total length and present-field bitmap. */
	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);

	/* Receive tap header: total length and present-field bitmap. */
	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
}
#endif
7204 
7205 void
7206 iwm_init_task(void *arg1)
7207 {
7208 	struct iwm_softc *sc = arg1;
7209 	struct ifnet *ifp = &sc->sc_ic.ic_if;
7210 	int s;
7211 
7212 	rw_enter_write(&sc->ioctl_rwl);
7213 	s = splnet();
7214 
7215 	iwm_stop(ifp, 0);
7216 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
7217 		iwm_init(ifp);
7218 
7219 	splx(s);
7220 	rw_exit(&sc->ioctl_rwl);
7221 }
7222 
7223 void
7224 iwm_wakeup(struct iwm_softc *sc)
7225 {
7226 	pcireg_t reg;
7227 
7228 	/* Clear device-specific "PCI retry timeout" register (41h). */
7229 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7230 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7231 
7232 	task_add(systq, &sc->init_task);
7233 }
7234 
7235 int
7236 iwm_activate(struct device *self, int act)
7237 {
7238 	struct iwm_softc *sc = (struct iwm_softc *)self;
7239 	struct ifnet *ifp = &sc->sc_ic.ic_if;
7240 
7241 	switch (act) {
7242 	case DVACT_SUSPEND:
7243 		if (ifp->if_flags & IFF_RUNNING)
7244 			iwm_stop(ifp, 0);
7245 		break;
7246 	case DVACT_WAKEUP:
7247 		iwm_wakeup(sc);
7248 		break;
7249 	}
7250 
7251 	return 0;
7252 }
7253 
/* Autoconf driver glue: device name "iwm", network-interface class. */
struct cfdriver iwm_cd = {
	NULL, "iwm", DV_IFNET
};
7257 
/*
 * Autoconf attachment glue: softc size, match and attach entry points;
 * no detach handler, iwm_activate handles suspend/resume.
 */
struct cfattach iwm_ca = {
	sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, iwm_activate
};
7262