xref: /netbsd-src/sys/dev/pci/if_iwm.c (revision 9616dacfef448e70e3fbbd865bddf60d54b656c5)
1 /*	$NetBSD: if_iwm.c,v 1.60 2017/01/10 08:40:27 nonaka Exp $	*/
2 /*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
3 #define IEEE80211_NO_HT
4 /*
5  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
6  *   Author: Stefan Sperling <stsp@openbsd.org>
7  * Copyright (c) 2014 Fixup Software Ltd.
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include <sys/cdefs.h>
110 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.60 2017/01/10 08:40:27 nonaka Exp $");
111 
112 #include <sys/param.h>
113 #include <sys/conf.h>
114 #include <sys/kernel.h>
115 #include <sys/kmem.h>
116 #include <sys/mbuf.h>
117 #include <sys/mutex.h>
118 #include <sys/proc.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/sysctl.h>
122 #include <sys/systm.h>
123 
124 #include <sys/cpu.h>
125 #include <sys/bus.h>
126 #include <sys/workqueue.h>
127 #include <machine/endian.h>
128 #include <machine/intr.h>
129 
130 #include <dev/pci/pcireg.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcidevs.h>
133 #include <dev/firmload.h>
134 
135 #include <net/bpf.h>
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 #include <net/if_ether.h>
140 
141 #include <netinet/in.h>
142 #include <netinet/ip.h>
143 
144 #include <net80211/ieee80211_var.h>
145 #include <net80211/ieee80211_amrr.h>
146 #include <net80211/ieee80211_radiotap.h>
147 
148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
150 
151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
153 
154 #ifdef IWM_DEBUG
155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
157 int iwm_debug = 0;
158 #else
159 #define DPRINTF(x)	do { ; } while (0)
160 #define DPRINTFN(n, x)	do { ; } while (0)
161 #endif
162 
163 #include <dev/pci/if_iwmreg.h>
164 #include <dev/pci/if_iwmvar.h>
165 
/*
 * Channel numbers whose flags are stored in the NVM of pre-8000
 * devices, listed in NVM storage order (2.4 GHz first, then 5 GHz).
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
174 
/*
 * Channel numbers known to the NVM of 8000-series devices; a superset
 * of iwm_nvm_channels, again in NVM storage order.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

/* Count of leading 2.4 GHz entries in both tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
185 
/*
 * Rate table mapping a net80211 rate to the firmware's legacy PLCP
 * signal code and the corresponding HT SISO MCS PLCP code
 * (IWM_RATE_HT_SISO_MCS_INV_PLCP where no HT equivalent exists).
 */
static const struct iwm_rate {
	uint8_t rate;		/* rate in 500 kbit/s units (2 == 1 Mbit/s) */
	uint8_t plcp;		/* legacy PLCP code */
	uint8_t ht_plcp;	/* HT SISO MCS PLCP code, or INV */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
/* Indices into iwm_rates[]: entries 0-3 are CCK, 4 and up are OFDM. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
211 
#ifndef IEEE80211_NO_HT
/*
 * Convert an MCS index into an iwm_rates[] index.
 * NOTE: compiled out in this build — IEEE80211_NO_HT is defined at the
 * top of this file.
 */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif
225 
/* One NVM section read from the device: raw bytes plus their length. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
230 
/*
 * Argument for a deferred net80211 state transition run from the
 * driver's workqueue (presumably queued by iwm_newstate() and executed
 * by iwm_newstate_cb() — see those functions).
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* requested new 802.11 state */
	int ns_arg;			/* argument for the transition */
	int ns_generation;		/* used to detect stale requests */
};
237 
238 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
239 static int	iwm_firmware_store_section(struct iwm_softc *,
240 		    enum iwm_ucode_type, uint8_t *, size_t);
241 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
242 static int	iwm_read_firmware(struct iwm_softc *);
243 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
244 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
245 #ifdef IWM_DEBUG
246 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
247 #endif
248 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
249 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
250 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
251 static int	iwm_nic_lock(struct iwm_softc *);
252 static void	iwm_nic_unlock(struct iwm_softc *);
253 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
254 		    uint32_t);
255 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
256 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
257 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
258 		    bus_size_t, bus_size_t);
259 static void	iwm_dma_contig_free(struct iwm_dma_info *);
260 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
261 static void	iwm_disable_rx_dma(struct iwm_softc *);
262 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
263 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
264 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
265 		    int);
266 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
267 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
268 static void	iwm_enable_rfkill_int(struct iwm_softc *);
269 static int	iwm_check_rfkill(struct iwm_softc *);
270 static void	iwm_enable_interrupts(struct iwm_softc *);
271 static void	iwm_restore_interrupts(struct iwm_softc *);
272 static void	iwm_disable_interrupts(struct iwm_softc *);
273 static void	iwm_ict_reset(struct iwm_softc *);
274 static int	iwm_set_hw_ready(struct iwm_softc *);
275 static int	iwm_prepare_card_hw(struct iwm_softc *);
276 static void	iwm_apm_config(struct iwm_softc *);
277 static int	iwm_apm_init(struct iwm_softc *);
278 static void	iwm_apm_stop(struct iwm_softc *);
279 static int	iwm_allow_mcast(struct iwm_softc *);
280 static int	iwm_start_hw(struct iwm_softc *);
281 static void	iwm_stop_device(struct iwm_softc *);
282 static void	iwm_nic_config(struct iwm_softc *);
283 static int	iwm_nic_rx_init(struct iwm_softc *);
284 static int	iwm_nic_tx_init(struct iwm_softc *);
285 static int	iwm_nic_init(struct iwm_softc *);
286 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
287 static int	iwm_post_alive(struct iwm_softc *);
288 static struct iwm_phy_db_entry *
289 		iwm_phy_db_get_section(struct iwm_softc *,
290 		    enum iwm_phy_db_section_type, uint16_t);
291 static int	iwm_phy_db_set_section(struct iwm_softc *,
292 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
293 static int	iwm_is_valid_channel(uint16_t);
294 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
295 static uint16_t iwm_channel_id_to_papd(uint16_t);
296 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
297 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
298 		    uint8_t **, uint16_t *, uint16_t);
299 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
300 		    void *);
301 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
302 		    enum iwm_phy_db_section_type, uint8_t);
303 static int	iwm_send_phy_db_data(struct iwm_softc *);
304 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
305 		    struct iwm_time_event_cmd_v1 *);
306 static int	iwm_send_time_event_cmd(struct iwm_softc *,
307 		    const struct iwm_time_event_cmd_v2 *);
308 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
309 		    uint32_t, uint32_t);
310 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
311 		    uint16_t, uint8_t *, uint16_t *);
312 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
313 		    uint16_t *, size_t);
314 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
315 		    const uint8_t *, size_t);
316 #ifndef IEEE80211_NO_HT
317 static void	iwm_setup_ht_rates(struct iwm_softc *);
318 static void	iwm_htprot_task(void *);
319 static void	iwm_update_htprot(struct ieee80211com *,
320 		    struct ieee80211_node *);
321 static int	iwm_ampdu_rx_start(struct ieee80211com *,
322 		    struct ieee80211_node *, uint8_t);
323 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
324 		    struct ieee80211_node *, uint8_t);
325 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
326 		    uint8_t, uint16_t, int);
327 #ifdef notyet
328 static int	iwm_ampdu_tx_start(struct ieee80211com *,
329 		    struct ieee80211_node *, uint8_t);
330 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
331 		    struct ieee80211_node *, uint8_t);
332 #endif
333 static void	iwm_ba_task(void *);
334 #endif
335 
336 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
337 		    const uint16_t *, const uint16_t *, const uint16_t *,
338 		    const uint16_t *, const uint16_t *);
339 static void	iwm_set_hw_address_8000(struct iwm_softc *,
340 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
341 static int	iwm_parse_nvm_sections(struct iwm_softc *,
342 		    struct iwm_nvm_section *);
343 static int	iwm_nvm_init(struct iwm_softc *);
344 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
345 		    const uint8_t *, uint32_t);
346 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
347 		    const uint8_t *, uint32_t);
348 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
349 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
350 		    struct iwm_fw_sects *, int , int *);
351 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
352 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
353 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
354 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
355 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
356 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
357 		    enum iwm_ucode_type);
358 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
359 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
360 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
361 static int	iwm_get_signal_strength(struct iwm_softc *,
362 		    struct iwm_rx_phy_info *);
363 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
364 		    struct iwm_rx_packet *, struct iwm_rx_data *);
365 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
366 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
367 		    struct iwm_rx_data *);
368 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,		    struct iwm_node *);
369 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
370 		    struct iwm_rx_data *);
371 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
372 		    uint32_t);
373 #if 0
374 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
375 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
376 #endif
377 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
378 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
379 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
380 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
381 		    uint8_t, uint8_t);
382 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
383 		    uint8_t, uint8_t, uint32_t, uint32_t);
384 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
385 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
386 		    uint16_t, const void *);
387 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
388 		    uint32_t *);
389 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
390 		    const void *, uint32_t *);
391 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
392 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
393 #if 0
394 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
395 		    uint16_t);
396 #endif
397 static const struct iwm_rate *
398 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
399 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
400 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
401 		    struct ieee80211_node *, int);
402 static void	iwm_led_enable(struct iwm_softc *);
403 static void	iwm_led_disable(struct iwm_softc *);
404 static int	iwm_led_is_enabled(struct iwm_softc *);
405 static void	iwm_led_blink_timeout(void *);
406 static void	iwm_led_blink_start(struct iwm_softc *);
407 static void	iwm_led_blink_stop(struct iwm_softc *);
408 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
409 		    struct iwm_beacon_filter_cmd *);
410 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
411 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
412 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
413 		    int);
414 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
415 		    struct iwm_mac_power_cmd *);
416 static int	iwm_power_mac_update_mode(struct iwm_softc *,
417 		    struct iwm_node *);
418 static int	iwm_power_update_device(struct iwm_softc *);
419 #ifdef notyet
420 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
421 #endif
422 static int	iwm_disable_beacon_filter(struct iwm_softc *);
423 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
424 static int	iwm_add_aux_sta(struct iwm_softc *);
425 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
426 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
427 #ifdef notyet
428 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
429 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
430 #endif
431 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
432 		    struct iwm_scan_channel_cfg_lmac *, int);
433 static int	iwm_fill_probe_req(struct iwm_softc *,
434 		    struct iwm_scan_probe_req *);
435 static int	iwm_lmac_scan(struct iwm_softc *);
436 static int	iwm_config_umac_scan(struct iwm_softc *);
437 static int	iwm_umac_scan(struct iwm_softc *);
438 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
439 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
440 		    int *);
441 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
442 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
443 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
444 		    struct iwm_mac_data_sta *, int);
445 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
446 		    uint32_t, int);
447 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
448 static int	iwm_auth(struct iwm_softc *);
449 static int	iwm_assoc(struct iwm_softc *);
450 static void	iwm_calib_timeout(void *);
451 #ifndef IEEE80211_NO_HT
452 static void	iwm_setrates_task(void *);
453 static int	iwm_setrates(struct iwm_node *);
454 #endif
455 static int	iwm_media_change(struct ifnet *);
456 static void	iwm_newstate_cb(struct work *, void *);
457 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
458 static void	iwm_endscan(struct iwm_softc *);
459 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
460 		    struct ieee80211_node *);
461 static int	iwm_sf_config(struct iwm_softc *, int);
462 static int	iwm_send_bt_init_conf(struct iwm_softc *);
463 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
464 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
465 static int	iwm_init_hw(struct iwm_softc *);
466 static int	iwm_init(struct ifnet *);
467 static void	iwm_start(struct ifnet *);
468 static void	iwm_stop(struct ifnet *, int);
469 static void	iwm_watchdog(struct ifnet *);
470 static int	iwm_ioctl(struct ifnet *, u_long, void *);
471 #ifdef IWM_DEBUG
472 static const char *iwm_desc_lookup(uint32_t);
473 static void	iwm_nic_error(struct iwm_softc *);
474 static void	iwm_nic_umac_error(struct iwm_softc *);
475 #endif
476 static void	iwm_notif_intr(struct iwm_softc *);
477 static void	iwm_softintr(void *);
478 static int	iwm_intr(void *);
479 static int	iwm_preinit(struct iwm_softc *);
480 static void	iwm_attach_hook(device_t);
481 static void	iwm_attach(device_t, device_t, void *);
482 #if 0
483 static void	iwm_init_task(void *);
484 static int	iwm_activate(device_t, enum devact);
485 static void	iwm_wakeup(struct iwm_softc *);
486 #endif
487 static void	iwm_radiotap_attach(struct iwm_softc *);
488 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
489 
490 static int iwm_sysctl_root_num;
491 static int iwm_lar_disable;
492 
493 static int
494 iwm_firmload(struct iwm_softc *sc)
495 {
496 	struct iwm_fw_info *fw = &sc->sc_fw;
497 	firmware_handle_t fwh;
498 	int err;
499 
500 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
501 		return 0;
502 
503 	/* Open firmware image. */
504 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
505 	if (err) {
506 		aprint_error_dev(sc->sc_dev,
507 		    "could not get firmware handle %s\n", sc->sc_fwname);
508 		return err;
509 	}
510 
511 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
512 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
513 		fw->fw_rawdata = NULL;
514 	}
515 
516 	fw->fw_rawsize = firmware_get_size(fwh);
517 	/*
518 	 * Well, this is how the Linux driver checks it ....
519 	 */
520 	if (fw->fw_rawsize < sizeof(uint32_t)) {
521 		aprint_error_dev(sc->sc_dev,
522 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
523 		err = EINVAL;
524 		goto out;
525 	}
526 
527 	/* Read the firmware. */
528 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
529 	if (fw->fw_rawdata == NULL) {
530 		aprint_error_dev(sc->sc_dev,
531 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
532 		err = ENOMEM;
533 		goto out;
534 	}
535 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
536 	if (err) {
537 		aprint_error_dev(sc->sc_dev,
538 		    "could not read firmware %s\n", sc->sc_fwname);
539 		goto out;
540 	}
541 
542 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
543  out:
544 	/* caller will release memory, if necessary */
545 
546 	firmware_close(fwh);
547 	return err;
548 }
549 
550 /*
551  * just maintaining status quo.
552  */
553 static void
554 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
555 {
556 	struct ieee80211com *ic = &sc->sc_ic;
557 	struct ieee80211_frame *wh;
558 	uint8_t subtype;
559 
560 	wh = mtod(m, struct ieee80211_frame *);
561 
562 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
563 		return;
564 
565 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
566 
567 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
568 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
569 		return;
570 
571 	int chan = le32toh(sc->sc_last_phy_info.channel);
572 	if (chan < __arraycount(ic->ic_channels))
573 		ic->ic_curchan = &ic->ic_channels[chan];
574 }
575 
576 static int
577 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
578 {
579 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
580 
581 	if (dlen < sizeof(*l) ||
582 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
583 		return EINVAL;
584 
585 	/* we don't actually store anything for now, always use s/w crypto */
586 
587 	return 0;
588 }
589 
590 static int
591 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
592     uint8_t *data, size_t dlen)
593 {
594 	struct iwm_fw_sects *fws;
595 	struct iwm_fw_onesect *fwone;
596 
597 	if (type >= IWM_UCODE_TYPE_MAX)
598 		return EINVAL;
599 	if (dlen < sizeof(uint32_t))
600 		return EINVAL;
601 
602 	fws = &sc->sc_fw.fw_sects[type];
603 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
604 		return EINVAL;
605 
606 	fwone = &fws->fw_sect[fws->fw_count];
607 
608 	/* first 32bit are device load offset */
609 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
610 
611 	/* rest is data */
612 	fwone->fws_data = data + sizeof(uint32_t);
613 	fwone->fws_len = dlen - sizeof(uint32_t);
614 
615 	/* for freeing the buffer during driver unload */
616 	fwone->fws_alloc = data;
617 	fwone->fws_allocsize = dlen;
618 
619 	fws->fw_count++;
620 	fws->fw_totlen += fwone->fws_len;
621 
622 	return 0;
623 }
624 
/* On-disk layout of a DEF_CALIB firmware TLV (fields little-endian). */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;	/* ucode image this calibration applies to */
	struct iwm_tlv_calib_ctrl calib;	/* default trigger masks */
} __packed;
629 
630 static int
631 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
632 {
633 	const struct iwm_tlv_calib_data *def_calib = data;
634 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
635 
636 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
637 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
638 		    DEVNAME(sc), ucode_type));
639 		return EINVAL;
640 	}
641 
642 	sc->sc_default_calib[ucode_type].flow_trigger =
643 	    def_calib->calib.flow_trigger;
644 	sc->sc_default_calib[ucode_type].event_trigger =
645 	    def_calib->calib.event_trigger;
646 
647 	return 0;
648 }
649 
650 static int
651 iwm_read_firmware(struct iwm_softc *sc)
652 {
653 	struct iwm_fw_info *fw = &sc->sc_fw;
654 	struct iwm_tlv_ucode_header *uhdr;
655 	struct iwm_ucode_tlv tlv;
656 	enum iwm_ucode_tlv_type tlv_type;
657 	uint8_t *data;
658 	int err, status;
659 	size_t len;
660 
661 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
662 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
663 	} else {
664 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
665 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
666 	}
667 	status = fw->fw_status;
668 
669 	if (status == IWM_FW_STATUS_DONE)
670 		return 0;
671 
672 	err = iwm_firmload(sc);
673 	if (err) {
674 		aprint_error_dev(sc->sc_dev,
675 		    "could not read firmware %s (error %d)\n",
676 		    sc->sc_fwname, err);
677 		goto out;
678 	}
679 
680 	sc->sc_capaflags = 0;
681 	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
682 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
683 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
684 
685 	uhdr = (void *)fw->fw_rawdata;
686 	if (*(uint32_t *)fw->fw_rawdata != 0
687 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
688 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
689 		    sc->sc_fwname);
690 		err = EINVAL;
691 		goto out;
692 	}
693 
694 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
695 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
696 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
697 	    IWM_UCODE_API(le32toh(uhdr->ver)));
698 	data = uhdr->data;
699 	len = fw->fw_rawsize - sizeof(*uhdr);
700 
701 	while (len >= sizeof(tlv)) {
702 		size_t tlv_len;
703 		void *tlv_data;
704 
705 		memcpy(&tlv, data, sizeof(tlv));
706 		tlv_len = le32toh(tlv.length);
707 		tlv_type = le32toh(tlv.type);
708 
709 		len -= sizeof(tlv);
710 		data += sizeof(tlv);
711 		tlv_data = data;
712 
713 		if (len < tlv_len) {
714 			aprint_error_dev(sc->sc_dev,
715 			    "firmware too short: %zu bytes\n", len);
716 			err = EINVAL;
717 			goto parse_out;
718 		}
719 
720 		switch (tlv_type) {
721 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
722 			if (tlv_len < sizeof(uint32_t)) {
723 				err = EINVAL;
724 				goto parse_out;
725 			}
726 			sc->sc_capa_max_probe_len
727 			    = le32toh(*(uint32_t *)tlv_data);
728 			/* limit it to something sensible */
729 			if (sc->sc_capa_max_probe_len >
730 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
731 				err = EINVAL;
732 				goto parse_out;
733 			}
734 			break;
735 		case IWM_UCODE_TLV_PAN:
736 			if (tlv_len) {
737 				err = EINVAL;
738 				goto parse_out;
739 			}
740 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
741 			break;
742 		case IWM_UCODE_TLV_FLAGS:
743 			if (tlv_len < sizeof(uint32_t)) {
744 				err = EINVAL;
745 				goto parse_out;
746 			}
747 			/*
748 			 * Apparently there can be many flags, but Linux driver
749 			 * parses only the first one, and so do we.
750 			 *
751 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
752 			 * Intentional or a bug?  Observations from
753 			 * current firmware file:
754 			 *  1) TLV_PAN is parsed first
755 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
756 			 * ==> this resets TLV_PAN to itself... hnnnk
757 			 */
758 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
759 			break;
760 		case IWM_UCODE_TLV_CSCHEME:
761 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
762 			if (err)
763 				goto parse_out;
764 			break;
765 		case IWM_UCODE_TLV_NUM_OF_CPU: {
766 			uint32_t num_cpu;
767 			if (tlv_len != sizeof(uint32_t)) {
768 				err = EINVAL;
769 				goto parse_out;
770 			}
771 			num_cpu = le32toh(*(uint32_t *)tlv_data);
772 			if (num_cpu < 1 || num_cpu > 2) {
773 				err = EINVAL;
774 				goto parse_out;
775 			}
776 			break;
777 		}
778 		case IWM_UCODE_TLV_SEC_RT:
779 			err = iwm_firmware_store_section(sc,
780 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
781 			if (err)
782 				goto parse_out;
783 			break;
784 		case IWM_UCODE_TLV_SEC_INIT:
785 			err = iwm_firmware_store_section(sc,
786 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
787 			if (err)
788 				goto parse_out;
789 			break;
790 		case IWM_UCODE_TLV_SEC_WOWLAN:
791 			err = iwm_firmware_store_section(sc,
792 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
793 			if (err)
794 				goto parse_out;
795 			break;
796 		case IWM_UCODE_TLV_DEF_CALIB:
797 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
798 				err = EINVAL;
799 				goto parse_out;
800 			}
801 			err = iwm_set_default_calib(sc, tlv_data);
802 			if (err)
803 				goto parse_out;
804 			break;
805 		case IWM_UCODE_TLV_PHY_SKU:
806 			if (tlv_len != sizeof(uint32_t)) {
807 				err = EINVAL;
808 				goto parse_out;
809 			}
810 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
811 			break;
812 
813 		case IWM_UCODE_TLV_API_CHANGES_SET: {
814 			struct iwm_ucode_api *api;
815 			if (tlv_len != sizeof(*api)) {
816 				err = EINVAL;
817 				goto parse_out;
818 			}
819 			api = (struct iwm_ucode_api *)tlv_data;
820 			/* Flags may exceed 32 bits in future firmware. */
821 			if (le32toh(api->api_index) > 0) {
822 				goto parse_out;
823 			}
824 			sc->sc_ucode_api = le32toh(api->api_flags);
825 			break;
826 		}
827 
828 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
829 			struct iwm_ucode_capa *capa;
830 			int idx, i;
831 			if (tlv_len != sizeof(*capa)) {
832 				err = EINVAL;
833 				goto parse_out;
834 			}
835 			capa = (struct iwm_ucode_capa *)tlv_data;
836 			idx = le32toh(capa->api_index);
837 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
838 				goto parse_out;
839 			}
840 			for (i = 0; i < 32; i++) {
841 				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
842 					continue;
843 				setbit(sc->sc_enabled_capa, i + (32 * idx));
844 			}
845 			break;
846 		}
847 
848 		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
849 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
850 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
851 			/* ignore, not used by current driver */
852 			break;
853 
854 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
855 			err = iwm_firmware_store_section(sc,
856 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
857 			    tlv_len);
858 			if (err)
859 				goto parse_out;
860 			break;
861 
862 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
863 			if (tlv_len != sizeof(uint32_t)) {
864 				err = EINVAL;
865 				goto parse_out;
866 			}
867 			sc->sc_capa_n_scan_channels =
868 			  le32toh(*(uint32_t *)tlv_data);
869 			break;
870 
871 		case IWM_UCODE_TLV_FW_VERSION:
872 			if (tlv_len != sizeof(uint32_t) * 3) {
873 				err = EINVAL;
874 				goto parse_out;
875 			}
876 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
877 			    "%d.%d.%d",
878 			    le32toh(((uint32_t *)tlv_data)[0]),
879 			    le32toh(((uint32_t *)tlv_data)[1]),
880 			    le32toh(((uint32_t *)tlv_data)[2]));
881 			break;
882 
883 		default:
884 			DPRINTF(("%s: unknown firmware section %d, abort\n",
885 			    DEVNAME(sc), tlv_type));
886 			err = EINVAL;
887 			goto parse_out;
888 		}
889 
890 		len -= roundup(tlv_len, 4);
891 		data += roundup(tlv_len, 4);
892 	}
893 
894 	KASSERT(err == 0);
895 
896  parse_out:
897 	if (err) {
898 		aprint_error_dev(sc->sc_dev,
899 		    "firmware parse error, section type %d\n", tlv_type);
900 	}
901 
902 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
903 		aprint_error_dev(sc->sc_dev,
904 		    "device uses unsupported power ops\n");
905 		err = ENOTSUP;
906 	}
907 
908  out:
909 	if (err)
910 		fw->fw_status = IWM_FW_STATUS_NONE;
911 	else
912 		fw->fw_status = IWM_FW_STATUS_DONE;
913 	wakeup(&sc->sc_fw);
914 
915 	if (err && fw->fw_rawdata != NULL) {
916 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
917 		fw->fw_rawdata = NULL;
918 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
919 		/* don't touch fw->fw_status */
920 		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
921 	}
922 	return err;
923 }
924 
925 static uint32_t
926 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
927 {
928 	IWM_WRITE(sc,
929 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
930 	IWM_BARRIER_READ_WRITE(sc);
931 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
932 }
933 
934 static void
935 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
936 {
937 	IWM_WRITE(sc,
938 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
939 	IWM_BARRIER_WRITE(sc);
940 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
941 }
942 
943 #ifdef IWM_DEBUG
944 static int
945 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
946 {
947 	int offs;
948 	uint32_t *vals = buf;
949 
950 	if (iwm_nic_lock(sc)) {
951 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
952 		for (offs = 0; offs < dwords; offs++)
953 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
954 		iwm_nic_unlock(sc);
955 		return 0;
956 	}
957 	return EBUSY;
958 }
959 #endif
960 
961 static int
962 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
963 {
964 	int offs;
965 	const uint32_t *vals = buf;
966 
967 	if (iwm_nic_lock(sc)) {
968 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
969 		/* WADDR auto-increments */
970 		for (offs = 0; offs < dwords; offs++) {
971 			uint32_t val = vals ? vals[offs] : 0;
972 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
973 		}
974 		iwm_nic_unlock(sc);
975 		return 0;
976 	}
977 	return EBUSY;
978 }
979 
980 static int
981 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
982 {
983 	return iwm_write_mem(sc, addr, &val, 1);
984 }
985 
986 static int
987 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
988     int timo)
989 {
990 	for (;;) {
991 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
992 			return 1;
993 		}
994 		if (timo < 10) {
995 			return 0;
996 		}
997 		timo -= 10;
998 		DELAY(10);
999 	}
1000 }
1001 
/*
 * Request "MAC access" so device registers remain reachable while the
 * MAC clock might otherwise be gated.  Returns 1 on success, 0 on
 * timeout (after forcing an NMI on the device).  Pair every successful
 * call with iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	/* An in-flight host command already keeps the NIC awake. */
	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* 8000-series parts need a short settle time before polling. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Wait up to 15ms for the MAC clock to be ready and not sleeping. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		/* Kick the device with an NMI; caller sees failure. */
		aprint_error_dev(sc->sc_dev, "device timeout\n");
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
1028 
1029 static void
1030 iwm_nic_unlock(struct iwm_softc *sc)
1031 {
1032 
1033 	if (sc->sc_cmd_hold_nic_awake)
1034 		return;
1035 
1036 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1037 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1038 }
1039 
1040 static void
1041 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1042     uint32_t mask)
1043 {
1044 	uint32_t val;
1045 
1046 	/* XXX: no error path? */
1047 	if (iwm_nic_lock(sc)) {
1048 		val = iwm_read_prph(sc, reg) & mask;
1049 		val |= bits;
1050 		iwm_write_prph(sc, reg, val);
1051 		iwm_nic_unlock(sc);
1052 	}
1053 }
1054 
/* Set "bits" in a PRPH register, preserving all other bits. */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
1060 
/* Clear "bits" in a PRPH register (mask drops them, nothing is set). */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1066 
/*
 * Allocate "size" bytes of physically contiguous DMA-safe memory with
 * the given alignment, map it into kernel VA and load it into a DMA
 * map.  The memory is zeroed and pre-synced for the device.  On any
 * failure, everything allocated so far is torn down and the bus_dma
 * error code is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* Single segment: the device expects one contiguous region. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Hand zeroed memory to the device. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
1106 
1107 static void
1108 iwm_dma_contig_free(struct iwm_dma_info *dma)
1109 {
1110 	if (dma->map != NULL) {
1111 		if (dma->vaddr != NULL) {
1112 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1113 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1114 			bus_dmamap_unload(dma->tag, dma->map);
1115 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1116 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1117 			dma->vaddr = NULL;
1118 		}
1119 		bus_dmamap_destroy(dma->tag, dma->map);
1120 		dma->map = NULL;
1121 	}
1122 }
1123 
/*
 * Allocate the RX ring: descriptor array, status area shared with the
 * hardware, and one DMA map plus an initial receive buffer per slot.
 * On failure everything is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		/* One contiguous segment per receive buffer. */
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an initial receive buffer to this slot. */
		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1174 
1175 static void
1176 iwm_disable_rx_dma(struct iwm_softc *sc)
1177 {
1178 	int ntries;
1179 
1180 	if (iwm_nic_lock(sc)) {
1181 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1182 		for (ntries = 0; ntries < 1000; ntries++) {
1183 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1184 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1185 				break;
1186 			DELAY(10);
1187 		}
1188 		iwm_nic_unlock(sc);
1189 	}
1190 }
1191 
1192 void
1193 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1194 {
1195 	ring->cur = 0;
1196 	memset(ring->stat, 0, sizeof(*ring->stat));
1197 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1198 	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1199 }
1200 
1201 static void
1202 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1203 {
1204 	int i;
1205 
1206 	iwm_dma_contig_free(&ring->desc_dma);
1207 	iwm_dma_contig_free(&ring->stat_dma);
1208 
1209 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1210 		struct iwm_rx_data *data = &ring->data[i];
1211 
1212 		if (data->m != NULL) {
1213 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1214 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1215 			bus_dmamap_unload(sc->sc_dmat, data->map);
1216 			m_freem(data->m);
1217 			data->m = NULL;
1218 		}
1219 		if (data->map != NULL) {
1220 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1221 			data->map = NULL;
1222 		}
1223 	}
1224 }
1225 
/*
 * Allocate TX ring "qid": the descriptor array for every ring, plus —
 * for rings up to and including the command queue — the device-command
 * array and one DMA map per slot.  Each slot caches the bus addresses
 * of its command and of the scratch field inside that command.
 * On failure everything is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command array, recording each slot's bus addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		/* Bus address of the scratch field within this command. */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* We must have walked exactly the whole command array. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1293 
/*
 * Drop the "host command in flight" wake request taken by
 * iwm_set_cmd_in_flight(), letting the NIC go back to sleep.  Only
 * meaningful on NICs with the APMG wake-up workaround.
 */
static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	/* Should only be called while a wake request is outstanding. */
	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
1311 
/*
 * Keep the NIC awake while a host command is in flight (paired with
 * iwm_clear_cmd_in_flight()).  Returns 0 on success, EIO if the NIC
 * could not be woken within 15ms.
 */
static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		/* Wait up to 15ms for the MAC clock, not going to sleep. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			/* Roll back the access request before failing. */
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}
1345 static void
1346 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1347 {
1348 	int i;
1349 
1350 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1351 		struct iwm_tx_data *data = &ring->data[i];
1352 
1353 		if (data->m != NULL) {
1354 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1355 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1356 			bus_dmamap_unload(sc->sc_dmat, data->map);
1357 			m_freem(data->m);
1358 			data->m = NULL;
1359 		}
1360 	}
1361 	/* Clear TX descriptors. */
1362 	memset(ring->desc, 0, ring->desc_dma.size);
1363 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1364 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1365 	sc->qfullmsk &= ~(1 << ring->qid);
1366 	ring->queued = 0;
1367 	ring->cur = 0;
1368 
1369 	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
1370 		iwm_clear_cmd_in_flight(sc);
1371 }
1372 
1373 static void
1374 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1375 {
1376 	int i;
1377 
1378 	iwm_dma_contig_free(&ring->desc_dma);
1379 	iwm_dma_contig_free(&ring->cmd_dma);
1380 
1381 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1382 		struct iwm_tx_data *data = &ring->data[i];
1383 
1384 		if (data->m != NULL) {
1385 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1386 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1387 			bus_dmamap_unload(sc->sc_dmat, data->map);
1388 			m_freem(data->m);
1389 			data->m = NULL;
1390 		}
1391 		if (data->map != NULL) {
1392 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1393 			data->map = NULL;
1394 		}
1395 	}
1396 }
1397 
/* Mask all interrupts except RF-kill and program the mask register. */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1404 
1405 static int
1406 iwm_check_rfkill(struct iwm_softc *sc)
1407 {
1408 	uint32_t v;
1409 	int s;
1410 	int rv;
1411 
1412 	s = splnet();
1413 
1414 	/*
1415 	 * "documentation" is not really helpful here:
1416 	 *  27:	HW_RF_KILL_SW
1417 	 *	Indicates state of (platform's) hardware RF-Kill switch
1418 	 *
1419 	 * But apparently when it's off, it's on ...
1420 	 */
1421 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1422 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1423 	if (rv) {
1424 		sc->sc_flags |= IWM_FLAG_RFKILL;
1425 	} else {
1426 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1427 	}
1428 
1429 	splx(s);
1430 	return rv;
1431 }
1432 
/* Enable the full default interrupt set and program the mask register. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1439 
/* Re-program the interrupt mask register from the cached sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1445 
/*
 * Mask all interrupt sources and acknowledge anything pending, so no
 * stale interrupt fires once re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1459 
/*
 * Clear the Interrupt Cause Table, point the device at it, and switch
 * the driver into ICT interrupt mode with interrupts re-enabled.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Zero the table and make the cleared contents visible to the HW. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending, then unmask. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1483 
1484 #define IWM_HW_READY_TIMEOUT 50
1485 static int
1486 iwm_set_hw_ready(struct iwm_softc *sc)
1487 {
1488 	int ready;
1489 
1490 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1491 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1492 
1493 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1494 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1495 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1496 	    IWM_HW_READY_TIMEOUT);
1497 	if (ready)
1498 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1499 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1500 
1501 	return ready;
1502 }
1503 #undef IWM_HW_READY_TIMEOUT
1504 
1505 static int
1506 iwm_prepare_card_hw(struct iwm_softc *sc)
1507 {
1508 	int t = 0;
1509 
1510 	if (iwm_set_hw_ready(sc))
1511 		return 0;
1512 
1513 	DELAY(100);
1514 
1515 	/* If HW is not ready, prepare the conditions to check again */
1516 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1517 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1518 
1519 	do {
1520 		if (iwm_set_hw_ready(sc))
1521 			return 0;
1522 		DELAY(200);
1523 		t += 200;
1524 	} while (t < 150000);
1525 
1526 	return ETIMEDOUT;
1527 }
1528 
1529 static void
1530 iwm_apm_config(struct iwm_softc *sc)
1531 {
1532 	pcireg_t reg;
1533 
1534 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1535 	    sc->sc_cap_off + PCIE_LCSR);
1536 	if (reg & PCIE_LCSR_ASPM_L1) {
1537 		/* Um the Linux driver prints "Disabling L0S for this one ... */
1538 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1539 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1540 	} else {
1541 		/* ... and "Enabling" here */
1542 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1543 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1544 	}
1545 }
1546 
1547 /*
1548  * Start up NIC's basic functionality after it has been reset
1549  * e.g. after platform boot or shutdown.
1550  * NOTE:  This does not load uCode nor start the embedded processor
1551  */
1552 static int
1553 iwm_apm_init(struct iwm_softc *sc)
1554 {
1555 	int err = 0;
1556 
1557 	/* Disable L0S exit timer (platform NMI workaround) */
1558 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
1559 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1560 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1561 
1562 	/*
1563 	 * Disable L0s without affecting L1;
1564 	 *  don't wait for ICH L0s (ICH bug W/A)
1565 	 */
1566 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1567 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1568 
1569 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1570 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1571 
1572 	/*
1573 	 * Enable HAP INTA (interrupt from management bus) to
1574 	 * wake device's PCI Express link L1a -> L0s
1575 	 */
1576 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1577 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1578 
1579 	iwm_apm_config(sc);
1580 
1581 #if 0 /* not for 7k/8k */
1582 	/* Configure analog phase-lock-loop before activating to D0A */
1583 	if (trans->cfg->base_params->pll_cfg_val)
1584 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1585 		    trans->cfg->base_params->pll_cfg_val);
1586 #endif
1587 
1588 	/*
1589 	 * Set "initialization complete" bit to move adapter from
1590 	 * D0U* --> D0A* (powered-up active) state.
1591 	 */
1592 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1593 
1594 	/*
1595 	 * Wait for clock stabilization; once stabilized, access to
1596 	 * device-internal resources is supported, e.g. iwm_write_prph()
1597 	 * and accesses to uCode SRAM.
1598 	 */
1599 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1600 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1601 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1602 		aprint_error_dev(sc->sc_dev,
1603 		    "timeout waiting for clock stabilization\n");
1604 		err = ETIMEDOUT;
1605 		goto out;
1606 	}
1607 
1608 	if (sc->host_interrupt_operation_mode) {
1609 		/*
1610 		 * This is a bit of an abuse - This is needed for 7260 / 3160
1611 		 * only check host_interrupt_operation_mode even if this is
1612 		 * not related to host_interrupt_operation_mode.
1613 		 *
1614 		 * Enable the oscillator to count wake up time for L1 exit. This
1615 		 * consumes slightly more power (100uA) - but allows to be sure
1616 		 * that we wake up from L1 on time.
1617 		 *
1618 		 * This looks weird: read twice the same register, discard the
1619 		 * value, set a bit, and yet again, read that same register
1620 		 * just to discard the value. But that's the way the hardware
1621 		 * seems to like it.
1622 		 */
1623 		iwm_read_prph(sc, IWM_OSC_CLK);
1624 		iwm_read_prph(sc, IWM_OSC_CLK);
1625 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1626 		iwm_read_prph(sc, IWM_OSC_CLK);
1627 		iwm_read_prph(sc, IWM_OSC_CLK);
1628 	}
1629 
1630 	/*
1631 	 * Enable DMA clock and wait for it to stabilize.
1632 	 *
1633 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1634 	 * do not disable clocks.  This preserves any hardware bits already
1635 	 * set by default in "CLK_CTRL_REG" after reset.
1636 	 */
1637 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1638 		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1639 		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1640 		DELAY(20);
1641 
1642 		/* Disable L1-Active */
1643 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1644 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1645 
1646 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1647 		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1648 		    IWM_APMG_RTC_INT_STT_RFKILL);
1649 	}
1650  out:
1651 	if (err)
1652 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1653 	return err;
1654 }
1655 
/*
 * Stop the device's busmaster DMA activity and wait (up to 100us) for
 * the master-disabled acknowledgement.  Timeout is logged but not
 * fatal.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1668 
/*
 * Bring the hardware up from cold: prepare the card, soft-reset it,
 * run APM init, then arm the RF-kill interrupt and sample the switch.
 * Returns 0 on success or the first error encountered.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1691 
/*
 * Stop the device: quiesce interrupts and DMA, drain all rings, power
 * down the busmaster clocks, put the APM in low-power state and reset
 * the on-board processor.  RF-kill interrupts stay armed so switch
 * changes are still noticed while the device is down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to 4ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1754 
/*
 * Program the HW_IF_CONFIG register with the MAC revision (from
 * sc_hw_rev) and the radio type/step/dash extracted from the firmware's
 * PHY configuration TLV (sc_fw_phy_config).
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio configuration fields from the firmware TLV. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC step and dash from the hardware revision register. */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1793 
/*
 * Program the RX DMA engine: clear the status area, point the hardware
 * at the descriptor ring and status area, and enable RX with the
 * driver's channel-0 configuration.  Returns EBUSY if the NIC could
 * not be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Clear the shared RX status area before the device uses it. */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Stop RX DMA and reset all channel-0 pointers. */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1845 
/*
 * Program the TX side: deactivate the scheduler, set the "keep warm"
 * page, and point the hardware at every TX ring's descriptor array.
 * Returns EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1876 
/*
 * One-time NIC bring-up: power management, device configuration, and
 * the RX/TX DMA rings.  Returns 0 or an errno from the ring init.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int err;

	iwm_apm_init(sc);
	/* 7000 family: select VMAIN as the power source. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);

	iwm_nic_config(sc);

	err = iwm_nic_rx_init(sc);
	if (err)
		return err;

	err = iwm_nic_tx_init(sc);
	if (err)
		return err;

	DPRINTF(("shadow registers enabled\n"));
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1903 
/* Map an EDCA access-category index to the hardware TX FIFO number. */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
1910 
1911 static int
1912 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1913 {
1914 	if (!iwm_nic_lock(sc)) {
1915 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1916 		return EBUSY;
1917 	}
1918 
1919 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1920 
1921 	if (qid == IWM_CMD_QUEUE) {
1922 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1923 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1924 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1925 
1926 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1927 
1928 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1929 
1930 		iwm_write_mem32(sc,
1931 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1932 
1933 		/* Set scheduler window size and frame limit. */
1934 		iwm_write_mem32(sc,
1935 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1936 		    sizeof(uint32_t),
1937 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1938 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1939 		    ((IWM_FRAME_LIMIT
1940 		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1941 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1942 
1943 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1944 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1945 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1946 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1947 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1948 	} else {
1949 		struct iwm_scd_txq_cfg_cmd cmd;
1950 		int err;
1951 
1952 		iwm_nic_unlock(sc);
1953 
1954 		memset(&cmd, 0, sizeof(cmd));
1955 		cmd.scd_queue = qid;
1956 		cmd.enable = 1;
1957 		cmd.sta_id = sta_id;
1958 		cmd.tx_fifo = fifo;
1959 		cmd.aggregate = 0;
1960 		cmd.window = IWM_FRAME_LIMIT;
1961 
1962 		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
1963 		    &cmd);
1964 		if (err)
1965 			return err;
1966 
1967 		if (!iwm_nic_lock(sc))
1968 			return EBUSY;
1969 	}
1970 
1971 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1972 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1973 
1974 	iwm_nic_unlock(sc);
1975 
1976 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
1977 
1978 	return 0;
1979 }
1980 
/*
 * Finish bring-up after the firmware has reported "alive": verify the
 * scheduler SRAM base, clear scheduler state, enable the command
 * queue, the TX scheduler, and the FH DMA channels.
 * Returns 0 or an errno.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware's scheduler base must match what we programmed. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		err = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* Release the NIC lock: iwm_enable_txq() takes it itself. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return err;
}
2048 
2049 static struct iwm_phy_db_entry *
2050 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2051     uint16_t chg_id)
2052 {
2053 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2054 
2055 	if (type >= IWM_PHY_DB_MAX)
2056 		return NULL;
2057 
2058 	switch (type) {
2059 	case IWM_PHY_DB_CFG:
2060 		return &phy_db->cfg;
2061 	case IWM_PHY_DB_CALIB_NCH:
2062 		return &phy_db->calib_nch;
2063 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2064 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2065 			return NULL;
2066 		return &phy_db->calib_ch_group_papd[chg_id];
2067 	case IWM_PHY_DB_CALIB_CHG_TXP:
2068 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2069 			return NULL;
2070 		return &phy_db->calib_ch_group_txp[chg_id];
2071 	default:
2072 		return NULL;
2073 	}
2074 	return NULL;
2075 }
2076 
2077 static int
2078 iwm_phy_db_set_section(struct iwm_softc *sc,
2079     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2080 {
2081 	struct iwm_phy_db_entry *entry;
2082 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2083 	uint16_t chg_id = 0;
2084 
2085 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2086 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2087 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2088 
2089 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2090 	if (!entry)
2091 		return EINVAL;
2092 
2093 	if (entry->data)
2094 		kmem_intr_free(entry->data, entry->size);
2095 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2096 	if (!entry->data) {
2097 		entry->size = 0;
2098 		return ENOMEM;
2099 	}
2100 	memcpy(entry->data, phy_db_notif->data, size);
2101 	entry->size = size;
2102 
2103 	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2104 	    __func__, __LINE__, type, size, entry->data));
2105 
2106 	return 0;
2107 }
2108 
/*
 * Channel numbers known to the PHY DB calibration tables: 2.4 GHz
 * channels 1-14 and the 5 GHz channels listed by the firmware.
 * Channel numbers start at 1; the previous code accepted 0 because
 * the lower bound on the first range was missing (Linux iwlwifi's
 * iwl-phy-db.c checks ch_id >= 1).
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	if ((1 <= ch_id && ch_id <= 14) ||
	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
		return 1;
	return 0;
}

/*
 * Map a channel number to its index in the calibration tables.
 * Returns 0xff for an invalid channel.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		return ch_id - 1;
	if (ch_id <= 64)
		return (ch_id + 20) / 4;
	if (ch_id <= 140)
		return (ch_id - 12) / 4;
	return (ch_id - 13) / 4;
}


/*
 * Map a channel number to its PAPD channel group (0-3).
 * Returns 0xff for an invalid channel.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (1 <= ch_id && ch_id <= 14)
		return 0;
	if (36 <= ch_id && ch_id <= 64)
		return 1;
	if (100 <= ch_id && ch_id <= 140)
		return 2;
	return 3;
}
2150 
2151 static uint16_t
2152 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2153 {
2154 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2155 	struct iwm_phy_db_chg_txp *txp_chg;
2156 	int i;
2157 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2158 
2159 	if (ch_index == 0xff)
2160 		return 0xff;
2161 
2162 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2163 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2164 		if (!txp_chg)
2165 			return 0xff;
2166 		/*
2167 		 * Looking for the first channel group the max channel
2168 		 * of which is higher than the requested channel.
2169 		 */
2170 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2171 			return i;
2172 	}
2173 	return 0xff;
2174 }
2175 
2176 static int
2177 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2178     uint16_t *size, uint16_t ch_id)
2179 {
2180 	struct iwm_phy_db_entry *entry;
2181 	uint16_t ch_group_id = 0;
2182 
2183 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2184 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2185 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2186 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2187 
2188 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2189 	if (!entry)
2190 		return EINVAL;
2191 
2192 	*data = entry->data;
2193 	*size = entry->size;
2194 
2195 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2196 		       __func__, __LINE__, type, *size));
2197 
2198 	return 0;
2199 }
2200 
2201 static int
2202 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2203     void *data)
2204 {
2205 	struct iwm_phy_db_cmd phy_db_cmd;
2206 	struct iwm_host_cmd cmd = {
2207 		.id = IWM_PHY_DB_CMD,
2208 		.flags = IWM_CMD_ASYNC,
2209 	};
2210 
2211 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2212 	    type, length));
2213 
2214 	phy_db_cmd.type = le16toh(type);
2215 	phy_db_cmd.length = le16toh(length);
2216 
2217 	cmd.data[0] = &phy_db_cmd;
2218 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2219 	cmd.data[1] = data;
2220 	cmd.len[1] = length;
2221 
2222 	return iwm_send_cmd(sc, &cmd);
2223 }
2224 
2225 static int
2226 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2227     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2228 {
2229 	uint16_t i;
2230 	int err;
2231 	struct iwm_phy_db_entry *entry;
2232 
2233 	/* Send all the channel-specific groups to operational fw */
2234 	for (i = 0; i < max_ch_groups; i++) {
2235 		entry = iwm_phy_db_get_section(sc, type, i);
2236 		if (!entry)
2237 			return EINVAL;
2238 
2239 		if (!entry->size)
2240 			continue;
2241 
2242 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2243 		if (err) {
2244 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2245 			    "err %d\n", DEVNAME(sc), type, i, err));
2246 			return err;
2247 		}
2248 
2249 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2250 		    DEVNAME(sc), type, i));
2251 
2252 		DELAY(1000);
2253 	}
2254 
2255 	return 0;
2256 }
2257 
/*
 * Upload the whole stored PHY DB to the firmware, in order: the config
 * section, the non-channel calibration, then the per-channel-group
 * PAPD and TX-power sections.  Returns 0 or the first error.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err)
		return err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err)
		return err;

	return 0;
}
2294 
2295 /*
2296  * For the high priority TE use a time event type that has similar priority to
2297  * the FW's action scan priority.
2298  */
2299 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2300 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2301 
2302 /* used to convert from time event API v2 to v1 */
2303 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2304 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the notification bits from a little-endian v2 policy word. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}

/* Extract the dependency-policy bits from a v2 policy word. */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}

/* Extract the absence bit from a v2 policy word. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
2323 
/*
 * Convert a time event command from the v2 wire layout to v1, for
 * firmware without IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.  Common
 * fields are copied through unchanged; the packed v2 policy word is
 * split into the separate v1 fields.
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 uses a 32-bit repeat count with its own "endless" marker. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* Unpack the v2 policy bits into the discrete v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2347 
2348 static int
2349 iwm_send_time_event_cmd(struct iwm_softc *sc,
2350     const struct iwm_time_event_cmd_v2 *cmd)
2351 {
2352 	struct iwm_time_event_cmd_v1 cmd_v1;
2353 
2354 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2355 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2356 		    cmd);
2357 
2358 	iwm_te_v2_to_v1(cmd, &cmd_v1);
2359 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2360 	    &cmd_v1);
2361 }
2362 
/*
 * Schedule a session-protection time event: ask the firmware to keep
 * the medium available for up to "duration" time units (with at most
 * "max_delay" start slack) so an association exchange can complete.
 * Fire-and-forget: the command's return value is not checked.
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Start as soon as possible (see START_IMMEDIATELY below). */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	iwm_send_time_event_cmd(sc, &time_cmd);
}
2391 
2392 /*
2393  * NVM read access and content parsing.  We do not support
2394  * external NVM or writing NVM.
2395  */
2396 
2397 /* list of NVM sections we are allowed/need to read */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	/* 8000-family sections (see iwm_parse_nvm_sections). */
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
2408 
2409 /* Default NVM size to read */
2410 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2411 #define IWM_MAX_NVM_SECTION_SIZE	8192
2412 
2413 #define IWM_NVM_WRITE_OPCODE 1
2414 #define IWM_NVM_READ_OPCODE 0
2415 
2416 static int
2417 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2418     uint16_t length, uint8_t *data, uint16_t *len)
2419 {
2420 	offset = 0;
2421 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2422 		.offset = htole16(offset),
2423 		.length = htole16(length),
2424 		.type = htole16(section),
2425 		.op_code = IWM_NVM_READ_OPCODE,
2426 	};
2427 	struct iwm_nvm_access_resp *nvm_resp;
2428 	struct iwm_rx_packet *pkt;
2429 	struct iwm_host_cmd cmd = {
2430 		.id = IWM_NVM_ACCESS_CMD,
2431 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2432 		.data = { &nvm_access_cmd, },
2433 	};
2434 	int err, offset_read;
2435 	size_t bytes_read;
2436 	uint8_t *resp_data;
2437 
2438 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2439 
2440 	err = iwm_send_cmd(sc, &cmd);
2441 	if (err) {
2442 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2443 		    DEVNAME(sc), err));
2444 		return err;
2445 	}
2446 
2447 	pkt = cmd.resp_pkt;
2448 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2449 		err = EIO;
2450 		goto exit;
2451 	}
2452 
2453 	/* Extract NVM response */
2454 	nvm_resp = (void *)pkt->data;
2455 
2456 	err = le16toh(nvm_resp->status);
2457 	bytes_read = le16toh(nvm_resp->length);
2458 	offset_read = le16toh(nvm_resp->offset);
2459 	resp_data = nvm_resp->data;
2460 	if (err) {
2461 		err = EINVAL;
2462 		goto exit;
2463 	}
2464 
2465 	if (offset_read != offset) {
2466 		err = EINVAL;
2467 		goto exit;
2468 	}
2469 	if (bytes_read > length) {
2470 		err = EINVAL;
2471 		goto exit;
2472 	}
2473 
2474 	memcpy(data + offset, resp_data, bytes_read);
2475 	*len = bytes_read;
2476 
2477  exit:
2478 	iwm_free_resp(sc, &cmd);
2479 	return err;
2480 }
2481 
2482 /*
2483  * Reads an NVM section completely.
2484  * NICs prior to 7000 family doesn't have a real NVM, but just read
2485  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2486  * by uCode, we need to manually check in this case that we don't
2487  * overflow and try to read more than the EEPROM size.
2488  */
2489 static int
2490 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2491     uint16_t *len, size_t max_len)
2492 {
2493 	uint16_t chunklen, seglen;
2494 	int err;
2495 
2496 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2497 	*len = 0;
2498 
2499 	/* Read NVM chunks until exhausted (reading less than requested) */
2500 	while (seglen == chunklen && *len < max_len) {
2501 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2502 		    &seglen);
2503 		if (err) {
2504 			DPRINTF(("%s:Cannot read NVM from section %d "
2505 			    "offset %d, length %d\n",
2506 			    DEVNAME(sc), section, *len, chunklen));
2507 			return err;
2508 		}
2509 		*len += seglen;
2510 	}
2511 
2512 	DPRINTFN(4, ("NVM section %d read completed\n", section));
2513 	return 0;
2514 }
2515 
2516 static uint8_t
2517 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2518 {
2519 	uint8_t tx_ant;
2520 
2521 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2522 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2523 
2524 	if (sc->sc_nvm.valid_tx_ant)
2525 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2526 
2527 	return tx_ant;
2528 }
2529 
2530 static uint8_t
2531 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2532 {
2533 	uint8_t rx_ant;
2534 
2535 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2536 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2537 
2538 	if (sc->sc_nvm.valid_rx_ant)
2539 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2540 
2541 	return rx_ant;
2542 }
2543 
2544 static void
2545 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2546     const uint8_t *nvm_channels, size_t nchan)
2547 {
2548 	struct ieee80211com *ic = &sc->sc_ic;
2549 	struct iwm_nvm_data *data = &sc->sc_nvm;
2550 	int ch_idx;
2551 	struct ieee80211_channel *channel;
2552 	uint16_t ch_flags;
2553 	int is_5ghz;
2554 	int flags, hw_value;
2555 
2556 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2557 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2558 
2559 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2560 		    !data->sku_cap_band_52GHz_enable)
2561 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2562 
2563 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2564 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2565 			    iwm_nvm_channels[ch_idx],
2566 			    ch_flags,
2567 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2568 			    "5.2" : "2.4"));
2569 			continue;
2570 		}
2571 
2572 		hw_value = nvm_channels[ch_idx];
2573 		channel = &ic->ic_channels[hw_value];
2574 
2575 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2576 		if (!is_5ghz) {
2577 			flags = IEEE80211_CHAN_2GHZ;
2578 			channel->ic_flags
2579 			    = IEEE80211_CHAN_CCK
2580 			    | IEEE80211_CHAN_OFDM
2581 			    | IEEE80211_CHAN_DYN
2582 			    | IEEE80211_CHAN_2GHZ;
2583 		} else {
2584 			flags = IEEE80211_CHAN_5GHZ;
2585 			channel->ic_flags =
2586 			    IEEE80211_CHAN_A;
2587 		}
2588 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2589 
2590 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2591 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2592 
2593 #ifndef IEEE80211_NO_HT
2594 		if (data->sku_cap_11n_enable)
2595 			channel->ic_flags |= IEEE80211_CHAN_HT;
2596 #endif
2597 	}
2598 }
2599 
2600 #ifndef IEEE80211_NO_HT
/*
 * Advertise the HT MCS set we support.  (This whole block is compiled
 * out while IEEE80211_NO_HT is defined at the top of the file.)
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	/* Single-stream rates are always supported. */
	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	/* Multi-stream rates, gated on MIMO support and RX chain count. */
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
2621 
2622 #define IWM_MAX_RX_BA_SESSIONS 16
2623 
/*
 * Start (start=1) or tear down (start=0) an RX block-ack session for
 * TID "tid" by modifying the firmware's station entry, then tell
 * net80211 whether the ADDBA request was accepted.
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	/* The hardware supports a limited number of RX BA sessions. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		/* ssn is already little-endian (see iwm_ampdu_rx_start). */
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update the session count and notify net80211 at splnet. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
2671 
/*
 * Task-context worker: push updated HT protection settings to the
 * firmware by re-issuing the MAC context command for the current BSS.
 */
static void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		aprint_error_dev(sc->sc_dev,
		    "could not change HT protection: error %d\n", err);
}
2686 
2687 /*
2688  * This function is called by upper layer when HT protection settings in
2689  * beacons have changed.
2690  */
static void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	/*
	 * NOTE(review): other net80211 callbacks in this file fetch the
	 * softc via IC2IFP(ic)->if_softc; confirm ic_softc exists in
	 * this net80211 (the block is compiled out while
	 * IEEE80211_NO_HT is defined).
	 */
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer the firmware update to iwm_htprot_task(). */
	task_add(systq, &sc->htprot_task);
}
2699 
2700 static void
2701 iwm_ba_task(void *arg)
2702 {
2703 	struct iwm_softc *sc = arg;
2704 	struct ieee80211com *ic = &sc->sc_ic;
2705 	struct ieee80211_node *ni = ic->ic_bss;
2706 
2707 	if (sc->ba_start)
2708 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2709 	else
2710 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2711 }
2712 
2713 /*
2714  * This function is called by upper layer when an ADDBA request is received
2715  * from another STA and before the ADDBA response is sent.
2716  */
/*
 * net80211 callback: an ADDBA request for "tid" arrived and the ADDBA
 * response has not been sent yet.  Record the request, defer the
 * firmware work to iwm_ba_task, and return EBUSY so net80211 holds the
 * response; the task later accepts or refuses the request.
 */
static int
iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return ENOSPC;

	sc->ba_start = 1;
	sc->ba_tid = tid;
	/* Stored little-endian; consumed as-is by iwm_sta_rx_agg(). */
	sc->ba_ssn = htole16(ba->ba_winstart);
	task_add(systq, &sc->ba_task);

	return EBUSY;
}
2734 
2735 /*
2736  * This function is called by upper layer on teardown of an HT-immediate
2737  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2738  */
/*
 * net80211 callback: an HT-immediate block-ack agreement for "tid" was
 * torn down (e.g. on receipt of a DELBA frame).  Defer the firmware
 * update to iwm_ba_task.
 */
static void
iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	sc->ba_start = 0;
	sc->ba_tid = tid;
	task_add(systq, &sc->ba_task);
}
2749 #endif
2750 
/*
 * Determine the MAC address on 8000-family devices.  Preference order:
 * the MAC-override NVM section (if it holds a usable address), then
 * the OTP address read from the WFMP PRPH registers.  Falls back to an
 * all-zero address with an error message if neither source is usable.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 * (Valid here means: not the reserved placeholder, not
		 * broadcast, not all-zero, not multicast.)
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Bytes are stored in reversed order within each word. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2809 
/*
 * Parse the raw NVM sections into sc->sc_nvm: radio configuration,
 * SKU capabilities, MAC address, LAR flag, and the channel map.  The
 * 7000 and 8000 families keep this data in different sections and
 * word sizes, hence the per-family branches.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Radio configuration: 16-bit word on 7000, 32-bit on 8000. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		uint32_t radio_cfg = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		sku = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_SKU_8000));
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/* 7000: the HW section stores the MAC byte-swapped per 16-bit
	 * word; undo that here.  8000: see iwm_set_hw_address_8000(). */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	/* Location Aware Regulatory (LAR) support flag (8000 only). */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		uint16_t lar_offset, lar_config;
		lar_offset = data->nvm_version < 0xE39 ?
		    IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
		lar_config = le16_to_cpup(regulatory + lar_offset);
                data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2885 
/*
 * Validate that the NVM sections required by this device family are
 * present, then hand the raw section pointers to iwm_parse_nvm_data().
 * Returns ENOENT when a mandatory section is missing.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
	const uint16_t *regulatory = NULL;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			return ENOENT;
		}

		regulatory = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
		mac_override =
			(const uint16_t *)
			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
		phy_sku = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	/* CALIBRATION may legitimately be absent; parser tolerates NULL. */
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2937 
2938 static int
2939 iwm_nvm_init(struct iwm_softc *sc)
2940 {
2941 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2942 	int i, section, err;
2943 	uint16_t len;
2944 	uint8_t *buf;
2945 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2946 
2947 	/* Read From FW NVM */
2948 	DPRINTF(("Read NVM\n"));
2949 
2950 	memset(nvm_sections, 0, sizeof(nvm_sections));
2951 
2952 	buf = kmem_alloc(bufsz, KM_SLEEP);
2953 	if (buf == NULL)
2954 		return ENOMEM;
2955 
2956 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
2957 		section = iwm_nvm_to_read[i];
2958 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
2959 
2960 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2961 		if (err) {
2962 			err = 0;
2963 			continue;
2964 		}
2965 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
2966 		if (nvm_sections[section].data == NULL) {
2967 			err = ENOMEM;
2968 			break;
2969 		}
2970 		memcpy(nvm_sections[section].data, buf, len);
2971 		nvm_sections[section].length = len;
2972 	}
2973 	kmem_free(buf, bufsz);
2974 	if (err == 0)
2975 		err = iwm_parse_nvm_sections(sc, nvm_sections);
2976 
2977 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2978 		if (nvm_sections[i].data != NULL)
2979 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
2980 	}
2981 
2982 	return err;
2983 }
2984 
2985 static int
2986 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2987     const uint8_t *section, uint32_t byte_cnt)
2988 {
2989 	int err = EINVAL;
2990 	uint32_t chunk_sz, offset;
2991 
2992 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2993 
2994 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2995 		uint32_t addr, len;
2996 		const uint8_t *data;
2997 
2998 		addr = dst_addr + offset;
2999 		len = MIN(chunk_sz, byte_cnt - offset);
3000 		data = section + offset;
3001 
3002 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3003 		if (err)
3004 			break;
3005 	}
3006 
3007 	return err;
3008 }
3009 
3010 static int
3011 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3012     const uint8_t *section, uint32_t byte_cnt)
3013 {
3014 	struct iwm_dma_info *dma = &sc->fw_dma;
3015 	bool is_extended = false;
3016 	int err;
3017 
3018 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3019 	memcpy(dma->vaddr, section, byte_cnt);
3020 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3021 	    BUS_DMASYNC_PREWRITE);
3022 
3023 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3024 	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
3025 		is_extended = true;
3026 
3027 	if (is_extended) {
3028 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3029 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3030 	}
3031 
3032 	sc->sc_fw_chunk_done = 0;
3033 
3034 	if (!iwm_nic_lock(sc)) {
3035 		if (is_extended)
3036 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3037 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3038 		return EBUSY;
3039 	}
3040 
3041 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3042 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3043 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3044 	    dst_addr);
3045 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3046 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3047 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3048 	    (iwm_get_dma_hi_addr(dma->paddr)
3049 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3050 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3051 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3052 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3053 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3054 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3055 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3056 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3057 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3058 
3059 	iwm_nic_unlock(sc);
3060 
3061 	/* Wait for this segment to load. */
3062 	err = 0;
3063 	while (!sc->sc_fw_chunk_done) {
3064 		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3065 		if (err)
3066 			break;
3067 	}
3068 	if (!sc->sc_fw_chunk_done) {
3069 		aprint_error_dev(sc->sc_dev,
3070 		    "fw chunk addr 0x%x len %d failed to load\n",
3071 		    dst_addr, byte_cnt);
3072 	}
3073 
3074 	if (is_extended) {
3075 		int rv = iwm_nic_lock(sc);
3076 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3077 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3078 		if (rv == 0)
3079 			iwm_nic_unlock(sc);
3080 	}
3081 
3082 	return err;
3083 }
3084 
3085 static int
3086 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3087 {
3088 	struct iwm_fw_sects *fws;
3089 	int err, i;
3090 	void *data;
3091 	uint32_t dlen;
3092 	uint32_t offset;
3093 
3094 	fws = &sc->sc_fw.fw_sects[ucode_type];
3095 	for (i = 0; i < fws->fw_count; i++) {
3096 		data = fws->fw_sect[i].fws_data;
3097 		dlen = fws->fw_sect[i].fws_len;
3098 		offset = fws->fw_sect[i].fws_devoff;
3099 		if (dlen > sc->sc_fwdmasegsz) {
3100 			err = EFBIG;
3101 		} else
3102 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3103 		if (err) {
3104 			aprint_error_dev(sc->sc_dev,
3105 			    "could not load firmware chunk %u of %u\n",
3106 			    i, fws->fw_count);
3107 			return err;
3108 		}
3109 	}
3110 
3111 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3112 
3113 	return 0;
3114 }
3115 
/*
 * Upload the firmware sections belonging to one CPU of an 8000-family
 * device and report each loaded section to the ucode via the
 * FH_UCODE_LOAD_STATUS register.  *first_ucode_section carries the
 * section index across the CPU1/CPU2 calls so the CPU2 pass resumes
 * after the separator section where the CPU1 pass stopped.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	/* CPU1 status bits are the low 16 bits, CPU2 the high 16 bits. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		/* Sections must fit in the pre-allocated firmware DMA seg. */
		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Each loaded section sets one more low bit. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Mark all of this CPU's sections as fully loaded. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
3189 
3190 static int
3191 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3192 {
3193 	struct iwm_fw_sects *fws;
3194 	int err = 0;
3195 	int first_ucode_section;
3196 
3197 	fws = &sc->sc_fw.fw_sects[ucode_type];
3198 
3199 	/* configure the ucode to be ready to get the secured image */
3200 	/* release CPU reset */
3201 	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
3202 
3203 	/* load to FW the binary Secured sections of CPU1 */
3204 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3205 	if (err)
3206 		return err;
3207 
3208 	/* load to FW the binary sections of CPU2 */
3209 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3210 }
3211 
3212 static int
3213 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3214 {
3215 	int err, w;
3216 
3217 	sc->sc_uc.uc_intr = 0;
3218 
3219 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3220 		err = iwm_load_firmware_8000(sc, ucode_type);
3221 	else
3222 		err = iwm_load_firmware_7000(sc, ucode_type);
3223 
3224 	if (err)
3225 		return err;
3226 
3227 	/* wait for the firmware to load */
3228 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3229 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3230 	if (err || !sc->sc_uc.uc_ok)
3231 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
3232 
3233 	return err;
3234 }
3235 
/*
 * Bring the NIC up, clear the RF-kill handshake bits, enable host
 * interrupts, and start loading the requested ucode image.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack any pending interrupts before (re)starting the firmware. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
3265 
3266 static int
3267 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3268 {
3269 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3270 		.valid = htole32(valid_tx_ant),
3271 	};
3272 
3273 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3274 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
3275 }
3276 
3277 static int
3278 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3279 {
3280 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3281 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3282 
3283 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3284 	phy_cfg_cmd.calib_control.event_trigger =
3285 	    sc->sc_default_calib[ucode_type].event_trigger;
3286 	phy_cfg_cmd.calib_control.flow_trigger =
3287 	    sc->sc_default_calib[ucode_type].flow_trigger;
3288 
3289 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3290 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3291 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3292 }
3293 
3294 static int
3295 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3296 {
3297 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3298 	int err;
3299 
3300 	err = iwm_read_firmware(sc);
3301 	if (err)
3302 		return err;
3303 
3304 	sc->sc_uc_current = ucode_type;
3305 	err = iwm_start_fw(sc, ucode_type);
3306 	if (err) {
3307 		sc->sc_uc_current = old_type;
3308 		return err;
3309 	}
3310 
3311 	return iwm_post_alive(sc);
3312 }
3313 
/*
 * Run the INIT ucode image.  With justnvm set, only read the NVM (and
 * the MAC address) and return.  Otherwise perform the full init-image
 * configuration sequence and wait for the firmware's init-complete
 * notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to load init firmware\n");
		return err;
	}

	if (justnvm) {
		/* NVM-only bring-up: read the NVM and the MAC address. */
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
3376 
/*
 * Allocate a fresh receive mbuf for RX ring slot idx, DMA-map it, and
 * write its bus address into the ring's descriptor.  Returns 0 on
 * success or ENOBUFS/err from mbuf allocation or DMA mapping.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a cluster when it fits, else an external malloc'd buffer. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot still holds an old buffer, unload its mapping; past
	 * this point a mapping failure would leave the ring slot unusable.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor.  The hardware takes a 256-byte-aligned
	 * address, hence the >> 8. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3425 
3426 #define IWM_RSSI_OFFSET 50
/*
 * Compute the strongest per-antenna RSSI (in dBm) from the PHY info of
 * a received frame, for firmware without the RX energy API.
 * NOTE(review): the rssi/agc subtraction mixes int and uint32_t
 * operands exactly as the reference driver does; do not reorder or
 * retype these expressions.
 */
static int
iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	/* Extract the per-antenna AGC settings. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	/* Extract the per-antenna raw RSSI readings. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));

	return max_rssi_dbm;
}
3455 
3456 /*
3457  * RSSI values are reported by the FW as positive values - need to negate
3458  * to obtain their dBM.  Account for missing antennas by replacing 0
3459  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3460  */
3461 static int
3462 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3463 {
3464 	int energy_a, energy_b, energy_c, max_energy;
3465 	uint32_t val;
3466 
3467 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3468 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3469 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3470 	energy_a = energy_a ? -energy_a : -256;
3471 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3472 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3473 	energy_b = energy_b ? -energy_b : -256;
3474 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3475 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3476 	energy_c = energy_c ? -energy_c : -256;
3477 	max_energy = MAX(energy_a, energy_b);
3478 	max_energy = MAX(max_energy, energy_c);
3479 
3480 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3481 	    energy_a, energy_b, energy_c, max_energy));
3482 
3483 	return max_energy;
3484 }
3485 
3486 static void
3487 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3488     struct iwm_rx_data *data)
3489 {
3490 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3491 
3492 	DPRINTFN(20, ("received PHY stats\n"));
3493 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3494 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3495 
3496 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3497 }
3498 
3499 /*
3500  * Retrieve the average noise (in dBm) among receivers.
3501  */
3502 static int
3503 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3504 {
3505 	int i, total, nbant, noise;
3506 
3507 	total = nbant = noise = 0;
3508 	for (i = 0; i < 3; i++) {
3509 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3510 		if (noise) {
3511 			total += noise;
3512 			nbant++;
3513 		}
3514 	}
3515 
3516 	/* There should be at least one antenna but check anyway. */
3517 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3518 }
3519 
/*
 * Handle a received MPDU: validate it against the PHY info saved by
 * iwm_rx_rx_phy_cmd(), replenish the RX ring slot, optionally feed the
 * radiotap tap, and pass the frame up to net80211.
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;
	int s;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info for this frame was stashed by the preceding PHY packet. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The 32-bit RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Point the ring mbuf at the 802.11 frame inside the packet. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy instead of raw RSSI. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/*
	 * Give the ring slot a fresh buffer; on success the old mbuf "m"
	 * is ours to hand up the stack.
	 */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	s = splnet();

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Feed the radiotap BPF tap if anyone is listening. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map hardware rate codes to 500kb/s units. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	/* ieee80211_input() consumes the mbuf. */
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);

	splx(s);
}
3637 
3638 static void
3639 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3640     struct iwm_node *in)
3641 {
3642 	struct ieee80211com *ic = &sc->sc_ic;
3643 	struct ifnet *ifp = IC2IFP(ic);
3644 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3645 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3646 	int failack = tx_resp->failure_frame;
3647 
3648 	KASSERT(tx_resp->frame_count == 1);
3649 
3650 	/* Update rate control statistics. */
3651 	in->in_amn.amn_txcnt++;
3652 	if (failack > 0) {
3653 		in->in_amn.amn_retrycnt++;
3654 	}
3655 
3656 	if (status != IWM_TX_STATUS_SUCCESS &&
3657 	    status != IWM_TX_STATUS_DIRECT_DONE)
3658 		ifp->if_oerrors++;
3659 	else
3660 		ifp->if_opackets++;
3661 }
3662 
/*
 * Handle a TX-command response: account the frame, release the TX
 * slot's DMA mapping, mbuf and node reference, and restart the output
 * queue if the ring drained below the low-water mark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* The firmware answered, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Tear down the slot's DMA mapping and free the sent frame. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken at transmit time. */
	ieee80211_free_node(&in->in_ni);

	/* Un-throttle output once the ring has drained far enough. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			if_start_lock(ifp);
		}
	}
}
3711 
3712 static int
3713 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3714 {
3715 	struct iwm_binding_cmd cmd;
3716 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3717 	int i, err;
3718 	uint32_t status;
3719 
3720 	memset(&cmd, 0, sizeof(cmd));
3721 
3722 	cmd.id_and_color
3723 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3724 	cmd.action = htole32(action);
3725 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3726 
3727 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3728 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3729 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3730 
3731 	status = 0;
3732 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3733 	    sizeof(cmd), &cmd, &status);
3734 	if (err == 0 && status != 0)
3735 		err = EIO;
3736 
3737 	return err;
3738 }
3739 
3740 static void
3741 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3742     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3743 {
3744 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3745 
3746 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3747 	    ctxt->color));
3748 	cmd->action = htole32(action);
3749 	cmd->apply_time = htole32(apply_time);
3750 }
3751 
3752 static void
3753 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3754     struct ieee80211_channel *chan, uint8_t chains_static,
3755     uint8_t chains_dynamic)
3756 {
3757 	struct ieee80211com *ic = &sc->sc_ic;
3758 	uint8_t active_cnt, idle_cnt;
3759 
3760 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3761 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3762 
3763 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3764 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3765 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3766 
3767 	/* Set rx the chains */
3768 	idle_cnt = chains_static;
3769 	active_cnt = chains_dynamic;
3770 
3771 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3772 	    IWM_PHY_RX_CHAIN_VALID_POS);
3773 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3774 	cmd->rxchain_info |= htole32(active_cnt <<
3775 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3776 
3777 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3778 }
3779 
3780 static int
3781 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3782     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3783     uint32_t apply_time)
3784 {
3785 	struct iwm_phy_context_cmd cmd;
3786 
3787 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3788 
3789 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3790 	    chains_static, chains_dynamic);
3791 
3792 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3793 	    sizeof(struct iwm_phy_context_cmd), &cmd);
3794 }
3795 
/*
 * Submit a host command to the firmware via the command queue.
 * Small payloads are copied into the ring's pre-allocated command
 * buffer; oversized ones get their own DMA-mapped mbuf.  Synchronous
 * commands sleep until the completion interrupt wakes the descriptor;
 * commands with IWM_CMD_WANT_SKB additionally reserve sc_cmd_resp and
 * receive the response packet in hcmd->resp_pkt (caller must call
 * iwm_free_resp()).  Returns 0 or an errno value.
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all scatter segments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Wide headers are used for commands in a non-zero group. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			aprint_error_dev(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Small command: use the ring's pre-mapped command buffer. */
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Gather the payload segments into the command buffer. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, hdrlen + paylen, async ? " (async)" : ""));

	/* Flush the command and its TFD out to the device. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);

	err = iwm_set_cmd_in_flight(sc);
	if (err)
		goto out;
	ring->queued++;

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* Wait up to one second for the completion interrupt. */
		int generation = sc->sc_generation;
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response buffer reserved above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
3969 
3970 static int
3971 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
3972     uint16_t len, const void *data)
3973 {
3974 	struct iwm_host_cmd cmd = {
3975 		.id = id,
3976 		.len = { len, },
3977 		.data = { data, },
3978 		.flags = flags,
3979 	};
3980 
3981 	return iwm_send_cmd(sc, &cmd);
3982 }
3983 
/*
 * Send a synchronous host command and extract the 32-bit status word from
 * the firmware's generic command response into *status.
 *
 * Returns 0 on success; EIO if the firmware flagged the command as failed
 * or the response payload is not exactly one struct iwm_cmd_response.
 * NOTE(review): when no response packet arrives (e.g. RFKILL asserted) this
 * returns 0 but leaves *status untouched — callers presumably pre-seed
 * *status; confirm at call sites.
 * The response-buffer slot is always released via iwm_free_resp() on every
 * path after a successful iwm_send_cmd().
 */
static int
iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
    uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int err, resp_len;

	/* Caller must not have requested the response buffer itself;
	 * this helper takes ownership of the WANT_SKB slot. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_WANT_SKB;

	err = iwm_send_cmd(sc, cmd);
	if (err)
		return err;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		err = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		err = EIO;
		goto out_free_resp;
	}

	/* Response payload must be exactly one iwm_cmd_response. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	iwm_free_resp(sc, cmd);
	return err;
}
4023 
4024 static int
4025 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4026     const void *data, uint32_t *status)
4027 {
4028 	struct iwm_host_cmd cmd = {
4029 		.id = id,
4030 		.len = { len, },
4031 		.data = { data, },
4032 	};
4033 
4034 	return iwm_send_cmd_status(sc, &cmd, status);
4035 }
4036 
/*
 * Release the single synchronous-response slot held by a command that
 * requested a response (IWM_CMD_WANT_SKB), and wake any thread sleeping
 * on sc_wantresp (presumably another command issuer waiting for the slot
 * — confirm against iwm_send_cmd).
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	/* The slot must be held, and only WANT_SKB commands hold it. */
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
4045 
/*
 * Handle completion of a host command: tear down its DMA mapping if the
 * payload lived in a separate mbuf, wake the thread sleeping on the TX
 * descriptor (see the tsleep in iwm_send_cmd), and release the
 * command-in-flight reference once the command queue drains.
 * Ignores completions for queues other than the command queue.
 */
static void
iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (qid != IWM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[idx];

	/* A non-NULL mbuf means the payload was too big for the ring's
	 * command buffer and was mapped separately; unmap and free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake the synchronous sender blocked on this descriptor. */
	wakeup(&ring->desc[idx]);

	/* Sanity check: completions should arrive in submission order. */
	if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		aprint_error_dev(sc->sc_dev,
		    "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    idx, ring->queued, ring->cur);
	}

	KASSERT(ring->queued > 0);
	if (--ring->queued == 0)
		iwm_clear_cmd_in_flight(sc);
}
4077 
#if 0
/*
 * Update the TX scheduler byte-count table for a queued frame;
 * necessary only for block ack mode.
 *
 * Fix while disabled: the bus_dmamap_sync() offset expressions referenced
 * an undeclared variable 'w', so this code could never have compiled if
 * enabled.  Sync the addresses of the table entries actually written.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* Hardware reads a duplicate of the first entries past the end
	 * of the ring; keep the shadow copy in sync. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] =
		    w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)
		    &scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
4113 
4114 /*
4115  * Fill in various bit for management frames, and leave them
4116  * unfilled for data frames (firmware takes care of that).
4117  * Return the selected TX rate.
4118  */
4119 static const struct iwm_rate *
4120 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4121     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4122 {
4123 	struct ieee80211com *ic = &sc->sc_ic;
4124 	struct ieee80211_node *ni = &in->in_ni;
4125 	const struct iwm_rate *rinfo;
4126 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4127 	int ridx, rate_flags, i;
4128 	int nrates = ni->ni_rates.rs_nrates;
4129 
4130 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4131 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4132 
4133 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4134 	    type != IEEE80211_FC0_TYPE_DATA) {
4135 		/* for non-data, use the lowest supported rate */
4136 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4137 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4138 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4139 #ifndef IEEE80211_NO_HT
4140 	} else if (ic->ic_fixed_mcs != -1) {
4141 		ridx = sc->sc_fixed_ridx;
4142 #endif
4143 	} else if (ic->ic_fixed_rate != -1) {
4144 		ridx = sc->sc_fixed_ridx;
4145 	} else {
4146 		/* for data frames, use RS table */
4147 		tx->initial_rate_index = 0;
4148 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4149 		DPRINTFN(12, ("start with txrate %d\n",
4150 		    tx->initial_rate_index));
4151 #ifndef IEEE80211_NO_HT
4152 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4153 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4154 			return &iwm_rates[ridx];
4155 		}
4156 #endif
4157 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4158 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4159 		for (i = 0; i < nrates; i++) {
4160 			if (iwm_rates[i].rate == (ni->ni_txrate &
4161 			    IEEE80211_RATE_VAL)) {
4162 				ridx = i;
4163 				break;
4164 			}
4165 		}
4166 		return &iwm_rates[ridx];
4167 	}
4168 
4169 	rinfo = &iwm_rates[ridx];
4170 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
4171 	if (IWM_RIDX_IS_CCK(ridx))
4172 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4173 #ifndef IEEE80211_NO_HT
4174 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4175 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4176 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4177 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4178 	} else
4179 #endif
4180 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4181 
4182 	return rinfo;
4183 }
4184 
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on the TX ring of access class 'ac'.
 *
 * Builds a TX command (header + iwm_tx_cmd + copied 802.11 header) in the
 * ring's command slot, DMA-maps the frame payload (linearizing the mbuf
 * chain if it has too many segments), fills the TFD with one segment for
 * the first TB0_SIZE bytes of the command, one for the remainder of the
 * command plus header, and one per payload segment, then advances the
 * ring write pointer.
 *
 * Consumes the mbuf on both success and failure.  Returns 0 on success
 * or an errno (ENOBUFS on allocation/crypto failure, or the DMA-load
 * error).
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Build the TX command in this slot's command buffer. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Feed the TX radiotap tap, if active. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Power-save frame timeout depends on frame type. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		else
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB 0: first TB0_SIZE bytes of the command;
	 * TB 1: rest of the command plus (padded) 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor to memory before the
	 * hardware is told about the new write pointer. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4412 
#if 0
/*
 * Ask the firmware to flush the given TX queues; currently unused.
 * 'sync' selects a synchronous or asynchronous command.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd;
	int err;

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	flush_cmd.queues_ctl = htole32(tfd_msk);
	flush_cmd.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH);

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
#endif
4432 
/* Turn the device LED on. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
4438 
/* Turn the device LED off. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
4444 
/* Return non-zero if the LED register currently reads "on". */
static int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
4450 
4451 static void
4452 iwm_led_blink_timeout(void *arg)
4453 {
4454 	struct iwm_softc *sc = arg;
4455 
4456 	if (iwm_led_is_enabled(sc))
4457 		iwm_led_disable(sc);
4458 	else
4459 		iwm_led_enable(sc);
4460 
4461 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4462 }
4463 
/* Start the LED blink callout (first toggle after 200ms). */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
4469 
/* Stop the LED blink callout and leave the LED off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
4476 
4477 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4478 
4479 static int
4480 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4481     struct iwm_beacon_filter_cmd *cmd)
4482 {
4483 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4484 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4485 }
4486 
/*
 * Fill the CQM-related fields of a beacon filter command from cached
 * softc state.  Currently only propagates the beacon-abort enable flag;
 * 'in' is unused.
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4493 
/*
 * Enable or disable firmware beacon abort.  This only makes sense while
 * beacon filtering is active; if it is not, just cache nothing and
 * return 0.  The cached ba_enabled value is updated before the command
 * is built so iwm_beacon_filter_set_cqm_params() picks it up.
 */
static int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
4510 
4511 static void
4512 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4513     struct iwm_mac_power_cmd *cmd)
4514 {
4515 	struct ieee80211_node *ni = &in->in_ni;
4516 	int dtim_period, dtim_msec, keep_alive;
4517 
4518 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4519 	    in->in_color));
4520 	if (ni->ni_dtim_period)
4521 		dtim_period = ni->ni_dtim_period;
4522 	else
4523 		dtim_period = 1;
4524 
4525 	/*
4526 	 * Regardless of power management state the driver must set
4527 	 * keep alive period. FW will use it for sending keep alive NDPs
4528 	 * immediately after association. Check that keep alive period
4529 	 * is at least 3 * DTIM.
4530 	 */
4531 	dtim_msec = dtim_period * ni->ni_intval;
4532 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4533 	keep_alive = roundup(keep_alive, 1000) / 1000;
4534 	cmd->keep_alive_seconds = htole16(keep_alive);
4535 
4536 #ifdef notyet
4537 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4538 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4539 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4540 #endif
4541 }
4542 
4543 static int
4544 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4545 {
4546 	int err;
4547 	int ba_enable;
4548 	struct iwm_mac_power_cmd cmd;
4549 
4550 	memset(&cmd, 0, sizeof(cmd));
4551 
4552 	iwm_power_build_cmd(sc, in, &cmd);
4553 
4554 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4555 	    sizeof(cmd), &cmd);
4556 	if (err)
4557 		return err;
4558 
4559 	ba_enable = !!(cmd.flags &
4560 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4561 	return iwm_update_beacon_abort(sc, in, ba_enable);
4562 }
4563 
4564 static int
4565 iwm_power_update_device(struct iwm_softc *sc)
4566 {
4567 	struct iwm_device_power_cmd cmd = {
4568 #ifdef notyet
4569 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4570 #else
4571 		.flags = 0,
4572 #endif
4573 	};
4574 
4575 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4576 		return 0;
4577 
4578 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4579 	DPRINTF(("Sending device power command with flags = 0x%X\n",
4580 	    cmd.flags));
4581 
4582 	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4583 }
4584 
#ifdef notyet
/*
 * Enable firmware beacon filtering and record the new state in sc_bf
 * on success.
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);
	if (err)
		return err;

	sc->sc_bf.bf_enabled = 1;
	return 0;
}
#endif
4604 
4605 static int
4606 iwm_disable_beacon_filter(struct iwm_softc *sc)
4607 {
4608 	struct iwm_beacon_filter_cmd cmd;
4609 	int err;
4610 
4611 	memset(&cmd, 0, sizeof(cmd));
4612 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4613 		return 0;
4614 
4615 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4616 	if (err == 0)
4617 		sc->sc_bf.bf_enabled = 0;
4618 
4619 	return err;
4620 }
4621 
4622 static int
4623 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4624 {
4625 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
4626 	int err;
4627 	uint32_t status;
4628 
4629 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4630 
4631 	add_sta_cmd.sta_id = IWM_STATION_ID;
4632 	add_sta_cmd.mac_id_n_color
4633 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4634 	if (!update) {
4635 		int ac;
4636 		for (ac = 0; ac < WME_NUM_AC; ac++) {
4637 			add_sta_cmd.tfd_queue_msk |=
4638 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
4639 		}
4640 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4641 	}
4642 	add_sta_cmd.add_modify = update ? 1 : 0;
4643 	add_sta_cmd.station_flags_msk
4644 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4645 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
4646 	if (update)
4647 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4648 
4649 #ifndef IEEE80211_NO_HT
4650 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4651 		add_sta_cmd.station_flags_msk
4652 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4653 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4654 
4655 		add_sta_cmd.station_flags
4656 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4657 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4658 		case IEEE80211_AMPDU_PARAM_SS_2:
4659 			add_sta_cmd.station_flags
4660 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4661 			break;
4662 		case IEEE80211_AMPDU_PARAM_SS_4:
4663 			add_sta_cmd.station_flags
4664 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4665 			break;
4666 		case IEEE80211_AMPDU_PARAM_SS_8:
4667 			add_sta_cmd.station_flags
4668 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4669 			break;
4670 		case IEEE80211_AMPDU_PARAM_SS_16:
4671 			add_sta_cmd.station_flags
4672 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4673 			break;
4674 		default:
4675 			break;
4676 		}
4677 	}
4678 #endif
4679 
4680 	status = IWM_ADD_STA_SUCCESS;
4681 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4682 	    &add_sta_cmd, &status);
4683 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4684 		err = EIO;
4685 
4686 	return err;
4687 }
4688 
4689 static int
4690 iwm_add_aux_sta(struct iwm_softc *sc)
4691 {
4692 	struct iwm_add_sta_cmd_v7 cmd;
4693 	int err;
4694 	uint32_t status;
4695 
4696 	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
4697 	if (err)
4698 		return err;
4699 
4700 	memset(&cmd, 0, sizeof(cmd));
4701 	cmd.sta_id = IWM_AUX_STA_ID;
4702 	cmd.mac_id_n_color =
4703 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
4704 	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
4705 	cmd.tid_disable_tx = htole16(0xffff);
4706 
4707 	status = IWM_ADD_STA_SUCCESS;
4708 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
4709 	    &status);
4710 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4711 		err = EIO;
4712 
4713 	return err;
4714 }
4715 
4716 #define IWM_PLCP_QUIET_THRESH 1
4717 #define IWM_ACTIVE_QUIET_TIME 10
4718 #define LONG_OUT_TIME_PERIOD 600
4719 #define SHORT_OUT_TIME_PERIOD 200
4720 #define SUSPEND_TIME_PERIOD 100
4721 
4722 static uint16_t
4723 iwm_scan_rx_chain(struct iwm_softc *sc)
4724 {
4725 	uint16_t rx_chain;
4726 	uint8_t rx_ant;
4727 
4728 	rx_ant = iwm_fw_valid_rx_ant(sc);
4729 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4730 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4731 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4732 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4733 	return htole16(rx_chain);
4734 }
4735 
4736 static uint32_t
4737 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
4738 {
4739 	uint32_t tx_ant;
4740 	int i, ind;
4741 
4742 	for (i = 0, ind = sc->sc_scan_last_antenna;
4743 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
4744 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4745 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4746 			sc->sc_scan_last_antenna = ind;
4747 			break;
4748 		}
4749 	}
4750 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
4751 
4752 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
4753 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
4754 				   tx_ant);
4755 	else
4756 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
4757 }
4758 
#ifdef notyet
/*
 * If req->n_ssids > 0, it means we should do an active scan.
 * In case of active scan w/o directed scan, we receive a zero-length SSID
 * just to notify that this scan is active and not passive.
 * In order to notify the FW of the number of SSIDs we wish to scan (including
 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first). If the first SSID is
 * already included in the probe template, so we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
 */
static uint16_t
iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
{
	int is_2ghz = (flags & IEEE80211_CHAN_2GHZ) != 0;
	int base = is_2ghz ? 30 : 20;
	int per_ssid = is_2ghz ? 3 : 2;

	return base + per_ssid * (n_ssids + 1);
}

static uint16_t
iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
{
	/* 2GHz channels get a slightly longer passive dwell. */
	if (flags & IEEE80211_CHAN_2GHZ)
		return 100 + 20;
	return 100 + 10;
}
#endif
4784 
/*
 * Fill the LMAC scan command's channel configuration array with every
 * configured channel, up to the firmware's scan-channel limit.  Returns
 * the number of entries written.  Channel slots with ic_flags == 0 are
 * unconfigured and skipped.
 * NOTE(review): iteration starts at ic_channels[1] — presumably slot 0
 * is unused by net80211's channel table; confirm.
 */
static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
4814 
/*
 * Fill the UMAC scan command's channel configuration array with every
 * configured channel, up to the firmware's scan-channel limit.  Returns
 * the number of entries written.  Unlike the LMAC variant, channel_num
 * and iter_count are single bytes here and chan->flags is left at its
 * zeroed value (the enclosing request is allocated zeroed).
 */
static uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;
		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
4842 
/*
 * Build the probe request template for scan commands: an 802.11 probe
 * request frame (broadcast, with the configured desired SSID) followed
 * by per-band rate IEs.  Offsets and lengths of the MAC header, the
 * 2GHz IEs (band_data[0]) and the 5GHz IEs (band_data[1]) are recorded
 * for the firmware, which stitches the pieces together per channel.
 * Returns ENOBUFS if the template buffer is too small.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* 2 bytes IE header each for rates and xrates. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	/* Empty DS parameter set; firmware fills in the channel. */
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	preq->common_data.len = htole16(frm - pos);

	return 0;
}
4936 
/*
 * Start a one-shot hardware scan using the legacy LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD).  The request buffer consists of the
 * fixed request header, one channel config entry per firmware scan
 * channel, and the probe request template, in that order.
 * Returns 0 on success or an errno value.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Header + per-channel configs + probe request template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	/*
	 * NOTE(review): kmem_zalloc() with KM_SLEEP cannot fail per
	 * kmem(9); the NULL check below is defensive only.
	 */
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	/* Scan actively only when we have a desired SSID to probe for. */
	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	/* Scan the 5GHz band too if the hardware supports it. */
	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe request template follows the channel configs. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5042 
/*
 * Send the one-time UMAC scan configuration (IWM_SCAN_CFG_CMD) to the
 * firmware: legacy rates usable while scanning, dwell times, antenna
 * chain masks, our MAC address and the list of channel numbers scans
 * may visit.  Returns 0 on success or an errno value.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One trailing channel_array[] byte per firmware scan channel. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/*
	 * Collect the channel numbers of all configured channels
	 * (ic_flags != 0), up to the firmware's limit.  The walk starts
	 * at ic_channels[1]; slot 0 is presumably unused -- confirm
	 * against net80211's channel table conventions.
	 */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
5117 
/*
 * Start a one-shot scan using the newer UMAC scan API
 * (IWM_SCAN_REQ_UMAC).  The request buffer holds the fixed request
 * header, one channel config entry per firmware scan channel, and a
 * tail structure carrying the directed SSID list, probe request
 * template and scan schedule.  Returns 0 on success or an errno value.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	/*
	 * NOTE(review): kmem_zalloc() with KM_SLEEP cannot fail per
	 * kmem(9); the NULL check below is defensive only.
	 */
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail structure follows the channel configs. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5201 
5202 static uint8_t
5203 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5204 {
5205 	int i;
5206 	uint8_t rval;
5207 
5208 	for (i = 0; i < rs->rs_nrates; i++) {
5209 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5210 		if (rval == iwm_rates[ridx].rate)
5211 			return rs->rs_rates[i];
5212 	}
5213 	return 0;
5214 }
5215 
/*
 * Compute the CCK and OFDM ACK rate bitmaps for a MAC context command
 * from the BSS's basic rate set, then extend them with the mandatory
 * lower rates required by 802.11-2007 9.6 (see the long comment in the
 * middle of this function).  Results are returned via *cck_rates and
 * *ofdm_rates in the firmware's bitmap format.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only apply on 2GHz (or not-yet-known) channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5301 
/*
 * Fill in the parts of a MAC context command that are common to the
 * ADD and MODIFY actions: id/color, addresses, ACK rates, preamble
 * and slot-time flags, per-AC EDCA parameters and protection flags.
 */
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Translate per-AC WME parameters into firmware TX FIFO slots. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		/* TXOP limit is in units of 32 microseconds. */
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			/*
			 * NOTE(review): no 'break' here, so the
			 * NONMEMBER/NONHT_MIXED cases fall through and
			 * also pick up IWM_MAC_PROT_FLG_FAT_PROT below.
			 * Looks unintentional (the HT_PROT |= above is
			 * then redundant) -- confirm against iwlwifi's
			 * MAC context protection-flag handling.
			 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
5376 
5377 static void
5378 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5379     struct iwm_mac_data_sta *sta, int assoc)
5380 {
5381 	struct ieee80211_node *ni = &in->in_ni;
5382 	uint32_t dtim_off;
5383 	uint64_t tsf;
5384 
5385 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5386 	tsf = le64toh(ni->ni_tstamp.tsf);
5387 
5388 	sta->is_assoc = htole32(assoc);
5389 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5390 	sta->dtim_tsf = htole64(tsf + dtim_off);
5391 	sta->bi = htole32(ni->ni_intval);
5392 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5393 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5394 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5395 	sta->listen_interval = htole32(10);
5396 	sta->assoc_id = htole32(ni->ni_associd);
5397 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5398 }
5399 
5400 static int
5401 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5402     int assoc)
5403 {
5404 	struct ieee80211_node *ni = &in->in_ni;
5405 	struct iwm_mac_ctx_cmd cmd;
5406 
5407 	memset(&cmd, 0, sizeof(cmd));
5408 
5409 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5410 
5411 	/* Allow beacons to pass through as long as we are not associated or we
5412 	 * do not have dtim period information */
5413 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5414 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5415 	else
5416 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5417 
5418 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5419 }
5420 
5421 #define IWM_MISSED_BEACONS_THRESHOLD 8
5422 
5423 static void
5424 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5425 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5426 {
5427 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5428 
5429 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5430 	    le32toh(mb->mac_id),
5431 	    le32toh(mb->consec_missed_beacons),
5432 	    le32toh(mb->consec_missed_beacons_since_last_rx),
5433 	    le32toh(mb->num_recvd_beacons),
5434 	    le32toh(mb->num_expected_beacons)));
5435 
5436 	/*
5437 	 * TODO: the threshold should be adjusted based on latency conditions,
5438 	 * and/or in case of a CS flow on one of the other AP vifs.
5439 	 */
5440 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5441 	    IWM_MISSED_BEACONS_THRESHOLD)
5442 		ieee80211_beacon_miss(&sc->sc_ic);
5443 }
5444 
5445 static int
5446 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5447 {
5448 	struct iwm_time_quota_cmd cmd;
5449 	int i, idx, num_active_macs, quota, quota_rem;
5450 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5451 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
5452 	uint16_t id;
5453 
5454 	memset(&cmd, 0, sizeof(cmd));
5455 
5456 	/* currently, PHY ID == binding ID */
5457 	if (in) {
5458 		id = in->in_phyctxt->id;
5459 		KASSERT(id < IWM_MAX_BINDINGS);
5460 		colors[id] = in->in_phyctxt->color;
5461 
5462 		if (1)
5463 			n_ifs[id] = 1;
5464 	}
5465 
5466 	/*
5467 	 * The FW's scheduling session consists of
5468 	 * IWM_MAX_QUOTA fragments. Divide these fragments
5469 	 * equally between all the bindings that require quota
5470 	 */
5471 	num_active_macs = 0;
5472 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5473 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5474 		num_active_macs += n_ifs[i];
5475 	}
5476 
5477 	quota = 0;
5478 	quota_rem = 0;
5479 	if (num_active_macs) {
5480 		quota = IWM_MAX_QUOTA / num_active_macs;
5481 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
5482 	}
5483 
5484 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5485 		if (colors[i] < 0)
5486 			continue;
5487 
5488 		cmd.quotas[idx].id_and_color =
5489 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5490 
5491 		if (n_ifs[i] <= 0) {
5492 			cmd.quotas[idx].quota = htole32(0);
5493 			cmd.quotas[idx].max_duration = htole32(0);
5494 		} else {
5495 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5496 			cmd.quotas[idx].max_duration = htole32(0);
5497 		}
5498 		idx++;
5499 	}
5500 
5501 	/* Give the remainder of the session to the first binding */
5502 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5503 
5504 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5505 }
5506 
/*
 * Prepare the firmware for authentication: configure the smart FIFO,
 * multicast filter, PHY and MAC contexts, binding and station entry
 * for the target BSS, then protect the session with a time event so
 * the firmware stays on-channel while association completes.
 * Returns 0 on success or an errno value.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Point PHY context 0 at the BSS channel. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	/* Bind the MAC context to the PHY context. */
	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	/* Add a (not yet associated) station entry for the AP. */
	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	DELAY(100);

	return 0;
}
5564 
5565 static int
5566 iwm_assoc(struct iwm_softc *sc)
5567 {
5568 	struct ieee80211com *ic = &sc->sc_ic;
5569 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5570 	int err;
5571 
5572 	err = iwm_add_sta_cmd(sc, in, 1);
5573 	if (err)
5574 		return err;
5575 
5576 	return 0;
5577 }
5578 
5579 static struct ieee80211_node *
5580 iwm_node_alloc(struct ieee80211_node_table *nt)
5581 {
5582 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5583 }
5584 
/*
 * Periodic (500ms) rate-control callout: let AMRR choose a new TX
 * rate while we are an associated STA with automatic rate selection,
 * then reschedule.  With HT enabled, a changed rate requires the
 * firmware's LQ table to be reprogrammed via the setrates softint.
 */
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
#endif
	int s;

	s = splnet();
	/*
	 * NOTE(review): with HT compiled in, AMRR runs when *either*
	 * the legacy rate or the MCS is unfixed ('||').  Confirm that a
	 * fixed legacy rate combined with an automatic MCS (and vice
	 * versa) is really meant to keep AMRR running.
	 */
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the old rate to detect an AMRR change below. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firmware's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
5628 
5629 #ifndef IEEE80211_NO_HT
5630 static void
5631 iwm_setrates_task(void *arg)
5632 {
5633 	struct iwm_softc *sc = arg;
5634 	struct ieee80211com *ic = &sc->sc_ic;
5635 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5636 
5637 	/* Update rates table based on new TX rate determined by AMRR. */
5638 	iwm_setrates(in);
5639 }
5640 
/*
 * Build and send an LQ (link quality) command that programs the
 * firmware's rate selection table for our station, starting from the
 * node's current TX rate/MCS and descending to the lowest usable
 * rate.  Returns 0 on success or an errno value.
 */
static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* Short guard interval at 20MHz, if the peer advertises it. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		/* Prefer an HT entry if an RX-supported MCS maps here. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Otherwise fall back to a matching legacy rate, if any. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Select the antenna; tag CCK rates for the firmware. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
5741 #endif
5742 
/*
 * ifmedia change callback: record any user-fixed rate or MCS as a
 * firmware rate index and restart the interface so the new media
 * settings take effect.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		/*
		 * NOTE(review): if no entry matches, ridx ends up as
		 * IWM_RIDX_MAX + 1.  Presumably unreachable since the
		 * rate comes from ic_sup_rates, but worth confirming.
		 */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface if it is currently up and running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	/*
	 * NOTE(review): when the interface is not up and running this
	 * returns ENETRESET (from ieee80211_media_change()) -- confirm
	 * callers treat that as success.
	 */
	return err;
}
5777 
/*
 * Workqueue callback performing the actual net80211 state transition
 * requested via iwm_newstate().  Runs in thread context so the
 * firmware commands issued below may sleep.  A work item whose
 * generation no longer matches the softc's (device was reset in the
 * meantime) is abandoned.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int err;

	/* The work item was allocated by iwm_newstate(); free it here. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		iwm_stop_device(sc);
		iwm_init_hw(sc);

		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* A scan is already in progress; nothing to do. */
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/*
		 * Deliberately skip sc_newstate() here; the scan end
		 * path (iwm_endscan()) drives the next transition.
		 */
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could send power command (error %d)\n", err);
			return;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		/* Kick off periodic AMRR calibration. */
		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5935 
5936 static int
5937 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5938 {
5939 	struct iwm_newstate_state *iwmns;
5940 	struct ifnet *ifp = IC2IFP(ic);
5941 	struct iwm_softc *sc = ifp->if_softc;
5942 
5943 	callout_stop(&sc->sc_calib_to);
5944 
5945 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5946 	if (!iwmns) {
5947 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5948 		return ENOMEM;
5949 	}
5950 
5951 	iwmns->ns_nstate = nstate;
5952 	iwmns->ns_arg = arg;
5953 	iwmns->ns_generation = sc->sc_generation;
5954 
5955 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5956 
5957 	return 0;
5958 }
5959 
5960 static void
5961 iwm_endscan(struct iwm_softc *sc)
5962 {
5963 	struct ieee80211com *ic = &sc->sc_ic;
5964 
5965 	DPRINTF(("scan ended\n"));
5966 
5967 	CLR(sc->sc_flags, IWM_FLAG_SCANNING);
5968 	ieee80211_end_scan(ic);
5969 }
5970 
5971 /*
5972  * Aging and idle timeouts for the different possible scenarios
5973  * in default configuration
5974  */
/* Each row is one scenario; each entry pair is { aging timer, idle timer }. */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block-ack traffic */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re-attempts */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
5998 
5999 /*
6000  * Aging and idle timeouts for the different possible scenarios
6001  * in single BSS MAC configuration.
6002  */
/* Each row is one scenario; each entry pair is { aging timer, idle timer }. */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block-ack traffic */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re-attempts */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
6026 
/*
 * Populate an IWM_REPLY_SF_CFG_CMD body.  'ni' is the AP node when we
 * are (becoming) associated, or NULL for the unassociated defaults.
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	/* Long-delay mode always uses the scan watermark. */
	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermarks need ni_rxmcs[] info; disabled. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts depend on whether we are associated. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
6074 
6075 static int
6076 iwm_sf_config(struct iwm_softc *sc, int new_state)
6077 {
6078 	struct ieee80211com *ic = &sc->sc_ic;
6079 	struct iwm_sf_cfg_cmd sf_cmd = {
6080 		.state = htole32(IWM_SF_FULL_ON),
6081 	};
6082 
6083 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6084 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6085 
6086 	switch (new_state) {
6087 	case IWM_SF_UNINIT:
6088 	case IWM_SF_INIT_OFF:
6089 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
6090 		break;
6091 	case IWM_SF_FULL_ON:
6092 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6093 		break;
6094 	default:
6095 		return EINVAL;
6096 	}
6097 
6098 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6099 	    sizeof(sf_cmd), &sf_cmd);
6100 }
6101 
6102 static int
6103 iwm_send_bt_init_conf(struct iwm_softc *sc)
6104 {
6105 	struct iwm_bt_coex_cmd bt_cmd;
6106 
6107 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6108 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6109 
6110 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6111 }
6112 
6113 static bool
6114 iwm_is_lar_supported(struct iwm_softc *sc)
6115 {
6116 	bool nvm_lar = sc->sc_nvm.lar_enabled;
6117 	bool tlv_lar = isset(sc->sc_enabled_capa,
6118 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6119 
6120 	if (iwm_lar_disable)
6121 		return false;
6122 
6123 	/*
6124 	 * Enable LAR only if it is supported by the FW (TLV) &&
6125 	 * enabled in the NVM
6126 	 */
6127 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6128 		return nvm_lar && tlv_lar;
6129 	else
6130 		return tlv_lar;
6131 }
6132 
6133 static int
6134 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6135 {
6136 	struct iwm_mcc_update_cmd mcc_cmd;
6137 	struct iwm_host_cmd hcmd = {
6138 		.id = IWM_MCC_UPDATE_CMD,
6139 		.flags = IWM_CMD_WANT_SKB,
6140 		.data = { &mcc_cmd },
6141 	};
6142 	int err;
6143 	int resp_v2 = isset(sc->sc_enabled_capa,
6144 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6145 
6146 	if (!iwm_is_lar_supported(sc)) {
6147 		DPRINTF(("%s: no LAR support\n", __func__));
6148 		return 0;
6149 	}
6150 
6151 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6152 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6153 	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6154 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6155 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6156 	else
6157 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6158 
6159 	if (resp_v2)
6160 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6161 	else
6162 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6163 
6164 	err = iwm_send_cmd(sc, &hcmd);
6165 	if (err)
6166 		return err;
6167 
6168 	iwm_free_resp(sc, &hcmd);
6169 
6170 	return 0;
6171 }
6172 
6173 static void
6174 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6175 {
6176 	struct iwm_host_cmd cmd = {
6177 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6178 		.len = { sizeof(uint32_t), },
6179 		.data = { &backoff, },
6180 	};
6181 
6182 	iwm_send_cmd(sc, &cmd);
6183 }
6184 
6185 static int
6186 iwm_init_hw(struct iwm_softc *sc)
6187 {
6188 	struct ieee80211com *ic = &sc->sc_ic;
6189 	int err, i, ac;
6190 
6191 	err = iwm_preinit(sc);
6192 	if (err)
6193 		return err;
6194 
6195 	err = iwm_start_hw(sc);
6196 	if (err) {
6197 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6198 		return err;
6199 	}
6200 
6201 	err = iwm_run_init_mvm_ucode(sc, 0);
6202 	if (err)
6203 		return err;
6204 
6205 	/* Should stop and start HW since INIT image just loaded. */
6206 	iwm_stop_device(sc);
6207 	err = iwm_start_hw(sc);
6208 	if (err) {
6209 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6210 		return err;
6211 	}
6212 
6213 	/* Restart, this time with the regular firmware */
6214 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6215 	if (err) {
6216 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
6217 		goto err;
6218 	}
6219 
6220 	err = iwm_send_bt_init_conf(sc);
6221 	if (err) {
6222 		aprint_error_dev(sc->sc_dev,
6223 		    "could not init bt coex (error %d)\n", err);
6224 		goto err;
6225 	}
6226 
6227 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6228 	if (err) {
6229 		aprint_error_dev(sc->sc_dev,
6230 		    "could not init tx ant config (error %d)\n", err);
6231 		goto err;
6232 	}
6233 
6234 	/* Send phy db control command and then phy db calibration*/
6235 	err = iwm_send_phy_db_data(sc);
6236 	if (err) {
6237 		aprint_error_dev(sc->sc_dev,
6238 		    "could not init phy db (error %d)\n", err);
6239 		goto err;
6240 	}
6241 
6242 	err = iwm_send_phy_cfg_cmd(sc);
6243 	if (err) {
6244 		aprint_error_dev(sc->sc_dev,
6245 		    "could not send phy config (error %d)\n", err);
6246 		goto err;
6247 	}
6248 
6249 	/* Add auxiliary station for scanning */
6250 	err = iwm_add_aux_sta(sc);
6251 	if (err) {
6252 		aprint_error_dev(sc->sc_dev,
6253 		    "could not add aux station (error %d)\n", err);
6254 		goto err;
6255 	}
6256 
6257 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6258 		/*
6259 		 * The channel used here isn't relevant as it's
6260 		 * going to be overwritten in the other flows.
6261 		 * For now use the first channel we have.
6262 		 */
6263 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6264 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6265 		    IWM_FW_CTXT_ACTION_ADD, 0);
6266 		if (err) {
6267 			aprint_error_dev(sc->sc_dev,
6268 			    "could not add phy context %d (error %d)\n",
6269 			    i, err);
6270 			goto err;
6271 		}
6272 	}
6273 
6274 	/* Initialize tx backoffs to the minimum. */
6275 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6276 		iwm_tt_tx_backoff(sc, 0);
6277 
6278 	err = iwm_power_update_device(sc);
6279 	if (err) {
6280 		aprint_error_dev(sc->sc_dev,
6281 		    "could send power command (error %d)\n", err);
6282 		goto err;
6283 	}
6284 
6285 	err = iwm_send_update_mcc_cmd(sc, "ZZ");
6286 	if (err) {
6287 		aprint_error_dev(sc->sc_dev,
6288 		    "could not init LAR (error %d)\n", err);
6289 		goto err;
6290 	}
6291 
6292 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6293 		err = iwm_config_umac_scan(sc);
6294 		if (err) {
6295 			aprint_error_dev(sc->sc_dev,
6296 			    "could not configure scan (error %d)\n", err);
6297 			goto err;
6298 		}
6299 	}
6300 
6301 	for (ac = 0; ac < WME_NUM_AC; ac++) {
6302 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6303 		    iwm_ac_to_tx_fifo[ac]);
6304 		if (err) {
6305 			aprint_error_dev(sc->sc_dev,
6306 			    "could not enable Tx queue %d (error %d)\n",
6307 			    i, err);
6308 			goto err;
6309 		}
6310 	}
6311 
6312 	err = iwm_disable_beacon_filter(sc);
6313 	if (err) {
6314 		aprint_error_dev(sc->sc_dev,
6315 		    "could not disable beacon filter (error %d)\n", err);
6316 		goto err;
6317 	}
6318 
6319 	return 0;
6320 
6321  err:
6322 	iwm_stop_device(sc);
6323 	return err;
6324 }
6325 
6326 /* Allow multicast from our BSSID. */
6327 static int
6328 iwm_allow_mcast(struct iwm_softc *sc)
6329 {
6330 	struct ieee80211com *ic = &sc->sc_ic;
6331 	struct ieee80211_node *ni = ic->ic_bss;
6332 	struct iwm_mcast_filter_cmd *cmd;
6333 	size_t size;
6334 	int err;
6335 
6336 	size = roundup(sizeof(*cmd), 4);
6337 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6338 	if (cmd == NULL)
6339 		return ENOMEM;
6340 	cmd->filter_own = 1;
6341 	cmd->port_id = 0;
6342 	cmd->count = 0;
6343 	cmd->pass_all = 1;
6344 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6345 
6346 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6347 	kmem_intr_free(cmd, size);
6348 	return err;
6349 }
6350 
6351 static int
6352 iwm_init(struct ifnet *ifp)
6353 {
6354 	struct iwm_softc *sc = ifp->if_softc;
6355 	int err;
6356 
6357 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6358 		return 0;
6359 
6360 	sc->sc_generation++;
6361 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
6362 
6363 	err = iwm_init_hw(sc);
6364 	if (err) {
6365 		iwm_stop(ifp, 1);
6366 		return err;
6367 	}
6368 
6369 	ifp->if_flags &= ~IFF_OACTIVE;
6370 	ifp->if_flags |= IFF_RUNNING;
6371 
6372 	ieee80211_begin_scan(&sc->sc_ic, 0);
6373 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6374 
6375 	return 0;
6376 }
6377 
/*
 * ifnet start handler: drain the management queue and then the data
 * send queue, encapsulating and handing each frame to iwm_tx().  Sets
 * IFF_OACTIVE and backs off when any Tx ring is full.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Node reference travels in the mbuf context. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			M_CLEARCTX(m);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Make the Ethernet header contiguous for inspection. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		bpf_mtap(ifp, m);

		/* 802.11 encapsulation; consumes the mbuf on failure. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		bpf_mtap3(ic->ic_rawbpf, m);

		/*
		 * NOTE(review): on error only the node ref is dropped here,
		 * which presumes iwm_tx() frees the mbuf itself — confirm.
		 */
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the Tx watchdog for this frame. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}
}
6463 
6464 static void
6465 iwm_stop(struct ifnet *ifp, int disable)
6466 {
6467 	struct iwm_softc *sc = ifp->if_softc;
6468 	struct ieee80211com *ic = &sc->sc_ic;
6469 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6470 
6471 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6472 	sc->sc_flags |= IWM_FLAG_STOPPED;
6473 	sc->sc_generation++;
6474 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6475 
6476 	if (in)
6477 		in->in_phyctxt = NULL;
6478 
6479 	if (ic->ic_state != IEEE80211_S_INIT)
6480 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6481 
6482 	callout_stop(&sc->sc_calib_to);
6483 	iwm_led_blink_stop(sc);
6484 	ifp->if_timer = sc->sc_tx_timer = 0;
6485 	iwm_stop_device(sc);
6486 }
6487 
6488 static void
6489 iwm_watchdog(struct ifnet *ifp)
6490 {
6491 	struct iwm_softc *sc = ifp->if_softc;
6492 
6493 	ifp->if_timer = 0;
6494 	if (sc->sc_tx_timer > 0) {
6495 		if (--sc->sc_tx_timer == 0) {
6496 			aprint_error_dev(sc->sc_dev, "device timeout\n");
6497 #ifdef IWM_DEBUG
6498 			iwm_nic_error(sc);
6499 #endif
6500 			ifp->if_flags &= ~IFF_UP;
6501 			iwm_stop(ifp, 1);
6502 			ifp->if_oerrors++;
6503 			return;
6504 		}
6505 		ifp->if_timer = 1;
6506 	}
6507 
6508 	ieee80211_watchdog(&sc->sc_ic);
6509 }
6510 
/*
 * ifnet ioctl handler.  Runs at splnet; handles interface up/down and
 * multicast list changes itself, delegating everything else to
 * net80211 (or plain ether_ioctl before attach completes).
 */
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, err = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		err = ifioctl_common(ifp, cmd, data);
		if (err)
			break;
		/* Bring the interface in line with the requested UP state. */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
				if (err)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ENXIO;
			break;
		}
		/*
		 * NOTE(review): SIOCADDMULTI is passed to ifreq_getaddr()
		 * even for SIOCDELMULTI; both presumably share the same
		 * ifreq layout so the address extraction is identical —
		 * confirm, or pass 'cmd' for clarity.
		 */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		err = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);
		if (err == ENETRESET)
			err = 0;
		break;

	default:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ether_ioctl(ifp, cmd, data);
			break;
		}
		err = ieee80211_ioctl(ic, cmd, data);
		break;
	}

	/* ENETRESET means "reprogram the hardware": restart if running. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	return err;
}
6576 
6577 /*
6578  * Note: This structure is read from the device with IO accesses,
6579  * and the reading already does the endian conversion. As it is
6580  * read with uint32_t-sized accesses, any members with a different size
6581  * need to be ordered correctly though!
6582  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* firmware build date and time
				 * (compilation timestamp) */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6630 
6631 /*
6632  * UMAC error struct - relevant starting from family 8000 chip.
6633  * Note: This structure is read from the device with IO accesses,
6634  * and the reading already does the endian conversion. As it is
6635  * read with u32-sized accesses, any members with a different size
6636  * need to be ordered correctly though!
6637  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC firmware version, major */
	uint32_t umac_minor;	/* UMAC firmware version, minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
6655 
/* Byte offsets used when validating/dumping the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6658 
6659 #ifdef IWM_DEBUG
/*
 * Known firmware assert identifiers, used to translate the numeric
 * error_id from the error event tables into a printable name.  The
 * final "ADVANCED_SYSASSERT" entry is the catch-all for unknown codes.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6681 
6682 static const char *
6683 iwm_desc_lookup(uint32_t num)
6684 {
6685 	int i;
6686 
6687 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6688 		if (advanced_lookup[i].num == num)
6689 			return advanced_lookup[i].name;
6690 
6691 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6692 	return advanced_lookup[i].name;
6693 }
6694 
6695 /*
6696  * Support for dumping the error log seemed like a good idea ...
6697  * but it's mostly hex junk and the only sensible thing is the
6698  * hw/ucode revision (which we know anyway).  Since it's here,
6699  * I'll just leave it in, just in case e.g. the Intel guys want to
6700  * help us decipher some "ADVANCED_SYSASSERT" later.
6701  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table t;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	/* The firmware's ALIVE response told us where the log lives. */
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!t.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	/* Dump every table field; mostly useful to firmware developers. */
	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
	    t.trm_hw_status0);
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
	    t.trm_hw_status1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
	    t.fw_rev_type);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
	    t.major);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
	    t.minor);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    t.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);

	/* Family 8000 devices keep a second log for the UMAC. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
6777 
/* Dump the UMAC error event table (family 8000 and later firmware). */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
		iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
6824 #endif
6825 
/*
 * DMA post-read sync helpers for notification handling: sync the
 * response payload that follows the packet header, then point the
 * given variable at it.
 *
 * Fix: SYNC_RESP_PTR previously synced sizeof(len) bytes, silently
 * referencing a variable named 'len' from the caller's scope instead
 * of its own _len_ parameter — wrong sync length and a hidden
 * namespace dependency.  It now syncs (_len_) bytes as intended.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the Rx ring read index, wrapping at the ring size. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6841 
6842 static void
6843 iwm_notif_intr(struct iwm_softc *sc)
6844 {
6845 	uint16_t hw;
6846 
6847 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
6848 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
6849 
6850 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
6851 	while (sc->rxq.cur != hw) {
6852 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
6853 		struct iwm_rx_packet *pkt;
6854 		struct iwm_cmd_response *cresp;
6855 		int orig_qid, qid, idx, code;
6856 
6857 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
6858 		    BUS_DMASYNC_POSTREAD);
6859 		pkt = mtod(data->m, struct iwm_rx_packet *);
6860 
6861 		orig_qid = pkt->hdr.qid;
6862 		qid = orig_qid & ~0x80;
6863 		idx = pkt->hdr.idx;
6864 
6865 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
6866 
6867 		/*
6868 		 * randomly get these from the firmware, no idea why.
6869 		 * they at least seem harmless, so just ignore them for now
6870 		 */
6871 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
6872 		    || pkt->len_n_flags == htole32(0x55550000))) {
6873 			ADVANCE_RXQ(sc);
6874 			continue;
6875 		}
6876 
6877 		switch (code) {
6878 		case IWM_REPLY_RX_PHY_CMD:
6879 			iwm_rx_rx_phy_cmd(sc, pkt, data);
6880 			break;
6881 
6882 		case IWM_REPLY_RX_MPDU_CMD:
6883 			iwm_rx_rx_mpdu(sc, pkt, data);
6884 			break;
6885 
6886 		case IWM_TX_CMD:
6887 			iwm_rx_tx_cmd(sc, pkt, data);
6888 			break;
6889 
6890 		case IWM_MISSED_BEACONS_NOTIFICATION:
6891 			iwm_rx_missed_beacons_notif(sc, pkt, data);
6892 			break;
6893 
6894 		case IWM_MFUART_LOAD_NOTIFICATION:
6895 			break;
6896 
6897 		case IWM_ALIVE: {
6898 			struct iwm_alive_resp_v1 *resp1;
6899 			struct iwm_alive_resp_v2 *resp2;
6900 			struct iwm_alive_resp_v3 *resp3;
6901 
6902 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
6903 				SYNC_RESP_STRUCT(resp1, pkt);
6904 				sc->sc_uc.uc_error_event_table
6905 				    = le32toh(resp1->error_event_table_ptr);
6906 				sc->sc_uc.uc_log_event_table
6907 				    = le32toh(resp1->log_event_table_ptr);
6908 				sc->sched_base = le32toh(resp1->scd_base_ptr);
6909 				if (resp1->status == IWM_ALIVE_STATUS_OK)
6910 					sc->sc_uc.uc_ok = 1;
6911 				else
6912 					sc->sc_uc.uc_ok = 0;
6913 			}
6914 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
6915 				SYNC_RESP_STRUCT(resp2, pkt);
6916 				sc->sc_uc.uc_error_event_table
6917 				    = le32toh(resp2->error_event_table_ptr);
6918 				sc->sc_uc.uc_log_event_table
6919 				    = le32toh(resp2->log_event_table_ptr);
6920 				sc->sched_base = le32toh(resp2->scd_base_ptr);
6921 				sc->sc_uc.uc_umac_error_event_table
6922 				    = le32toh(resp2->error_info_addr);
6923 				if (resp2->status == IWM_ALIVE_STATUS_OK)
6924 					sc->sc_uc.uc_ok = 1;
6925 				else
6926 					sc->sc_uc.uc_ok = 0;
6927 			}
6928 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
6929 				SYNC_RESP_STRUCT(resp3, pkt);
6930 				sc->sc_uc.uc_error_event_table
6931 				    = le32toh(resp3->error_event_table_ptr);
6932 				sc->sc_uc.uc_log_event_table
6933 				    = le32toh(resp3->log_event_table_ptr);
6934 				sc->sched_base = le32toh(resp3->scd_base_ptr);
6935 				sc->sc_uc.uc_umac_error_event_table
6936 				    = le32toh(resp3->error_info_addr);
6937 				if (resp3->status == IWM_ALIVE_STATUS_OK)
6938 					sc->sc_uc.uc_ok = 1;
6939 				else
6940 					sc->sc_uc.uc_ok = 0;
6941 			}
6942 
6943 			sc->sc_uc.uc_intr = 1;
6944 			wakeup(&sc->sc_uc);
6945 			break;
6946 		}
6947 
6948 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
6949 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
6950 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
6951 			uint16_t size = le16toh(phy_db_notif->length);
6952 			bus_dmamap_sync(sc->sc_dmat, data->map,
6953 			    sizeof(*pkt) + sizeof(*phy_db_notif),
6954 			    size, BUS_DMASYNC_POSTREAD);
6955 			iwm_phy_db_set_section(sc, phy_db_notif, size);
6956 			break;
6957 		}
6958 
6959 		case IWM_STATISTICS_NOTIFICATION: {
6960 			struct iwm_notif_statistics *stats;
6961 			SYNC_RESP_STRUCT(stats, pkt);
6962 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
6963 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
6964 			break;
6965 		}
6966 
6967 		case IWM_NVM_ACCESS_CMD:
6968 		case IWM_MCC_UPDATE_CMD:
6969 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
6970 				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6971 				    sizeof(sc->sc_cmd_resp),
6972 				    BUS_DMASYNC_POSTREAD);
6973 				memcpy(sc->sc_cmd_resp,
6974 				    pkt, sizeof(sc->sc_cmd_resp));
6975 			}
6976 			break;
6977 
6978 		case IWM_MCC_CHUB_UPDATE_CMD: {
6979 			struct iwm_mcc_chub_notif *notif;
6980 			SYNC_RESP_STRUCT(notif, pkt);
6981 
6982 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
6983 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
6984 			sc->sc_fw_mcc[2] = '\0';
6985 			break;
6986 		}
6987 
6988 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
6989 			break;
6990 
6991 		case IWM_PHY_CONFIGURATION_CMD:
6992 		case IWM_TX_ANT_CONFIGURATION_CMD:
6993 		case IWM_ADD_STA:
6994 		case IWM_MAC_CONTEXT_CMD:
6995 		case IWM_REPLY_SF_CFG_CMD:
6996 		case IWM_POWER_TABLE_CMD:
6997 		case IWM_PHY_CONTEXT_CMD:
6998 		case IWM_BINDING_CONTEXT_CMD:
6999 		case IWM_TIME_EVENT_CMD:
7000 		case IWM_SCAN_REQUEST_CMD:
7001 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7002 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7003 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7004 		case IWM_REPLY_BEACON_FILTERING_CMD:
7005 		case IWM_MAC_PM_POWER_TABLE:
7006 		case IWM_TIME_QUOTA_CMD:
7007 		case IWM_REMOVE_STA:
7008 		case IWM_TXPATH_FLUSH:
7009 		case IWM_LQ_CMD:
7010 		case IWM_BT_CONFIG:
7011 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
7012 			SYNC_RESP_STRUCT(cresp, pkt);
7013 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
7014 				memcpy(sc->sc_cmd_resp,
7015 				    pkt, sizeof(*pkt) + sizeof(*cresp));
7016 			}
7017 			break;
7018 
7019 		/* ignore */
7020 		case 0x6c: /* IWM_PHY_DB_CMD */
7021 			break;
7022 
7023 		case IWM_INIT_COMPLETE_NOTIF:
7024 			sc->sc_init_complete = 1;
7025 			wakeup(&sc->sc_init_complete);
7026 			break;
7027 
7028 		case IWM_SCAN_OFFLOAD_COMPLETE: {
7029 			struct iwm_periodic_scan_complete *notif;
7030 			SYNC_RESP_STRUCT(notif, pkt);
7031 			break;
7032 		}
7033 
7034 		case IWM_SCAN_ITERATION_COMPLETE: {
7035 			struct iwm_lmac_scan_complete_notif *notif;
7036 			SYNC_RESP_STRUCT(notif, pkt);
7037 			iwm_endscan(sc);
7038 			break;
7039 		}
7040 
7041 		case IWM_SCAN_COMPLETE_UMAC: {
7042 			struct iwm_umac_scan_complete *notif;
7043 			SYNC_RESP_STRUCT(notif, pkt);
7044 			iwm_endscan(sc);
7045 			break;
7046 		}
7047 
7048 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7049 			struct iwm_umac_scan_iter_complete_notif *notif;
7050 			SYNC_RESP_STRUCT(notif, pkt);
7051 			iwm_endscan(sc);
7052 			break;
7053 		}
7054 
7055 		case IWM_REPLY_ERROR: {
7056 			struct iwm_error_resp *resp;
7057 			SYNC_RESP_STRUCT(resp, pkt);
7058 			aprint_error_dev(sc->sc_dev,
7059 			    "firmware error 0x%x, cmd 0x%x\n",
7060 			    le32toh(resp->error_type), resp->cmd_id);
7061 			break;
7062 		}
7063 
7064 		case IWM_TIME_EVENT_NOTIFICATION: {
7065 			struct iwm_time_event_notif *notif;
7066 			SYNC_RESP_STRUCT(notif, pkt);
7067 			break;
7068 		}
7069 
7070 		case IWM_MCAST_FILTER_CMD:
7071 			break;
7072 
7073 		case IWM_SCD_QUEUE_CFG: {
7074 			struct iwm_scd_txq_cfg_rsp *rsp;
7075 			SYNC_RESP_STRUCT(rsp, pkt);
7076 			break;
7077 		}
7078 
7079 		default:
7080 			aprint_error_dev(sc->sc_dev,
7081 			    "unhandled firmware response 0x%x 0x%x/0x%x "
7082 			    "rx ring %d[%d]\n",
7083 			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
7084 			break;
7085 		}
7086 
7087 		/*
7088 		 * uCode sets bit 0x80 when it originates the notification,
7089 		 * i.e. when the notification is not a direct response to a
7090 		 * command sent by the driver.
7091 		 * For example, uCode issues IWM_REPLY_RX when it sends a
7092 		 * received frame to the driver.
7093 		 */
7094 		if (!(orig_qid & (1 << 7))) {
7095 			iwm_cmd_done(sc, qid, idx);
7096 		}
7097 
7098 		ADVANCE_RXQ(sc);
7099 	}
7100 
7101 	/*
7102 	 * Seems like the hardware gets upset unless we align the write by 8??
7103 	 */
7104 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7105 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7106 }
7107 
/*
 * Software interrupt handler: services the interrupt causes that
 * iwm_intr() latched into sc_soft_flags.  Re-scans for causes that
 * accumulated while we were working, then re-enables the hardware
 * interrupt mask (unless a fatal error stopped the device).
 */
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1;
	int isperiodic = 0;

	/* Atomically claim the causes latched by the hard handler. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);

 restart:
	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
 fatal:
		/* Take the interface down and leave interrupts masked. */
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		/* Don't restore interrupt mask */
		return;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	/* RF-kill switch toggled: take the interface down if it was up. */
	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic interrupt unless an RX cause is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	/* More causes may have been latched while we ran; go again. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
	if (r1 != 0)
		goto restart;

	iwm_restore_interrupts(sc);
}
7194 
7195 static int
7196 iwm_intr(void *arg)
7197 {
7198 	struct iwm_softc *sc = arg;
7199 	int r1, r2;
7200 
7201 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7202 
7203 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
7204 		uint32_t *ict = sc->ict_dma.vaddr;
7205 		int tmp;
7206 
7207 		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7208 		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7209 		tmp = htole32(ict[sc->ict_cur]);
7210 		if (!tmp)
7211 			goto out_ena;
7212 
7213 		/*
7214 		 * ok, there was something.  keep plowing until we have all.
7215 		 */
7216 		r1 = r2 = 0;
7217 		while (tmp) {
7218 			r1 |= tmp;
7219 			ict[sc->ict_cur] = 0;	/* Acknowledge. */
7220 			bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7221 			    &ict[sc->ict_cur] - ict, sizeof(*ict),
7222 			    BUS_DMASYNC_PREWRITE);
7223 			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7224 			tmp = htole32(ict[sc->ict_cur]);
7225 		}
7226 
7227 		/* this is where the fun begins.  don't ask */
7228 		if (r1 == 0xffffffff)
7229 			r1 = 0;
7230 
7231 		/* i am not expected to understand this */
7232 		if (r1 & 0xc0000)
7233 			r1 |= 0x8000;
7234 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7235 	} else {
7236 		r1 = IWM_READ(sc, IWM_CSR_INT);
7237 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7238 			goto out;
7239 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7240 	}
7241 	if (r1 == 0 && r2 == 0) {
7242 		goto out_ena;
7243 	}
7244 
7245 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7246 
7247 	atomic_or_32(&sc->sc_soft_flags, r1);
7248 	softint_schedule(sc->sc_soft_ih);
7249 	return 1;
7250 
7251  out_ena:
7252 	iwm_restore_interrupts(sc);
7253  out:
7254 	return 0;
7255 }
7256 
7257 /*
7258  * Autoconf glue-sniffing
7259  */
7260 
/*
 * PCI product IDs supported by this driver; the vendor check
 * (PCI_VENDOR_INTEL) is done separately in iwm_match().
 */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
#ifdef notyet
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
#endif
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
};
7277 
7278 static int
7279 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7280 {
7281 	struct pci_attach_args *pa = aux;
7282 
7283 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7284 		return 0;
7285 
7286 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7287 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7288 			return 1;
7289 
7290 	return 0;
7291 }
7292 
/*
 * Deferred part of device attachment, run from the mountroot hook:
 * start the hardware once, run the init firmware (which lets us read
 * the NVM and thus the MAC address), then attach the 802.11 layer.
 * Guarded by IWM_FLAG_ATTACHED so it only happens once.
 * Returns 0 on success or an errno-style error from the hardware
 * bring-up steps.
 */
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Run the init ucode once to populate NVM data, then park the NIC. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
7342 
7343 static void
7344 iwm_attach_hook(device_t dev)
7345 {
7346 	struct iwm_softc *sc = device_private(dev);
7347 
7348 	iwm_preinit(sc);
7349 }
7350 
7351 static void
7352 iwm_attach(device_t parent, device_t self, void *aux)
7353 {
7354 	struct iwm_softc *sc = device_private(self);
7355 	struct pci_attach_args *pa = aux;
7356 	struct ieee80211com *ic = &sc->sc_ic;
7357 	struct ifnet *ifp = &sc->sc_ec.ec_if;
7358 	pcireg_t reg, memtype;
7359 	char intrbuf[PCI_INTRSTR_LEN];
7360 	const char *intrstr;
7361 	int err;
7362 	int txq_i;
7363 	const struct sysctlnode *node;
7364 
7365 	sc->sc_dev = self;
7366 	sc->sc_pct = pa->pa_pc;
7367 	sc->sc_pcitag = pa->pa_tag;
7368 	sc->sc_dmat = pa->pa_dmat;
7369 	sc->sc_pciid = pa->pa_id;
7370 
7371 	pci_aprint_devinfo(pa, NULL);
7372 
7373 	if (workqueue_create(&sc->sc_nswq, "iwmns",
7374 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7375 		panic("%s: could not create workqueue: newstate",
7376 		    device_xname(self));
7377 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
7378 	if (sc->sc_soft_ih == NULL)
7379 		panic("%s: could not establish softint", device_xname(self));
7380 
7381 	/*
7382 	 * Get the offset of the PCI Express Capability Structure in PCI
7383 	 * Configuration Space.
7384 	 */
7385 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7386 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7387 	if (err == 0) {
7388 		aprint_error_dev(self,
7389 		    "PCIe capability structure not found!\n");
7390 		return;
7391 	}
7392 
7393 	/* Clear device-specific "PCI retry timeout" register (41h). */
7394 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7395 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7396 
7397 	/* Enable bus-mastering */
7398 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7399 	reg |= PCI_COMMAND_MASTER_ENABLE;
7400 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7401 
7402 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7403 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7404 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7405 	if (err) {
7406 		aprint_error_dev(self, "can't map mem space\n");
7407 		return;
7408 	}
7409 
7410 	/* Install interrupt handler. */
7411 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7412 	if (err) {
7413 		aprint_error_dev(self, "can't allocate interrupt\n");
7414 		return;
7415 	}
7416 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX) {
7417 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
7418 		    PCI_COMMAND_STATUS_REG);
7419 		if (ISSET(reg, PCI_COMMAND_INTERRUPT_DISABLE)) {
7420 			CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7421 			pci_conf_write(sc->sc_pct, sc->sc_pcitag,
7422 			    PCI_COMMAND_STATUS_REG, reg);
7423 		}
7424 	}
7425 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7426 	    sizeof(intrbuf));
7427 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
7428 	    IPL_NET, iwm_intr, sc, device_xname(self));
7429 	if (sc->sc_ih == NULL) {
7430 		aprint_error_dev(self, "can't establish interrupt");
7431 		if (intrstr != NULL)
7432 			aprint_error(" at %s", intrstr);
7433 		aprint_error("\n");
7434 		return;
7435 	}
7436 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7437 
7438 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7439 
7440 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7441 	switch (PCI_PRODUCT(sc->sc_pciid)) {
7442 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7443 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7444 		sc->sc_fwname = "iwlwifi-3160-16.ucode";
7445 		sc->host_interrupt_operation_mode = 1;
7446 		sc->apmg_wake_up_wa = 1;
7447 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7448 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7449 		break;
7450 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7451 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7452 		sc->sc_fwname = "iwlwifi-7265D-16.ucode";
7453 		sc->host_interrupt_operation_mode = 0;
7454 		sc->apmg_wake_up_wa = 1;
7455 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7456 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7457 		break;
7458 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7459 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7460 		sc->sc_fwname = "iwlwifi-7260-16.ucode";
7461 		sc->host_interrupt_operation_mode = 1;
7462 		sc->apmg_wake_up_wa = 1;
7463 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7464 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7465 		break;
7466 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7467 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7468 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7469 		    IWM_CSR_HW_REV_TYPE_7265D ?
7470 		    "iwlwifi-7265D-16.ucode": "iwlwifi-7265-16.ucode";
7471 		sc->host_interrupt_operation_mode = 0;
7472 		sc->apmg_wake_up_wa = 1;
7473 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7474 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7475 		break;
7476 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7477 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7478 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
7479 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
7480 		sc->sc_fwname = "iwlwifi-8000C-16.ucode";
7481 		sc->host_interrupt_operation_mode = 0;
7482 		sc->apmg_wake_up_wa = 0;
7483 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7484 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7485 		break;
7486 	default:
7487 		aprint_error_dev(self, "unknown product %#x",
7488 		    PCI_PRODUCT(sc->sc_pciid));
7489 		return;
7490 	}
7491 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7492 
7493 	/*
7494 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
7495 	 * changed, and now the revision step also includes bit 0-1 (no more
7496 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
7497 	 * in the old format.
7498 	 */
7499 
7500 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7501 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7502 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7503 
7504 	if (iwm_prepare_card_hw(sc) != 0) {
7505 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7506 		return;
7507 	}
7508 
7509 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7510 		uint32_t hw_step;
7511 
7512 		/*
7513 		 * In order to recognize C step the driver should read the
7514 		 * chip version id located at the AUX bus MISC address.
7515 		 */
7516 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7517 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7518 		DELAY(2);
7519 
7520 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7521 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7522 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7523 				   25000);
7524 		if (!err) {
7525 			aprint_error_dev(sc->sc_dev,
7526 			    "failed to wake up the nic\n");
7527 			return;
7528 		}
7529 
7530 		if (iwm_nic_lock(sc)) {
7531 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7532 			hw_step |= IWM_ENABLE_WFPM;
7533 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7534 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7535 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7536 			if (hw_step == 0x3)
7537 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7538 				    (IWM_SILICON_C_STEP << 2);
7539 			iwm_nic_unlock(sc);
7540 		} else {
7541 			aprint_error_dev(sc->sc_dev,
7542 			    "failed to lock the nic\n");
7543 			return;
7544 		}
7545 	}
7546 
7547 	/*
7548 	 * Allocate DMA memory for firmware transfers.
7549 	 * Must be aligned on a 16-byte boundary.
7550 	 */
7551 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
7552 	    16);
7553 	if (err) {
7554 		aprint_error_dev(sc->sc_dev,
7555 		    "could not allocate memory for firmware\n");
7556 		return;
7557 	}
7558 
7559 	/* Allocate "Keep Warm" page, used internally by the card. */
7560 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7561 	if (err) {
7562 		aprint_error_dev(sc->sc_dev,
7563 		    "could not allocate keep warm page\n");
7564 		goto fail1;
7565 	}
7566 
7567 	/* Allocate interrupt cause table (ICT).*/
7568 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
7569 	    1 << IWM_ICT_PADDR_SHIFT);
7570 	if (err) {
7571 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
7572 		goto fail2;
7573 	}
7574 
7575 	/* TX scheduler rings must be aligned on a 1KB boundary. */
7576 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7577 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7578 	if (err) {
7579 		aprint_error_dev(sc->sc_dev,
7580 		    "could not allocate TX scheduler rings\n");
7581 		goto fail3;
7582 	}
7583 
7584 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
7585 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
7586 		if (err) {
7587 			aprint_error_dev(sc->sc_dev,
7588 			    "could not allocate TX ring %d\n", txq_i);
7589 			goto fail4;
7590 		}
7591 	}
7592 
7593 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
7594 	if (err) {
7595 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
7596 		goto fail4;
7597 	}
7598 
7599 	/* Clear pending interrupts. */
7600 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
7601 
7602 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
7603 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
7604 	    SYSCTL_DESCR("iwm per-controller controls"),
7605 	    NULL, 0, NULL, 0,
7606 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
7607 	    CTL_EOL)) != 0) {
7608 		aprint_normal_dev(sc->sc_dev,
7609 		    "couldn't create iwm per-controller sysctl node\n");
7610 	}
7611 	if (err == 0) {
7612 		int iwm_nodenum = node->sysctl_num;
7613 
7614 		/* Reload firmware sysctl node */
7615 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
7616 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
7617 		    SYSCTL_DESCR("Reload firmware"),
7618 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
7619 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
7620 		    CTL_EOL)) != 0) {
7621 			aprint_normal_dev(sc->sc_dev,
7622 			    "couldn't create load_fw sysctl node\n");
7623 		}
7624 	}
7625 
7626 	/*
7627 	 * Attach interface
7628 	 */
7629 	ic->ic_ifp = ifp;
7630 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
7631 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
7632 	ic->ic_state = IEEE80211_S_INIT;
7633 
7634 	/* Set device capabilities. */
7635 	ic->ic_caps =
7636 	    IEEE80211_C_WEP |		/* WEP */
7637 	    IEEE80211_C_WPA |		/* 802.11i */
7638 #ifdef notyet
7639 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
7640 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
7641 #endif
7642 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
7643 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
7644 
7645 #ifndef IEEE80211_NO_HT
7646 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
7647 	ic->ic_htxcaps = 0;
7648 	ic->ic_txbfcaps = 0;
7649 	ic->ic_aselcaps = 0;
7650 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
7651 #endif
7652 
7653 	/* all hardware can do 2.4GHz band */
7654 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
7655 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
7656 
7657 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
7658 		sc->sc_phyctxt[i].id = i;
7659 	}
7660 
7661 	sc->sc_amrr.amrr_min_success_threshold =  1;
7662 	sc->sc_amrr.amrr_max_success_threshold = 15;
7663 
7664 	/* IBSS channel undefined for now. */
7665 	ic->ic_ibss_chan = &ic->ic_channels[1];
7666 
7667 #if 0
7668 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
7669 #endif
7670 
7671 	ifp->if_softc = sc;
7672 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
7673 	ifp->if_init = iwm_init;
7674 	ifp->if_stop = iwm_stop;
7675 	ifp->if_ioctl = iwm_ioctl;
7676 	ifp->if_start = iwm_start;
7677 	ifp->if_watchdog = iwm_watchdog;
7678 	IFQ_SET_READY(&ifp->if_snd);
7679 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
7680 
7681 	if_initialize(ifp);
7682 #if 0
7683 	ieee80211_ifattach(ic);
7684 #else
7685 	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
7686 #endif
7687 	/* Use common softint-based if_input */
7688 	ifp->if_percpuq = if_percpuq_create(ifp);
7689 	if_deferred_start_init(ifp, NULL);
7690 	if_register(ifp);
7691 
7692 	callout_init(&sc->sc_calib_to, 0);
7693 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
7694 	callout_init(&sc->sc_led_blink_to, 0);
7695 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
7696 #ifndef IEEE80211_NO_HT
7697 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
7698 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
7699 		panic("%s: could not create workqueue: setrates",
7700 		    device_xname(self));
7701 	if (workqueue_create(&sc->sc_bawq, "iwmba",
7702 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
7703 		panic("%s: could not create workqueue: blockack",
7704 		    device_xname(self));
7705 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
7706 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
7707 		panic("%s: could not create workqueue: htprot",
7708 		    device_xname(self));
7709 #endif
7710 
7711 	if (pmf_device_register(self, NULL, NULL))
7712 		pmf_class_network_register(self, ifp);
7713 	else
7714 		aprint_error_dev(self, "couldn't establish power handler\n");
7715 
7716 	/*
7717 	 * We can't do normal attach before the file system is mounted
7718 	 * because we cannot read the MAC address without loading the
7719 	 * firmware from disk.  So we postpone until mountroot is done.
7720 	 * Notably, this will require a full driver unload/load cycle
7721 	 * (or reboot) in case the firmware is not present when the
7722 	 * hook runs.
7723 	 */
7724 	config_mountroot(self, iwm_attach_hook);
7725 
7726 	return;
7727 
7728 fail4:	while (--txq_i >= 0)
7729 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
7730 	iwm_free_rx_ring(sc, &sc->rxq);
7731 	iwm_dma_contig_free(&sc->sched_dma);
7732 fail3:	if (sc->ict_dma.vaddr != NULL)
7733 		iwm_dma_contig_free(&sc->ict_dma);
7734 fail2:	iwm_dma_contig_free(&sc->kw_dma);
7735 fail1:	iwm_dma_contig_free(&sc->fw_dma);
7736 }
7737 
/*
 * Attach a radiotap BPF tap (DLT_IEEE802_11_RADIO) for 802.11
 * monitoring tools and pre-initialize the constant parts of the
 * RX and TX radiotap headers.
 */
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	/* Header length and field-presence bitmap are fixed at attach. */
	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}
7755 
#if 0
/*
 * XXX Disabled code, kept for reference: OpenBSD-derived re-init task
 * and suspend/resume/deactivate glue that is not wired up on NetBSD.
 */
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
7803 
/* Autoconfiguration glue: no detach or activate hooks are provided. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
7806 
7807 static int
7808 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
7809 {
7810 	struct sysctlnode node;
7811 	struct iwm_softc *sc;
7812 	int err, t;
7813 
7814 	node = *rnode;
7815 	sc = node.sysctl_data;
7816 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
7817 	node.sysctl_data = &t;
7818 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
7819 	if (err || newp == NULL)
7820 		return err;
7821 
7822 	if (t == 0)
7823 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
7824 	return 0;
7825 }
7826 
/*
 * Create the global hw.iwm sysctl subtree at boot.  The root node
 * number is saved in iwm_sysctl_root_num so that per-controller nodes
 * can be attached under it from iwm_attach(); with IWM_DEBUG a
 * read-write "debug" knob controlling iwm_debug is added as well.
 */
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
7857