/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 *				DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *	 changes to remove per-device network interface (DragonFly has not
 *	 caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
 *				specifications to M_INTWAIT.  We still don't
 *				understand why FreeBSD uses M_NOWAIT for
 *				critical must-not-fail kmalloc()s).
 *	free -> kfree
 *	printf -> kprintf
 *	(bug fix) memset in iwm_reset_rx_ring.
 *	(debug)   added several kprintf()s on error
 *
 *	header file paths (DFly allows localized path specifications).
 *	minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
 *	packet counters
 *	msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
 *	mtx -> lk  (mtx functions -> lockmgr functions)
 *	callout differences
 *	taskqueue differences
 *	MSI differences
 *	bus_setup_intr() differences
 *	minor PCI config register naming differences
 */
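/*
 * Illustrative sketch (guarded out, not part of the driver) of the
 * malloc -> kmalloc mapping listed above; the buffer name and size here
 * are hypothetical.  On DragonFly, M_INTWAIT may block briefly but is
 * expected not to fail, unlike M_NOWAIT.
 */
#if 0
	/* FreeBSD: can return NULL, even where the caller cannot recover */
	buf = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);

	/* DragonFly equivalent */
	buf = kmalloc(len, M_DEVBUF, M_INTWAIT | M_ZERO);
	kfree(buf, M_DEVBUF);
#endif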
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_debug.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
#if !defined(__DragonFly__)
static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
                                     bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static void	iwm_free_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static void	iwm_free_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static void	iwm_free_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static void	iwm_free_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, size_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
				   const uint16_t *, const uint16_t *,
				   const uint16_t *, const uint16_t *,
				   const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
					struct iwm_nvm_data *,
					const uint16_t *, const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
                                       struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
                                       const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
                                        const uint8_t *, uint32_t);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
					   struct iwm_fw_sects *, int , int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *,
                                      struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
                                   struct iwm_rx_data *);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
                                  struct iwm_rx_data *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
					        struct iwm_mvm_add_sta_cmd_v7 *,
                                                int *);
static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
                                       int);
static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
                                           struct iwm_int_sta *,
				           const uint8_t *, uint16_t, uint16_t);
static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
					struct iwm_sf_cfg_cmd *,
					struct ieee80211_node *);
static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

/*
 * This is a hack due to the wlan_serializer deadlocking sleepers.
 */
int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);

int
iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
{
	int error;

	if (wlan_is_serialized()) {
		wlan_serialize_exit();
		kprintf("%s: have to release serializer for sleeping\n",
		    __func__);
		error = lksleep(chan, lk, flags, wmesg, to);
		lockmgr(lk, LK_RELEASE);
		wlan_serialize_enter();
		lockmgr(lk, LK_EXCLUSIVE);
	} else {
		error = lksleep(chan, lk, flags, wmesg, to);
	}
	return error;
}

#endif

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
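/*
 * A hypothetical (guarded out) view of the section payload layout that
 * iwm_firmware_store_section() parses above: a little-endian 32-bit
 * device load offset followed by the image data.  The struct name is
 * illustrative only; the driver does not define it.
 */
#if 0
struct iwm_fw_onesect_wire {
	uint32_t	fws_devoff;	/* device load offset */
	uint8_t		fws_data[];	/* dlen - sizeof(uint32_t) bytes */
};
#endif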

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	int error = 0;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_TYPE_INIT)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
#if defined(__DragonFly__)
		iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
#else
		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
#endif
	}
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	IWM_UNLOCK(sc);
	fwp = firmware_get(sc->sc_fwname);
	IWM_LOCK(sc);
	if (fwp == NULL) {
		/* error would otherwise still be 0 here; flag the failure */
		error = ENOENT;
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
			sc->sc_fw_phy_config =
			    le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			const struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				error = EINVAL;
				goto parse_out;
			}
			api = (const struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				device_printf(sc->sc_dev,
				    "unsupported API index %d\n",
				    le32toh(api->api_index));
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				error = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			/* idx indexes 32-bit words; >= guards the bitmap bound */
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				device_printf(sc->sc_dev,
				    "unsupported API index %d\n", idx);
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1U << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		device_printf(sc->sc_dev,
		    "device uses unsupported power ops\n");
		error = ENOTSUP;
	}

 out:
	if (error) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return error;
}
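/*
 * The parser above consumes a stream of TLV records that follows the
 * ucode header: a (type, length) header, 'length' payload bytes, then
 * padding up to the next 4-byte boundary.  A minimal sketch of the same
 * walk (guarded out; error and bounds handling omitted):
 */
#if 0
	while (len >= sizeof(tlv)) {
		memcpy(&tlv, data, sizeof(tlv));	/* TLV header */
		data += sizeof(tlv);
		len -= sizeof(tlv);
		/* dispatch on le32toh(tlv.type); payload starts at data */
		data += roundup(le32toh(tlv.length), 4);
		len -= roundup(le32toh(tlv.length), 4);
	}
#endif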

/*
 * DMA resource routines
 */

#if !defined(__DragonFly__)
static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
#endif

static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

#if defined(__DragonFly__)
	bus_dmamem_t dmem;
	error = bus_dmamem_coherent(tag, alignment, 0,
				    BUS_SPACE_MAXADDR_32BIT,
				    BUS_SPACE_MAXADDR,
				    size, BUS_DMA_NOWAIT, &dmem);
	if (error != 0)
		goto fail;

	dma->tag = dmem.dmem_tag;
	dma->map = dmem.dmem_map;
	dma->vaddr = dmem.dmem_addr;
	dma->paddr = dmem.dmem_busaddr;
#else
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}
#endif

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
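/*
 * Usage sketch (guarded out): every coherent buffer in this driver goes
 * through iwm_dma_contig_alloc() with an explicit size and alignment,
 * mirroring e.g. the 4KB-aligned "keep warm" page in iwm_alloc_kw()
 * below.  The local variable here is hypothetical.
 */
#if 0
	struct iwm_dma_info dma;

	error = iwm_dma_contig_alloc(sc->sc_dmat, &dma, 4096, 4096);
	if (error == 0) {
		/* dma.vaddr / dma.paddr are valid and synced for writes */
		iwm_dma_contig_free(&dma);
	}
#endif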

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}

static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	int rv;

	/* TX scheduler rings must be aligned on a 1KB boundary. */
	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	return rv;
}

static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish? */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;
	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate command space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
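/*
 * Layout sketch (guarded out) for the command DMA area initialized
 * above: slot i's command begins at cmd_dma.paddr plus i whole device
 * commands, and its scratch area follows the command header at the tx
 * command's scratch field.  Equivalent to the running-paddr loop:
 */
#if 0
	data->cmd_paddr = ring->cmd_dma.paddr +
	    i * sizeof(struct iwm_device_cmd);
	data->scratch_paddr = data->cmd_paddr +
	    sizeof(struct iwm_cmd_header) +
	    offsetof(struct iwm_tx_cmd, scratch);
#endif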

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
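/*
 * The ICT table address is programmed shifted right by
 * IWM_ICT_PADDR_SHIFT, which is why iwm_alloc_ict() requested
 * (1 << IWM_ICT_PADDR_SHIFT) alignment: the low bits must be zero so no
 * address information is lost in the shift.  Illustrative check only
 * (guarded out):
 */
#if 0
	KASSERT((sc->ict_dma.paddr & ((1 << IWM_ICT_PADDR_SHIFT) - 1)) == 0,
	    ("ICT table not aligned to 1 << IWM_ICT_PADDR_SHIFT"));
#endif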

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, stop using the ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again here.
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};

static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* deactivate before configuration */
1644 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1645 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1646 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1647 
1648 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1649 
1650 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1651 
1652 		iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1653 		/* Set scheduler window size and frame limit. */
1654 		iwm_write_mem32(sc,
1655 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1656 		    sizeof(uint32_t),
1657 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1658 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1659 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1660 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1661 
1662 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1663 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1664 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1665 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1666 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1667 	} else {
1668 		struct iwm_scd_txq_cfg_cmd cmd;
1669 		int error;
1670 
1671 		iwm_nic_unlock(sc);
1672 
1673 		memset(&cmd, 0, sizeof(cmd));
1674 		cmd.scd_queue = qid;
1675 		cmd.enable = 1;
1676 		cmd.sta_id = sta_id;
1677 		cmd.tx_fifo = fifo;
1678 		cmd.aggregate = 0;
1679 		cmd.window = IWM_FRAME_LIMIT;
1680 
1681 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1682 		    sizeof(cmd), &cmd);
1683 		if (error) {
1684 			device_printf(sc->sc_dev,
1685 			    "cannot enable txq %d\n", qid);
1686 			return error;
1687 		}
1688 
1689 		if (!iwm_nic_lock(sc))
1690 			return EBUSY;
1691 	}
1692 
1693 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1694 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
1695 
1696 	iwm_nic_unlock(sc);
1697 
1698 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1699 	    __func__, qid, fifo);
1700 
1701 	return 0;
1702 }
1703 
1704 static int
1705 iwm_post_alive(struct iwm_softc *sc)
1706 {
1707 	int nwords;
1708 	int error, chnl;
1709 	uint32_t base;
1710 
1711 	if (!iwm_nic_lock(sc))
1712 		return EBUSY;
1713 
1714 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1715 	if (sc->sched_base != base) {
1716 		device_printf(sc->sc_dev,
1717 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1718 		    __func__, sc->sched_base, base);
1719 	}
1720 
1721 	iwm_ict_reset(sc);
1722 
1723 	/* Clear TX scheduler state in SRAM. */
1724 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1725 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1726 	    / sizeof(uint32_t);
1727 	error = iwm_write_mem(sc,
1728 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1729 	    NULL, nwords);
1730 	if (error)
1731 		goto out;
1732 
1733 	/* Set physical address of TX scheduler rings (1KB aligned). */
1734 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1735 
1736 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1737 
1738 	iwm_nic_unlock(sc);
1739 
1740 	/* Enable the command channel (TX FIFO 7). */
1741 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1742 	if (error)
1743 		return error;
1744 
1745 	if (!iwm_nic_lock(sc))
1746 		return EBUSY;
1747 
1748 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1749 
1750 	/* Enable DMA channels. */
1751 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1752 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1753 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1754 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1755 	}
1756 
1757 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1758 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1759 
1760 	/* Enable L1-Active */
1761 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1762 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1763 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1764 	}
1765 
1766  out:
1767 	iwm_nic_unlock(sc);
1768 	return error;
1769 }
1770 
1771 /*
1772  * NVM read access and content parsing.  We do not support
1773  * external NVM or writing NVM.
1774  * iwlwifi/mvm/nvm.c
1775  */
1776 
1777 /* list of NVM sections we are allowed/need to read */
1778 const int nvm_to_read[] = {
1779 	IWM_NVM_SECTION_TYPE_HW,
1780 	IWM_NVM_SECTION_TYPE_SW,
1781 	IWM_NVM_SECTION_TYPE_REGULATORY,
1782 	IWM_NVM_SECTION_TYPE_CALIBRATION,
1783 	IWM_NVM_SECTION_TYPE_PRODUCTION,
1784 	IWM_NVM_SECTION_TYPE_HW_8000,
1785 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1786 	IWM_NVM_SECTION_TYPE_PHY_SKU,
1787 };
1788 
1789 /* Default NVM size to read */
1790 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1791 #define IWM_MAX_NVM_SECTION_SIZE	8192
1792 
1793 #define IWM_NVM_WRITE_OPCODE 1
1794 #define IWM_NVM_READ_OPCODE 0
1795 
1796 /* load nvm chunk response */
1797 #define IWM_READ_NVM_CHUNK_SUCCEED		0
1798 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS	1
1799 
1800 static int
1801 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1802 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1803 {
1805 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1806 		.offset = htole16(offset),
1807 		.length = htole16(length),
1808 		.type = htole16(section),
1809 		.op_code = IWM_NVM_READ_OPCODE,
1810 	};
1811 	struct iwm_nvm_access_resp *nvm_resp;
1812 	struct iwm_rx_packet *pkt;
1813 	struct iwm_host_cmd cmd = {
1814 		.id = IWM_NVM_ACCESS_CMD,
1815 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1816 		    IWM_CMD_SEND_IN_RFKILL,
1817 		.data = { &nvm_access_cmd, },
1818 	};
1819 	int ret, offset_read;
1820 	size_t bytes_read;
1821 	uint8_t *resp_data;
1822 
1823 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1824 
1825 	ret = iwm_send_cmd(sc, &cmd);
1826 	if (ret) {
1827 		device_printf(sc->sc_dev,
1828 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1829 		return ret;
1830 	}
1831 
1832 	pkt = cmd.resp_pkt;
1833 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1834 		device_printf(sc->sc_dev,
1835 		    "Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
1836 		    pkt->hdr.flags);
1837 		ret = EIO;
1838 		goto exit;
1839 	}
1840 
1841 	/* Extract NVM response */
1842 	nvm_resp = (void *)pkt->data;
1843 
1844 	ret = le16toh(nvm_resp->status);
1845 	bytes_read = le16toh(nvm_resp->length);
1846 	offset_read = le16toh(nvm_resp->offset);
1847 	resp_data = nvm_resp->data;
1848 	if (ret) {
1849 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1850 		    "NVM access command failed with status %d\n", ret);
1851 		ret = EINVAL;
1852 		goto exit;
1853 	}
1854 
1855 	if (offset_read != offset) {
1856 		device_printf(sc->sc_dev,
1857 		    "NVM ACCESS response with invalid offset %d\n",
1858 		    offset_read);
1859 		ret = EINVAL;
1860 		goto exit;
1861 	}
1862 
1863 	if (bytes_read > length) {
1864 		device_printf(sc->sc_dev,
1865 		    "NVM ACCESS response with too much data "
1866 		    "(%d bytes requested, %zu bytes received)\n",
1867 		    length, bytes_read);
1868 		ret = EINVAL;
1869 		goto exit;
1870 	}
1871 
1872 	memcpy(data + offset, resp_data, bytes_read);
1873 	*len = bytes_read;
1874 
1875  exit:
1876 	iwm_free_resp(sc, &cmd);
1877 	return ret;
1878 }
1879 
1880 /*
1881  * Read an NVM section completely.
1882  * NICs prior to the 7000 family don't have a real NVM, but just read
1883  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1884  * by the uCode, we must check ourselves that we don't try to read more
1885  * than the EEPROM size.
1886  * For 7000 family NICs, we supply the maximal size we can read, and
1887  * the uCode fills the response with as much data as it can without
1888  * overflowing, so no check is needed.
1889  */
1890 static int
1891 iwm_nvm_read_section(struct iwm_softc *sc,
1892 	uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1893 {
1894 	uint16_t chunklen, seglen;
1895 	int error = 0;
1896 
1897 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1898 	    "reading NVM section %d\n", section);
1899 
1900 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1901 	*len = 0;
1902 
1903 	/* Read NVM chunks until exhausted (reading less than requested) */
1904 	while (seglen == chunklen && *len < max_len) {
1905 		error = iwm_nvm_read_chunk(sc,
1906 		    section, *len, chunklen, data, &seglen);
1907 		if (error) {
1908 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1909 			    "Cannot read from NVM section "
1910 			    "%d at offset %d\n", section, *len);
1911 			return error;
1912 		}
1913 		*len += seglen;
1914 	}
1915 
1916 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1917 	    "NVM section %d read completed (%d bytes, error=%d)\n",
1918 	    section, *len, error);
1919 	return error;
1920 }
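/*
 * Illustrative sketch only (not compiled, hence the #if 0): one way a
 * caller could drive the chunked read above.  The buffer sizing and the
 * section choice are assumptions; iwm_nvm_init() below is the driver's
 * real consumer.
 */
#if 0
	uint16_t len;
	uint8_t *buf;

	buf = kmalloc(IWM_MAX_NVM_SECTION_SIZE, M_DEVBUF, M_INTWAIT);
	if (iwm_nvm_read_section(sc, IWM_NVM_SECTION_TYPE_SW, buf, &len,
	    IWM_MAX_NVM_SECTION_SIZE) == 0) {
		/* buf[0 .. len - 1] now holds the section contents. */
	}
	kfree(buf, M_DEVBUF);
#endif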
1921 
1922 /* NVM offsets (in words) definitions */
1923 enum iwm_nvm_offsets {
1924 	/* NVM HW-Section offset (in words) definitions */
1925 	IWM_HW_ADDR = 0x15,
1926 
1927 	/* NVM SW-Section offset (in words) definitions */
1928 	IWM_NVM_SW_SECTION = 0x1C0,
1929 	IWM_NVM_VERSION = 0,
1930 	IWM_RADIO_CFG = 1,
1931 	IWM_SKU = 2,
1932 	IWM_N_HW_ADDRS = 3,
1933 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1934 
1935 	/* NVM calibration section offset (in words) definitions */
1936 	IWM_NVM_CALIB_SECTION = 0x2B8,
1937 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1938 };
1939 
1940 enum iwm_8000_nvm_offsets {
1941 	/* NVM HW-Section offset (in words) definitions */
1942 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1943 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1944 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1945 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1946 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1947 
1948 	/* NVM SW-Section offset (in words) definitions */
1949 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1950 	IWM_NVM_VERSION_8000 = 0,
1951 	IWM_RADIO_CFG_8000 = 0,
1952 	IWM_SKU_8000 = 2,
1953 	IWM_N_HW_ADDRS_8000 = 3,
1954 
1955 	/* NVM REGULATORY -Section offset (in words) definitions */
1956 	IWM_NVM_CHANNELS_8000 = 0,
1957 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1958 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1959 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1960 
1961 	/* NVM calibration section offset (in words) definitions */
1962 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1963 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1964 };
1965 
1966 /* SKU Capabilities (actual values from NVM definition) */
1967 enum nvm_sku_bits {
1968 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1969 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1970 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1971 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1972 };
1973 
1974 /* radio config bits (actual values from NVM definition) */
1975 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1976 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1977 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1978 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1979 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1980 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
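/*
 * Worked example (hypothetical NVM value): for a 7000-family radio_cfg
 * word of 0x0199, the macros above yield DASH = 0x1, STEP = 0x2,
 * TYPE = 0x1, PNUM = 0x2, TX_ANT = 0x1 and RX_ANT = 0x0.
 */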
1981 
1982 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1983 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1984 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1985 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1986 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1987 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1988 
1989 #define DEFAULT_MAX_TX_POWER 16
1990 
1991 /**
1992  * enum iwm_nvm_channel_flags - channel flags in NVM
1993  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1994  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1995  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1996  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1997  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1998  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1999  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2000  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2001  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2002  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2003  */
2004 enum iwm_nvm_channel_flags {
2005 	IWM_NVM_CHANNEL_VALID = (1 << 0),
2006 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2007 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2008 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2009 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2010 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2011 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2012 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2013 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2014 };
2015 
2016 /*
2017  * Translate EEPROM flags to net80211.
2018  */
2019 static uint32_t
2020 iwm_eeprom_channel_flags(uint16_t ch_flags)
2021 {
2022 	uint32_t nflags;
2023 
2024 	nflags = 0;
2025 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2026 		nflags |= IEEE80211_CHAN_PASSIVE;
2027 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2028 		nflags |= IEEE80211_CHAN_NOADHOC;
2029 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2030 		nflags |= IEEE80211_CHAN_DFS;
2031 		/* Just in case. */
2032 		nflags |= IEEE80211_CHAN_NOADHOC;
2033 	}
2034 
2035 	return (nflags);
2036 }
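/*
 * E.g. a channel whose NVM flags have ACTIVE and IBSS clear maps to
 * IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC; a set RADAR flag
 * adds IEEE80211_CHAN_DFS (and keeps NOADHOC).
 */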
2037 
2038 static void
2039 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2040     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2041     const uint8_t bands[])
2042 {
2043 	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
2044 	uint32_t nflags;
2045 	uint16_t ch_flags;
2046 	uint8_t ieee;
2047 	int error;
2048 
2049 	for (; ch_idx < ch_num; ch_idx++) {
2050 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2051 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2052 			ieee = iwm_nvm_channels[ch_idx];
2053 		else
2054 			ieee = iwm_nvm_channels_8000[ch_idx];
2055 
2056 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2057 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2058 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2059 			    ieee, ch_flags,
2060 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2061 			    "5.2" : "2.4");
2062 			continue;
2063 		}
2064 
2065 		nflags = iwm_eeprom_channel_flags(ch_flags);
2066 		error = ieee80211_add_channel(chans, maxchans, nchans,
2067 		    ieee, 0, 0, nflags, bands);
2068 		if (error != 0)
2069 			break;
2070 
2071 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2072 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2073 		    ieee, ch_flags,
2074 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2075 		    "5.2" : "2.4");
2076 	}
2077 }
2078 
2079 static void
2080 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2081     struct ieee80211_channel chans[])
2082 {
2083 	struct iwm_softc *sc = ic->ic_softc;
2084 	struct iwm_nvm_data *data = &sc->sc_nvm;
2085 	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2086 	size_t ch_num;
2087 
2088 	memset(bands, 0, sizeof(bands));
2089 	/* 1-13: 11b/g channels. */
2090 	setbit(bands, IEEE80211_MODE_11B);
2091 	setbit(bands, IEEE80211_MODE_11G);
2092 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2093 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2094 
2095 	/* 14: 11b channel only. */
2096 	clrbit(bands, IEEE80211_MODE_11G);
2097 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2098 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2099 
2100 	if (data->sku_cap_band_52GHz_enable) {
2101 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2102 			ch_num = nitems(iwm_nvm_channels);
2103 		else
2104 			ch_num = nitems(iwm_nvm_channels_8000);
2105 		memset(bands, 0, sizeof(bands));
2106 		setbit(bands, IEEE80211_MODE_11A);
2107 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2108 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2109 	}
2110 }
2111 
2112 static void
2113 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2114 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2115 {
2116 	const uint8_t *hw_addr;
2117 
2118 	if (mac_override) {
2119 		static const uint8_t reserved_mac[] = {
2120 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2121 		};
2122 
2123 		hw_addr = (const uint8_t *)(mac_override +
2124 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2125 
2126 		/*
2127 		 * Store the MAC address from MAO section.
2128 		 * No byte swapping is required in MAO section
2129 		 */
2130 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2131 
2132 		/*
2133 		 * Force the use of the OTP MAC address in case of reserved MAC
2134 		 * address in the NVM, or if address is given but invalid.
2135 		 */
2136 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2137 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2138 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2139 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2140 			return;
2141 
2142 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2143 		    "%s: mac address from nvm override section invalid\n",
2144 		    __func__);
2145 	}
2146 
2147 	if (nvm_hw) {
2148 		/* read the mac address from WFMP registers */
2149 		uint32_t mac_addr0 =
2150 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2151 		uint32_t mac_addr1 =
2152 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2153 
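		/*
		 * Byte-order example (hypothetical register values): if
		 * IWM_WFMP_MAC_ADDR_0 reads 0x00aabbcc and
		 * IWM_WFMP_MAC_ADDR_1 reads 0x0000ddee, the extraction
		 * below assembles the address 00:aa:bb:cc:dd:ee.
		 */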
2154 		hw_addr = (const uint8_t *)&mac_addr0;
2155 		data->hw_addr[0] = hw_addr[3];
2156 		data->hw_addr[1] = hw_addr[2];
2157 		data->hw_addr[2] = hw_addr[1];
2158 		data->hw_addr[3] = hw_addr[0];
2159 
2160 		hw_addr = (const uint8_t *)&mac_addr1;
2161 		data->hw_addr[4] = hw_addr[1];
2162 		data->hw_addr[5] = hw_addr[0];
2163 
2164 		return;
2165 	}
2166 
2167 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2168 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2169 }
2170 
2171 static int
2172 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2173 	    const uint16_t *phy_sku)
2174 {
2175 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2176 		return le16_to_cpup(nvm_sw + IWM_SKU);
2177 
2178 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2179 }
2180 
2181 static int
2182 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2183 {
2184 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2185 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2186 	else
2187 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2188 						IWM_NVM_VERSION_8000));
2189 }
2190 
2191 static int
2192 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2193 		  const uint16_t *phy_sku)
2194 {
2195 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2196 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2197 
2198 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2199 }
2200 
2201 static int
2202 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2203 {
2204 	int n_hw_addr;
2205 
2206 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2207 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2208 
2209 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2210 
2211 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2212 }
2213 
2214 static void
2215 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2216 		  uint32_t radio_cfg)
2217 {
2218 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2219 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2220 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2221 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2222 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2223 		return;
2224 	}
2225 
2226 	/* set the radio configuration for family 8000 */
2227 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2228 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2229 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2230 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2231 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2232 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2233 }
2234 
2235 static int
2236 iwm_parse_nvm_data(struct iwm_softc *sc,
2237 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2238 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2239 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2240 {
2241 	struct iwm_nvm_data *data = &sc->sc_nvm;
2242 	uint8_t hw_addr[IEEE80211_ADDR_LEN];
2243 	uint32_t sku, radio_cfg;
2244 
2245 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2246 
2247 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2248 	iwm_set_radio_cfg(sc, data, radio_cfg);
2249 
2250 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2251 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2252 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2253 	data->sku_cap_11n_enable = 0;
2254 
2255 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2256 
2257 	/* Stored as little-endian 16-bit words: bytes are swapped pairwise, 2-1-4-3-6-5. */
2258 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2259 		IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2260 		data->hw_addr[0] = hw_addr[1];
2261 		data->hw_addr[1] = hw_addr[0];
2262 		data->hw_addr[2] = hw_addr[3];
2263 		data->hw_addr[3] = hw_addr[2];
2264 		data->hw_addr[4] = hw_addr[5];
2265 		data->hw_addr[5] = hw_addr[4];
2266 	} else {
2267 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2268 	}
2269 
2270 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2271 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2272 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2273 	} else {
2274 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2275 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2276 	}
2277 	data->calib_version = 255;   /* TODO:
2278 					this value prevents some checks from
2279 					failing; we need to determine whether
2280 					this field is still needed, and if so,
2281 					where it lives in the NVM */
2282 
2283 	return 0;
2284 }
2285 
2286 static int
2287 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2288 {
2289 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2290 
2291 	/* Checking for required sections */
2292 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2293 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2294 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2295 			device_printf(sc->sc_dev,
2296 			    "Can't parse empty OTP/NVM sections\n");
2297 			return ENOENT;
2298 		}
2299 
2300 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2301 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2302 		/* SW and REGULATORY sections are mandatory */
2303 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2304 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2305 			device_printf(sc->sc_dev,
2306 			    "Can't parse empty OTP/NVM sections\n");
2307 			return ENOENT;
2308 		}
2309 		/* MAC_OVERRIDE or at least HW section must exist */
2310 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2311 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2312 			device_printf(sc->sc_dev,
2313 			    "Can't parse mac_address, empty sections\n");
2314 			return ENOENT;
2315 		}
2316 
2317 		/* PHY_SKU section is mandatory in B0 */
2318 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2319 			device_printf(sc->sc_dev,
2320 			    "Can't parse phy_sku in B0, empty sections\n");
2321 			return ENOENT;
2322 		}
2323 
2324 		hw = (const uint16_t *)
2325 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2326 	} else {
2327 		panic("unknown device family %d\n", sc->sc_device_family);
2328 	}
2329 
2330 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2331 	calib = (const uint16_t *)
2332 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2333 	regulatory = (const uint16_t *)
2334 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2335 	mac_override = (const uint16_t *)
2336 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2337 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2338 
2339 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2340 	    phy_sku, regulatory);
2341 }
2342 
2343 static int
2344 iwm_nvm_init(struct iwm_softc *sc)
2345 {
2346 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2347 	int i, section, error;
2348 	uint16_t len;
2349 	uint8_t *buf;
2350 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2351 
2352 	memset(nvm_sections, 0, sizeof(nvm_sections));
2353 
2354 	buf = kmalloc(bufsz, M_DEVBUF, M_INTWAIT);
2355 	if (buf == NULL)
2356 		return ENOMEM;
2357 
2358 	for (i = 0; i < nitems(nvm_to_read); i++) {
2359 		section = nvm_to_read[i];
2360 		KKASSERT(section < nitems(nvm_sections));
2361 
2362 		error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2363 		if (error) {
2364 			error = 0;
2365 			continue;
2366 		}
2367 		nvm_sections[section].data = kmalloc(len, M_DEVBUF, M_INTWAIT);
2368 		if (nvm_sections[section].data == NULL) {
2369 			error = ENOMEM;
2370 			break;
2371 		}
2372 		memcpy(nvm_sections[section].data, buf, len);
2373 		nvm_sections[section].length = len;
2374 	}
2375 	kfree(buf, M_DEVBUF);
2376 	if (error == 0)
2377 		error = iwm_parse_nvm_sections(sc, nvm_sections);
2378 
2379 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2380 		if (nvm_sections[i].data != NULL)
2381 			kfree(nvm_sections[i].data, M_DEVBUF);
2382 	}
2383 
2384 	return error;
2385 }
2386 
2387 /*
2388  * Firmware loading gunk.  This is kind of a weird hybrid between the
2389  * iwn driver and the Linux iwlwifi driver.
2390  */
2391 
2392 static int
2393 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2394 	const uint8_t *section, uint32_t byte_cnt)
2395 {
2396 	int error = EINVAL;
2397 	uint32_t chunk_sz, offset;
2398 
2399 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2400 
2401 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2402 		uint32_t addr, len;
2403 		const uint8_t *data;
2404 
2405 		addr = dst_addr + offset;
2406 		len = MIN(chunk_sz, byte_cnt - offset);
2407 		data = section + offset;
2408 
2409 		error = iwm_firmware_load_chunk(sc, addr, data, len);
2410 		if (error)
2411 			break;
2412 	}
2413 
2414 	return error;
2415 }
2416 
2417 static int
2418 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2419 	const uint8_t *chunk, uint32_t byte_cnt)
2420 {
2421 	struct iwm_dma_info *dma = &sc->fw_dma;
2422 	int error;
2423 
2424 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
2425 	memcpy(dma->vaddr, chunk, byte_cnt);
2426 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2427 
2428 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2429 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2430 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2431 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2432 	}
2433 
2434 	sc->sc_fw_chunk_done = 0;
2435 
2436 	if (!iwm_nic_lock(sc))
2437 		return EBUSY;
2438 
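	/*
	 * Program the FH service channel (a sketch of the intent, going by
	 * the register names): point it at the chunk in the DMA buffer,
	 * set the transfer size, then re-enable the channel so completion
	 * raises an interrupt that sets sc_fw_chunk_done.
	 */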
2439 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2440 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2441 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2442 	    dst_addr);
2443 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2444 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2445 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2446 	    (iwm_get_dma_hi_addr(dma->paddr)
2447 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2448 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2449 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2450 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2451 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2452 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2453 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2454 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2455 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2456 
2457 	iwm_nic_unlock(sc);
2458 
2459 	/* wait 1s for this segment to load */
2460 	error = 0;
2461 	while (!sc->sc_fw_chunk_done) {
2462 #if defined(__DragonFly__)
2463 		error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
2464 #else
2465 		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2466 #endif
2467 		if (error)
2468 			break;
2469 	}
2470 
2471 	if (!sc->sc_fw_chunk_done) {
2472 		device_printf(sc->sc_dev,
2473 		    "fw chunk addr 0x%x len %d failed to load\n",
2474 		    dst_addr, byte_cnt);
2475 	}
2476 
2477 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2478 	    dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2479 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2480 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2481 		iwm_nic_unlock(sc);
2482 	}
2483 
2484 	return error;
2485 }
2486 
2487 int
2488 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2489     int cpu, int *first_ucode_section)
2490 {
2491 	int shift_param;
2492 	int i, error = 0, sec_num = 0x1;
2493 	uint32_t val, last_read_idx = 0;
2494 	const void *data;
2495 	uint32_t dlen;
2496 	uint32_t offset;
2497 
2498 	if (cpu == 1) {
2499 		shift_param = 0;
2500 		*first_ucode_section = 0;
2501 	} else {
2502 		shift_param = 16;
2503 		(*first_ucode_section)++;
2504 	}
2505 
2506 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2507 		last_read_idx = i;
2508 		data = fws->fw_sect[i].fws_data;
2509 		dlen = fws->fw_sect[i].fws_len;
2510 		offset = fws->fw_sect[i].fws_devoff;
2511 
2512 		/*
2513 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2514 		 * CPU1 to CPU2.
2515 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2516 		 * CPU2 non paged to CPU2 paging sec.
2517 		 */
2518 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2519 		    offset == IWM_PAGING_SEPARATOR_SECTION)
2520 			break;
2521 
2522 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2523 		    "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2524 		    i, offset, dlen, cpu);
2525 
2526 		if (dlen > sc->sc_fwdmasegsz) {
2527 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2528 			    "chunk %d too large (%d bytes)\n", i, dlen);
2529 			error = EFBIG;
2530 		} else {
2531 			error = iwm_firmware_load_sect(sc, offset, data, dlen);
2532 		}
2533 		if (error) {
2534 			device_printf(sc->sc_dev,
2535 			    "could not load firmware chunk %d (error %d)\n",
2536 			    i, error);
2537 			return error;
2538 		}
2539 
2540 		/* Notify the ucode of the loaded section number and status */
2541 		if (iwm_nic_lock(sc)) {
2542 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2543 			val = val | (sec_num << shift_param);
2544 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2545 			sec_num = (sec_num << 1) | 0x1;
2546 			iwm_nic_unlock(sc);
2547 
2548 			/*
2549 			 * The firmware won't load correctly without this delay.
2550 			 */
2551 			DELAY(8000);
2552 		}
2553 	}
2554 
2555 	*first_ucode_section = last_read_idx;
2556 
2557 	if (iwm_nic_lock(sc)) {
2558 		if (cpu == 1)
2559 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2560 		else
2561 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2562 		iwm_nic_unlock(sc);
2563 	}
2564 
2565 	return 0;
2566 }
2567 
2568 int
2569 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2570 {
2571 	struct iwm_fw_sects *fws;
2572 	int error = 0;
2573 	int first_ucode_section;
2574 
2575 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2576 	    ucode_type);
2577 
2578 	fws = &sc->sc_fw.fw_sects[ucode_type];
2579 
2580 	/* configure the ucode to be ready to get the secured image */
2581 	/* release CPU reset */
2582 	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2583 
2584 	/* load to FW the binary Secured sections of CPU1 */
2585 	error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2586 	if (error)
2587 		return error;
2588 
2589 	/* load to FW the binary sections of CPU2 */
2590 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2591 }
2592 
2593 static int
2594 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2595 {
2596 	struct iwm_fw_sects *fws;
2597 	int error, i;
2598 	const void *data;
2599 	uint32_t dlen;
2600 	uint32_t offset;
2601 
2602 	sc->sc_uc.uc_intr = 0;
2603 
2604 	fws = &sc->sc_fw.fw_sects[ucode_type];
2605 	for (i = 0; i < fws->fw_count; i++) {
2606 		data = fws->fw_sect[i].fws_data;
2607 		dlen = fws->fw_sect[i].fws_len;
2608 		offset = fws->fw_sect[i].fws_devoff;
2609 		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2610 		    "LOAD FIRMWARE type %d offset %u len %d\n",
2611 		    ucode_type, offset, dlen);
2612 		if (dlen > sc->sc_fwdmasegsz) {
2613 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2614 			    "chunk %d too large (%d bytes)\n", i, dlen);
2615 			error = EFBIG;
2616 		} else {
2617 			error = iwm_firmware_load_sect(sc, offset, data, dlen);
2618 		}
2619 		if (error) {
2620 			device_printf(sc->sc_dev,
2621 			    "could not load firmware chunk %u of %u "
2622 			    "(error=%d)\n", i, fws->fw_count, error);
2623 			return error;
2624 		}
2625 	}
2626 
2627 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2628 
2629 	return 0;
2630 }
2631 
2632 static int
2633 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2634 {
2635 	int error, w;
2636 
2637 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2638 		error = iwm_load_firmware_8000(sc, ucode_type);
2639 	else
2640 		error = iwm_load_firmware_7000(sc, ucode_type);
2641 	if (error)
2642 		return error;
2643 
2644 	/* wait for the firmware to load */
2645 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2646 #if defined(__DragonFly__)
2647 		error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2648 #else
2649 		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2650 #endif
2651 	}
2652 	if (error || !sc->sc_uc.uc_ok) {
2653 		device_printf(sc->sc_dev, "could not load firmware\n");
2654 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2655 			device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2656 			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2657 			device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2658 			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2659 		}
2660 	}
2661 
2662 	/*
2663 	 * Give the firmware some time to initialize.
2664 	 * Accessing it too early causes errors.
2665 	 */
2666 	iwmsleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);
2667 
2668 	return error;
2669 }
2670 
2671 static int
2672 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2673 {
2674 	int error;
2675 
2676 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
2677 
2678 	if ((error = iwm_nic_init(sc)) != 0) {
2679 		device_printf(sc->sc_dev, "unable to init nic\n");
2680 		return error;
2681 	}
2682 
2683 	/* make sure rfkill handshake bits are cleared */
2684 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2685 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2686 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2687 
2688 	/* clear (again), then enable host interrupts */
2689 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
2690 	iwm_enable_interrupts(sc);
2691 
2692 	/* really make sure rfkill handshake bits are cleared */
2693 	/* maybe we should write a few times more?  just to make sure */
2694 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2695 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2696 
2697 	/* Load the given image to the HW */
2698 	return iwm_load_firmware(sc, ucode_type);
2699 }
2700 
2701 static int
2702 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2703 {
2704 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2705 		.valid = htole32(valid_tx_ant),
2706 	};
2707 
2708 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2709 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2710 }
2711 
2712 static int
2713 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2714 {
2715 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2716 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2717 
2718 	/* Set parameters */
2719 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2720 	phy_cfg_cmd.calib_control.event_trigger =
2721 	    sc->sc_default_calib[ucode_type].event_trigger;
2722 	phy_cfg_cmd.calib_control.flow_trigger =
2723 	    sc->sc_default_calib[ucode_type].flow_trigger;
2724 
2725 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2726 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2727 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2728 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2729 }
2730 
2731 static int
2732 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2733 	enum iwm_ucode_type ucode_type)
2734 {
2735 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2736 	int error;
2737 
2738 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2739 		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2740 			error);
2741 		return error;
2742 	}
2743 
2744 	sc->sc_uc_current = ucode_type;
2745 	error = iwm_start_fw(sc, ucode_type);
2746 	if (error) {
2747 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2748 		sc->sc_uc_current = old_type;
2749 		return error;
2750 	}
2751 
2752 	error = iwm_post_alive(sc);
2753 	if (error) {
2754 		device_printf(sc->sc_dev, "iwm_post_alive: failed %d\n", error);
2755 	}
2756 	return error;
2757 }
2758 
2759 /*
2760  * mvm misc bits
2761  */
2762 
2763 static int
2764 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2765 {
2766 	int error;
2767 
2768 	/* do not operate with rfkill switch turned on */
2769 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2770 		device_printf(sc->sc_dev,
2771 		    "radio is disabled by hardware switch\n");
2772 		return EPERM;
2773 	}
2774 
2775 	sc->sc_init_complete = 0;
2776 	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2777 	    IWM_UCODE_TYPE_INIT)) != 0) {
2778 		device_printf(sc->sc_dev, "failed to load init firmware\n");
2779 		return error;
2780 	}
2781 
2782 	if (justnvm) {
2783 		if ((error = iwm_nvm_init(sc)) != 0) {
2784 			device_printf(sc->sc_dev, "failed to read nvm\n");
2785 			return error;
2786 		}
2787 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2788 
2789 		return 0;
2790 	}
2791 
2792 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2793 		device_printf(sc->sc_dev,
2794 		    "failed to send bt coex configuration: %d\n", error);
2795 		return error;
2796 	}
2797 
2798 	/* Init Smart FIFO. */
2799 	error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2800 	if (error != 0)
2801 		return error;
2802 
2803 	/* Send TX valid antennas before triggering calibrations */
2804 	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2805 		device_printf(sc->sc_dev,
2806 		    "failed to send antennas before calibration: %d\n", error);
2807 		return error;
2808 	}
2809 
2810 	/*
2811 	 * Send phy configurations command to init uCode
2812 	 * to start the 16.0 uCode init image internal calibrations.
2813 	 */
2814 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2815 		device_printf(sc->sc_dev,
2816 		    "%s: failed to run internal calibration: %d\n",
2817 		    __func__, error);
2818 		return error;
2819 	}
2820 
2821 	/*
2822 	 * Nothing to do but wait for the init complete notification
2823 	 * from the firmware
2824 	 */
2825 	while (!sc->sc_init_complete) {
2826 #if defined(__DragonFly__)
2827 		error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
2828 				 0, "iwminit", 2*hz);
2829 #else
2830 		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2831 				 0, "iwminit", 2*hz);
2832 #endif
2833 		if (error) {
2834 			device_printf(sc->sc_dev, "init complete failed: %d\n",
2835 				error);
2836 			break;
2837 		}
2838 	}
2839 
2840 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2841 	    sc->sc_init_complete ? "" : "not ");
2842 
2843 	return error;
2844 }
2845 
2846 /*
2847  * receive side
2848  */
2849 
2850 /* (re)stock rx ring, called at init-time and at runtime */
2851 static int
2852 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2853 {
2854 	struct iwm_rx_ring *ring = &sc->rxq;
2855 	struct iwm_rx_data *data = &ring->data[idx];
2856 	struct mbuf *m;
2857 	bus_dmamap_t dmamap = NULL;
2858 	bus_dma_segment_t seg;
2859 	int nsegs, error;
2860 
2861 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2862 	if (m == NULL)
2863 		return ENOBUFS;
2864 
2865 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2866 #if defined(__DragonFly__)
2867 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
2868 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
2869 #else
2870 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2871 	    &seg, &nsegs, BUS_DMA_NOWAIT);
2872 #endif
2873 	if (error != 0) {
2874 		device_printf(sc->sc_dev,
2875 		    "%s: can't map mbuf, error %d\n", __func__, error);
2876 		goto fail;
2877 	}
2878 
2879 	if (data->m != NULL)
2880 		bus_dmamap_unload(ring->data_dmat, data->map);
2881 
2882 	/* Swap ring->spare_map with data->map */
2883 	dmamap = data->map;
2884 	data->map = ring->spare_map;
2885 	ring->spare_map = dmamap;
2886 
2887 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2888 	data->m = m;
2889 
2890 	/* Update RX descriptor. */
2891 	KKASSERT((seg.ds_addr & 255) == 0);
2892 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
2893 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2894 	    BUS_DMASYNC_PREWRITE);
2895 
2896 	return 0;
2897 fail:
2898 	m_freem(m);
2899 	return error;
2900 }
2901 
2902 #define IWM_RSSI_OFFSET 50
2903 static int
2904 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2905 {
2906 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2907 	uint32_t agc_a, agc_b;
2908 	uint32_t val;
2909 
2910 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2911 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2912 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2913 
2914 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2915 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2916 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2917 
2918 	/*
2919 	 * dBm = rssi dB - agc dB - constant.
2920 	 * Higher AGC (higher radio gain) means lower signal.
2921 	 */
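	/*
	 * Worked example (hypothetical raw values): rssi_a = 40 with
	 * agc_a = 30 yields 40 - 50 - 30 = -40 dBm.
	 */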
2922 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2923 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2924 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2925 
2926 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2927 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2928 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2929 
2930 	return max_rssi_dbm;
2931 }
2932 
2933 /*
2934  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2935  * values are reported by the fw as positive values - need to negate
2936  * to obtain their dBm.  Account for missing antennas by replacing 0
2937  * values by -256 dBm: practically 0 power and a non-feasible 8-bit value.
2938  */
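/*
 * E.g. (hypothetical raw value): an antenna energy of 40 is reported as
 * -40 dBm; a raw value of 0 (missing antenna) becomes -256 dBm.
 */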
2939 static int
2940 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2941 {
2942 	int energy_a, energy_b, energy_c, max_energy;
2943 	uint32_t val;
2944 
2945 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2946 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2947 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
2948 	energy_a = energy_a ? -energy_a : -256;
2949 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2950 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2951 	energy_b = energy_b ? -energy_b : -256;
2952 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2953 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2954 	energy_c = energy_c ? -energy_c : -256;
2955 	max_energy = MAX(energy_a, energy_b);
2956 	max_energy = MAX(max_energy, energy_c);
2957 
2958 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2959 	    "energy In A %d B %d C %d , and max %d\n",
2960 	    energy_a, energy_b, energy_c, max_energy);
2961 
2962 	return max_energy;
2963 }
2964 
2965 static void
2966 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2967 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2968 {
2969 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2970 
2971 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2972 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2973 
2974 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2975 }
2976 
2977 /*
2978  * Retrieve the average noise (in dBm) among receivers.
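 * E.g. (hypothetical values): beacon silence RSSI bytes of 60, 62 and 0
 * average to (60 + 62) / 2 - 107 = -46 dBm.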
2979  */
2980 static int
2981 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2982 {
2983 	int i, total, nbant, noise;
2984 
2985 	total = nbant = noise = 0;
2986 	for (i = 0; i < 3; i++) {
2987 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2988 		if (noise) {
2989 			total += noise;
2990 			nbant++;
2991 		}
2992 	}
2993 
2994 	/* There should be at least one antenna but check anyway. */
2995 	return (nbant == 0) ? -127 : (total / nbant) - 107;
2996 }
2997 
2998 /*
2999  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3000  *
3001  * Handles the actual data of the Rx packet from the fw
3002  */
3003 static void
3004 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3005 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3006 {
3007 	struct ieee80211com *ic = &sc->sc_ic;
3008 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3009 	struct ieee80211_frame *wh;
3010 	struct ieee80211_node *ni;
3011 	struct ieee80211_rx_stats rxs;
3012 	struct mbuf *m;
3013 	struct iwm_rx_phy_info *phy_info;
3014 	struct iwm_rx_mpdu_res_start *rx_res;
3015 	uint32_t len;
3016 	uint32_t rx_pkt_status;
3017 	int rssi;
3018 
3019 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3020 
3021 	phy_info = &sc->sc_last_phy_info;
3022 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3023 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3024 	len = le16toh(rx_res->byte_count);
3025 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3026 
3027 	m = data->m;
3028 	m->m_data = pkt->data + sizeof(*rx_res);
3029 	m->m_pkthdr.len = m->m_len = len;
3030 
3031 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3032 		device_printf(sc->sc_dev,
3033 		    "dsp size out of range [0,20]: %d\n",
3034 		    phy_info->cfg_phy_cnt);
3035 		return;
3036 	}
3037 
3038 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3039 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3040 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3041 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3042 		return; /* drop */
3043 	}
3044 
3045 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3046 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3047 	} else {
3048 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
3049 	}
3050 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
3051 	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */
3052 
3053 	/* replenish ring for the buffer we're going to feed to the sharks */
3054 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3055 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3056 		    __func__);
3057 		return;
3058 	}
3059 
3060 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3061 
3062 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3063 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3064 	    __func__,
3065 	    le16toh(phy_info->channel),
3066 	    le16toh(phy_info->phy_flags));
3067 
3068 	/*
3069 	 * Populate an RX state struct with the provided information.
3070 	 */
3071 	bzero(&rxs, sizeof(rxs));
3072 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3073 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3074 	rxs.c_ieee = le16toh(phy_info->channel);
3075 	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3076 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3077 	} else {
3078 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3079 	}
3080 	rxs.rssi = rssi - sc->sc_noise;
3081 	rxs.nf = sc->sc_noise;
3082 
3083 	if (ieee80211_radiotap_active_vap(vap)) {
3084 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3085 
3086 		tap->wr_flags = 0;
3087 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3088 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3089 		tap->wr_chan_freq = htole16(rxs.c_freq);
3090 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3091 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3092 		tap->wr_dbm_antsignal = (int8_t)rssi;
3093 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3094 		tap->wr_tsft = phy_info->system_timestamp;
3095 		switch (phy_info->rate) {
3096 		/* CCK rates. */
3097 		case  10: tap->wr_rate =   2; break;
3098 		case  20: tap->wr_rate =   4; break;
3099 		case  55: tap->wr_rate =  11; break;
3100 		case 110: tap->wr_rate =  22; break;
3101 		/* OFDM rates. */
3102 		case 0xd: tap->wr_rate =  12; break;
3103 		case 0xf: tap->wr_rate =  18; break;
3104 		case 0x5: tap->wr_rate =  24; break;
3105 		case 0x7: tap->wr_rate =  36; break;
3106 		case 0x9: tap->wr_rate =  48; break;
3107 		case 0xb: tap->wr_rate =  72; break;
3108 		case 0x1: tap->wr_rate =  96; break;
3109 		case 0x3: tap->wr_rate = 108; break;
3110 		/* Unknown rate: should not happen. */
3111 		default:  tap->wr_rate =   0;
3112 		}
3113 	}
3114 
3115 	IWM_UNLOCK(sc);
3116 	if (ni != NULL) {
3117 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3118 		ieee80211_input_mimo(ni, m, &rxs);
3119 		ieee80211_free_node(ni);
3120 	} else {
3121 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3122 		ieee80211_input_mimo_all(ic, m, &rxs);
3123 	}
3124 	IWM_LOCK(sc);
3125 }
3126 
3127 static int
3128 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3129 	struct iwm_node *in)
3130 {
3131 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3132 	struct ieee80211_node *ni = &in->in_ni;
3133 	struct ieee80211vap *vap = ni->ni_vap;
3134 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3135 	int failack = tx_resp->failure_frame;
3136 
3137 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3138 
3139 	/* Update rate control statistics. */
3140 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3141 	    __func__,
3142 	    (int) le16toh(tx_resp->status.status),
3143 	    (int) le16toh(tx_resp->status.sequence),
3144 	    tx_resp->frame_count,
3145 	    tx_resp->bt_kill_count,
3146 	    tx_resp->failure_rts,
3147 	    tx_resp->failure_frame,
3148 	    le32toh(tx_resp->initial_rate),
3149 	    (int) le16toh(tx_resp->wireless_media_time));
3150 
3151 	if (status != IWM_TX_STATUS_SUCCESS &&
3152 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3153 		ieee80211_ratectl_tx_complete(vap, ni,
3154 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3155 		return (1);
3156 	} else {
3157 		ieee80211_ratectl_tx_complete(vap, ni,
3158 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3159 		return (0);
3160 	}
3161 }
3162 
3163 static void
3164 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3165 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3166 {
3167 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3168 	int idx = cmd_hdr->idx;
3169 	int qid = cmd_hdr->qid;
3170 	struct iwm_tx_ring *ring = &sc->txq[qid];
3171 	struct iwm_tx_data *txd = &ring->data[idx];
3172 	struct iwm_node *in = txd->in;
3173 	struct mbuf *m = txd->m;
3174 	int status;
3175 
3176 	KASSERT(txd->done == 0, ("txd not done"));
3177 	KASSERT(txd->in != NULL, ("txd without node"));
3178 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3179 
3180 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3181 
3182 	sc->sc_tx_timer = 0;
3183 
3184 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3185 
3186 	/* Unmap and free mbuf. */
3187 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3188 	bus_dmamap_unload(ring->data_dmat, txd->map);
3189 
3190 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3191 	    "free txd %p, in %p\n", txd, txd->in);
3192 	txd->done = 1;
3193 	txd->m = NULL;
3194 	txd->in = NULL;
3195 
3196 	ieee80211_tx_complete(&in->in_ni, m, status);
3197 
3198 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3199 		sc->qfullmsk &= ~(1 << ring->qid);
3200 		if (sc->qfullmsk == 0) {
3201 			/*
3202 			 * Well, we're in interrupt context, but then again
3203 			 * I guess net80211 does all sorts of stunts in
3204 			 * interrupt context, so maybe this is no biggie.
3205 			 */
3206 			iwm_start(sc);
3207 		}
3208 	}
3209 }
3210 
3211 /*
3212  * transmit side
3213  */
3214 
3215 /*
3216  * Process a "command done" firmware notification.  This is where we wakeup
3217  * processes waiting for a synchronous command completion.
3218  * from if_iwn
3219  */
3220 static void
3221 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3222 {
3223 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3224 	struct iwm_tx_data *data;
3225 
3226 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3227 		return;	/* Not a command ack. */
3228 	}
3229 
3230 	data = &ring->data[pkt->hdr.idx];
3231 
3232 	/* If the command was mapped in an mbuf, free it. */
3233 	if (data->m != NULL) {
3234 		bus_dmamap_sync(ring->data_dmat, data->map,
3235 		    BUS_DMASYNC_POSTWRITE);
3236 		bus_dmamap_unload(ring->data_dmat, data->map);
3237 		m_freem(data->m);
3238 		data->m = NULL;
3239 	}
3240 	wakeup(&ring->desc[pkt->hdr.idx]);
3241 }
3242 
3243 #if 0
3244 /*
3245  * necessary only for block ack mode
3246  */
3247 void
3248 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3249 	uint16_t len)
3250 {
3251 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3252 	uint16_t w_val;
3253 
3254 	scd_bc_tbl = sc->sched_dma.vaddr;
3255 
3256 	len += 8; /* magic numbers came naturally from paris */
3257 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3258 		len = roundup(len, 4) / 4;
3259 
3260 	w_val = htole16(sta_id << 12 | len);
3261 
3262 	/* Update TX scheduler. */
3263 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3264 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3265 	    BUS_DMASYNC_PREWRITE);
3266 
3267 	/* I really wonder what this is ?!? */
3268 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3269 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3270 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3271 		    BUS_DMASYNC_PREWRITE);
3272 	}
3273 }
3274 #endif
3275 
3276 /*
3277  * Take an 802.11 (non-n) rate, find the relevant rate
3278  * table entry.  return the index into in_ridx[].
3279  *
3280  * The caller then uses that index back into in_ridx
3281  * to figure out the rate index programmed /into/
3282  * the firmware for this given node.
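 *
 * E.g. (hypothetical): if iwm_rates[in->in_ridx[3]].rate == 22 (11 Mb/s)
 * and ni->ni_txrate is 22, the lookup returns 3.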
3283  */
3284 static int
3285 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3286     uint8_t rate)
3287 {
3288 	int i;
3289 	uint8_t r;
3290 
3291 	for (i = 0; i < nitems(in->in_ridx); i++) {
3292 		r = iwm_rates[in->in_ridx[i]].rate;
3293 		if (rate == r)
3294 			return (i);
3295 	}
3296 	/* XXX Return the first */
3297 	/* XXX TODO: have it return the /lowest/ */
3298 	return (0);
3299 }
3300 
3301 /*
3302  * Fill in the rate related information for a transmit command.
3303  */
3304 static const struct iwm_rate *
3305 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3306 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3307 {
3308 	struct ieee80211com *ic = &sc->sc_ic;
3309 	struct ieee80211_node *ni = &in->in_ni;
3310 	const struct iwm_rate *rinfo;
3311 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3312 	int ridx, rate_flags;
3313 
3314 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3315 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3316 
3317 	/*
3318 	 * XXX TODO: everything about the rate selection here is terrible!
3319 	 */
3320 
3321 	if (type == IEEE80211_FC0_TYPE_DATA) {
3322 		int i;
3323 		/* for data frames, use RS table */
3324 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3325 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3326 		ridx = in->in_ridx[i];
3327 
3328 		/* This is the index into the programmed table */
3329 		tx->initial_rate_index = i;
3330 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3331 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3332 		    "%s: start with i=%d, txrate %d\n",
3333 		    __func__, i, iwm_rates[ridx].rate);
3334 	} else {
3335 		/*
3336 		 * For non-data, use the lowest supported rate for the given
3337 		 * operational mode.
3338 		 *
3339 		 * Note: there may not be any rate control information available.
3340 		 * This driver currently assumes if we're transmitting data
3341 		 * frames, use the rate control table.  Grr.
3342 		 *
3343 		 * XXX TODO: use the configured rate for the traffic type!
3344 		 * XXX TODO: this should be per-vap, not curmode; as we later
3345 		 * on we'll want to handle off-channel stuff (eg TDLS).
3346 		 */
3347 		if (ic->ic_curmode == IEEE80211_MODE_11A) {
3348 			/*
3349 			 * XXX this assumes the mode is either 11a or not 11a;
3350 			 * definitely won't work for 11n.
3351 			 */
3352 			ridx = IWM_RIDX_OFDM;
3353 		} else {
3354 			ridx = IWM_RIDX_CCK;
3355 		}
3356 	}
3357 
3358 	rinfo = &iwm_rates[ridx];
3359 
3360 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3361 	    __func__, ridx,
3362 	    rinfo->rate,
3363 	    !! (IWM_RIDX_IS_CCK(ridx))
3364 	    );
3365 
3366 	/* XXX TODO: hard-coded TX antenna? */
3367 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3368 	if (IWM_RIDX_IS_CCK(ridx))
3369 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3370 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3371 
3372 	return rinfo;
3373 }
3374 
3375 #define TB0_SIZE 16
3376 static int
3377 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3378 {
3379 	struct ieee80211com *ic = &sc->sc_ic;
3380 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3381 	struct iwm_node *in = IWM_NODE(ni);
3382 	struct iwm_tx_ring *ring;
3383 	struct iwm_tx_data *data;
3384 	struct iwm_tfd *desc;
3385 	struct iwm_device_cmd *cmd;
3386 	struct iwm_tx_cmd *tx;
3387 	struct ieee80211_frame *wh;
3388 	struct ieee80211_key *k = NULL;
3389 #if !defined(__DragonFly__)
3390 	struct mbuf *m1;
3391 #endif
3392 	const struct iwm_rate *rinfo;
3393 	uint32_t flags;
3394 	u_int hdrlen;
3395 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3396 	int nsegs;
3397 	uint8_t tid, type;
3398 	int i, totlen, error, pad;
3399 
3400 	wh = mtod(m, struct ieee80211_frame *);
3401 	hdrlen = ieee80211_anyhdrsize(wh);
3402 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3403 	tid = 0;
3404 	ring = &sc->txq[ac];
3405 	desc = &ring->desc[ring->cur];
3406 	memset(desc, 0, sizeof(*desc));
3407 	data = &ring->data[ring->cur];
3408 
3409 	/* Fill out iwm_tx_cmd to send to the firmware */
3410 	cmd = &ring->cmd[ring->cur];
3411 	cmd->hdr.code = IWM_TX_CMD;
3412 	cmd->hdr.flags = 0;
3413 	cmd->hdr.qid = ring->qid;
3414 	cmd->hdr.idx = ring->cur;
3415 
3416 	tx = (void *)cmd->data;
3417 	memset(tx, 0, sizeof(*tx));
3418 
3419 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3420 
3421 	/* Encrypt the frame if need be. */
3422 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3423 		/* Retrieve key for TX && do software encryption. */
3424 		k = ieee80211_crypto_encap(ni, m);
3425 		if (k == NULL) {
3426 			m_freem(m);
3427 			return (ENOBUFS);
3428 		}
3429 		/* 802.11 header may have moved. */
3430 		wh = mtod(m, struct ieee80211_frame *);
3431 	}
3432 
3433 	if (ieee80211_radiotap_active_vap(vap)) {
3434 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3435 
3436 		tap->wt_flags = 0;
3437 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3438 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3439 		tap->wt_rate = rinfo->rate;
3440 		if (k != NULL)
3441 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3442 		ieee80211_radiotap_tx(vap, m);
3443 	}
3444 
3446 	totlen = m->m_pkthdr.len;
3447 
3448 	flags = 0;
3449 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3450 		flags |= IWM_TX_CMD_FLG_ACK;
3451 	}
3452 
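	/* Protection (RTS/CTS) applies to long unicast data frames. */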
3453 	if (type == IEEE80211_FC0_TYPE_DATA
3454 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3455 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3456 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3457 	}
3458 
3459 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3460 	    type != IEEE80211_FC0_TYPE_DATA)
3461 		tx->sta_id = sc->sc_aux_sta.sta_id;
3462 	else
3463 		tx->sta_id = IWM_STATION_ID;
3464 
3465 	if (type == IEEE80211_FC0_TYPE_MGT) {
3466 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3467 
3468 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3469 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3470 			tx->pm_frame_timeout = htole16(3);
3471 		else
3472 			tx->pm_frame_timeout = htole16(2);
3473 	} else {
3474 		tx->pm_frame_timeout = htole16(0);
3475 	}
3476 
3477 	if (hdrlen & 3) {
3478 		/* First segment length must be a multiple of 4. */
3479 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3480 		pad = 4 - (hdrlen & 3);
3481 	} else
3482 		pad = 0;
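	/*
	 * Example: a QoS data header is 26 bytes, so (hdrlen & 3) == 2
	 * and pad == 2; IWM_TX_CMD_FLG_MH_PAD presumably tells the
	 * firmware to skip those pad bytes between the 802.11 header
	 * and the payload.
	 */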
3483 
3484 	tx->driver_txop = 0;
3485 	tx->next_frame_len = 0;
3486 
3487 	tx->len = htole16(totlen);
3488 	tx->tid_tspec = tid;
3489 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3490 
3491 	/* Set physical address of "scratch area". */
3492 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3493 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3494 
3495 	/* Copy 802.11 header in TX command. */
3496 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3497 
3498 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3499 
3500 	tx->sec_ctl = 0;
3501 	tx->tx_flags |= htole32(flags);
3502 
3503 	/* Trim 802.11 header. */
3504 	m_adj(m, hdrlen);
3505 #if defined(__DragonFly__)
3506 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3507 					    segs, IWM_MAX_SCATTER - 2,
3508 					    &nsegs, BUS_DMA_NOWAIT);
3509 #else
3510 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3511 	    segs, &nsegs, BUS_DMA_NOWAIT);
3512 #endif
3513 	if (error != 0) {
3514 #if defined(__DragonFly__)
3515 		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3516 		    error);
3517 		m_freem(m);
3518 		return error;
3519 #else
3520 		if (error != EFBIG) {
3521 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3522 			    error);
3523 			m_freem(m);
3524 			return error;
3525 		}
3526 		/* Too many DMA segments, linearize mbuf. */
3527 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3528 		if (m1 == NULL) {
3529 			device_printf(sc->sc_dev,
3530 			    "%s: could not defrag mbuf\n", __func__);
3531 			m_freem(m);
3532 			return (ENOBUFS);
3533 		}
3534 		m = m1;
3535 
3536 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3537 		    segs, &nsegs, BUS_DMA_NOWAIT);
3538 		if (error != 0) {
3539 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3540 			    error);
3541 			m_freem(m);
3542 			return error;
3543 		}
3544 #endif
3545 	}
3546 	data->m = m;
3547 	data->in = in;
3548 	data->done = 0;
3549 
3550 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3551 	    "sending txd %p, in %p\n", data, data->in);
3552 	KASSERT(data->in != NULL, ("node is NULL"));
3553 
3554 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3555 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3556 	    ring->qid, ring->cur, totlen, nsegs,
3557 	    le32toh(tx->tx_flags),
3558 	    le32toh(tx->rate_n_flags),
3559 	    tx->initial_rate_index
3560 	    );
3561 
3562 	/* Fill TX descriptor. */
3563 	desc->num_tbs = 2 + nsegs;
3564 
3565 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3566 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3567 	    (TB0_SIZE << 4);
3568 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3569 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3570 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3571 	      + hdrlen + pad - TB0_SIZE) << 4);
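	/*
	 * A sketch of the layout as this driver appears to use it: TB0
	 * covers the first TB0_SIZE (16) bytes of the command, TB1 the
	 * rest of the command header, the TX command and the (padded)
	 * 802.11 header, and TB2..TBn the payload segments below.  In
	 * hi_n_len, bits 32-35 of the DMA address go in the low nibble
	 * and the buffer length in the upper 12 bits, hence the << 4.
	 */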
3572 
3573 	/* Other DMA segments are for data payload. */
3574 	for (i = 0; i < nsegs; i++) {
3575 		seg = &segs[i];
3576 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3577 		desc->tbs[i+2].hi_n_len =
3578 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3579 		    (seg->ds_len << 4);
3580 	}
3581 
3582 	bus_dmamap_sync(ring->data_dmat, data->map,
3583 	    BUS_DMASYNC_PREWRITE);
3584 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3585 	    BUS_DMASYNC_PREWRITE);
3586 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3587 	    BUS_DMASYNC_PREWRITE);
3588 
3589 #if 0
3590 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3591 #endif
3592 
3593 	/* Kick TX ring. */
3594 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3595 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
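	/*
	 * The write-pointer register appears to take the queue id in
	 * the upper byte and the new tail index in the lower byte; the
	 * hardware then fetches descriptors up to that index.
	 */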
3596 
3597 	/* Mark TX ring as full if we reach a certain threshold. */
3598 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3599 		sc->qfullmsk |= 1 << ring->qid;
3600 	}
3601 
3602 	return 0;
3603 }
3604 
3605 static int
3606 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3607     const struct ieee80211_bpf_params *params)
3608 {
3609 	struct ieee80211com *ic = ni->ni_ic;
3610 	struct iwm_softc *sc = ic->ic_softc;
3611 	int error = 0;
3612 
3613 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3614 	    "->%s begin\n", __func__);
3615 
3616 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3617 		m_freem(m);
3618 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3619 		    "<-%s not RUNNING\n", __func__);
3620 		return (ENETDOWN);
3621 	}
3622 
3623 	IWM_LOCK(sc);
3624 	/* XXX fix this: the bpf params are currently ignored. */
3625 	if (params == NULL) {
3626 		error = iwm_tx(sc, m, ni, 0);
3627 	} else {
3628 		error = iwm_tx(sc, m, ni, 0);
3629 	}
3630 	sc->sc_tx_timer = 5;
3631 	IWM_UNLOCK(sc);
3632 
3633 	return (error);
3634 }
3635 
3636 /*
3637  * mvm/tx.c
3638  */
3639 
3640 #if 0
3641 /*
3642  * Note that there are transports that buffer frames before they reach
3643  * the firmware. This means that after flush_tx_path is called, the
3644  * queue might not be empty. The race-free way to handle this is to:
3645  * 1) set the station as draining
3646  * 2) flush the Tx path
3647  * 3) wait for the transport queues to be empty
3648  */
3649 int
3650 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3651 {
3652 	struct iwm_tx_path_flush_cmd flush_cmd = {
3653 		.queues_ctl = htole32(tfd_msk),
3654 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3655 	};
3656 	int ret;
3657 
3658 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3659 	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3660 	    sizeof(flush_cmd), &flush_cmd);
3661 	if (ret)
3662 		device_printf(sc->sc_dev,
3663 		    "Flushing tx queue failed: %d\n", ret);
3664 	return ret;
3665 }
3666 #endif
3667 
3668 static int
3669 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3670 	struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3671 {
3672 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3673 	    cmd, status);
3674 }
3675 
3676 /* send station add/update command to firmware */
3677 static int
3678 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3679 {
3680 	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3681 	int ret;
3682 	uint32_t status;
3683 
3684 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3685 
3686 	add_sta_cmd.sta_id = IWM_STATION_ID;
3687 	add_sta_cmd.mac_id_n_color
3688 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3689 	        IWM_DEFAULT_COLOR));
3690 	if (!update) {
3691 		int ac;
3692 		for (ac = 0; ac < WME_NUM_AC; ac++) {
3693 			add_sta_cmd.tfd_queue_msk |=
3694 			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3695 		}
3696 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3697 	}
3698 	add_sta_cmd.add_modify = update ? 1 : 0;
3699 	add_sta_cmd.station_flags_msk
3700 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3701 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3702 	if (update)
3703 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3704 
3705 	status = IWM_ADD_STA_SUCCESS;
3706 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3707 	if (ret)
3708 		return ret;
3709 
3710 	switch (status) {
3711 	case IWM_ADD_STA_SUCCESS:
3712 		break;
3713 	default:
3714 		ret = EIO;
3715 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3716 		break;
3717 	}
3718 
3719 	return ret;
3720 }
3721 
3722 static int
3723 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3724 {
3725 	return iwm_mvm_sta_send_to_fw(sc, in, 0);
3726 }
3727 
3728 static int
3729 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3730 {
3731 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
3732 }
3733 
3734 static int
3735 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3736 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3737 {
3738 	struct iwm_mvm_add_sta_cmd_v7 cmd;
3739 	int ret;
3740 	uint32_t status;
3741 
3742 	memset(&cmd, 0, sizeof(cmd));
3743 	cmd.sta_id = sta->sta_id;
3744 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3745 
3746 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3747 	cmd.tid_disable_tx = htole16(0xffff);
3748 
3749 	if (addr)
3750 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3751 
3752 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3753 	if (ret)
3754 		return ret;
3755 
3756 	switch (status) {
3757 	case IWM_ADD_STA_SUCCESS:
3758 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3759 		    "%s: Internal station added.\n", __func__);
3760 		return 0;
3761 	default:
3762 		device_printf(sc->sc_dev,
3763 		    "%s: Add internal station failed, status=0x%x\n",
3764 		    __func__, status);
3765 		ret = EIO;
3766 		break;
3767 	}
3768 	return ret;
3769 }
3770 
3771 static int
3772 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3773 {
3774 	int ret;
3775 
3776 	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3777 	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3778 
3779 	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3780 	if (ret)
3781 		return ret;
3782 
3783 	ret = iwm_mvm_add_int_sta_common(sc,
3784 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3785 
3786 	if (ret)
3787 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3788 	return ret;
3789 }
3790 
3791 static int
3792 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3793 {
3794 	struct iwm_time_quota_cmd cmd;
3795 	int i, idx, ret, num_active_macs, quota, quota_rem;
3796 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3797 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3798 	uint16_t id;
3799 
3800 	memset(&cmd, 0, sizeof(cmd));
3801 
3802 	/* currently, PHY ID == binding ID */
3803 	if (in) {
3804 		id = in->in_phyctxt->id;
3805 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3806 		colors[id] = in->in_phyctxt->color;
3807 
3808 		n_ifs[id] = 1;
3810 	}
3811 
3812 	/*
3813 	 * The FW's scheduling session consists of
3814 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3815 	 * equally between all the bindings that require quota.
3816 	 */
3817 	num_active_macs = 0;
3818 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3819 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3820 		num_active_macs += n_ifs[i];
3821 	}
3822 
3823 	quota = 0;
3824 	quota_rem = 0;
3825 	if (num_active_macs) {
3826 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3827 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3828 	}
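	/*
	 * E.g. with a single active MAC this degenerates to quota ==
	 * IWM_MVM_MAX_QUOTA and quota_rem == 0; any remainder from the
	 * integer division is handed to the first binding further down.
	 */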
3829 
3830 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3831 		if (colors[i] < 0)
3832 			continue;
3833 
3834 		cmd.quotas[idx].id_and_color =
3835 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3836 
3837 		if (n_ifs[i] <= 0) {
3838 			cmd.quotas[idx].quota = htole32(0);
3839 			cmd.quotas[idx].max_duration = htole32(0);
3840 		} else {
3841 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3842 			cmd.quotas[idx].max_duration = htole32(0);
3843 		}
3844 		idx++;
3845 	}
3846 
3847 	/* Give the remainder of the session to the first binding */
3848 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3849 
3850 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3851 	    sizeof(cmd), &cmd);
3852 	if (ret)
3853 		device_printf(sc->sc_dev,
3854 		    "%s: Failed to send quota: %d\n", __func__, ret);
3855 	return ret;
3856 }
3857 
3858 /*
3859  * ieee80211 routines
3860  */
3861 
3862 /*
3863  * Change to AUTH state in 80211 state machine.  Roughly matches what
3864  * Linux does in bss_info_changed().
3865  */
3866 static int
3867 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3868 {
3869 	struct ieee80211_node *ni;
3870 	struct iwm_node *in;
3871 	struct iwm_vap *iv = IWM_VAP(vap);
3872 	uint32_t duration;
3873 	int error;
3874 
3875 	/*
3876 	 * XXX I have a feeling that the vap node is being
3877 	 * freed from underneath us. Grr.
3878 	 */
3879 	ni = ieee80211_ref_node(vap->iv_bss);
3880 	in = IWM_NODE(ni);
3881 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3882 	    "%s: called; vap=%p, bss ni=%p\n",
3883 	    __func__,
3884 	    vap,
3885 	    ni);
3886 
3887 	in->in_assoc = 0;
3888 
3889 	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3890 	if (error != 0)
3891 		return error;
3892 
3893 	error = iwm_allow_mcast(vap, sc);
3894 	if (error) {
3895 		device_printf(sc->sc_dev,
3896 		    "%s: failed to set multicast\n", __func__);
3897 		goto out;
3898 	}
3899 
3900 	/*
3901 	 * This is where it deviates from what Linux does.
3902 	 *
3903 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
3904 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
3905 	 * and always does a mac_ctx_changed().
3906 	 *
3907 	 * The OpenBSD port doesn't attempt to do that - it resets things
3908 	 * at odd states and does the add here.
3909 	 *
3910 	 * So, until the state handling is fixed (ie, we never reset
3911 	 * the NIC except for a firmware failure, which should drag
3912 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3913 	 * contexts that are required), let's do a dirty hack here.
3914 	 */
3915 	if (iv->is_uploaded) {
3916 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3917 			device_printf(sc->sc_dev,
3918 			    "%s: failed to update MAC\n", __func__);
3919 			goto out;
3920 		}
3921 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3922 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3923 			device_printf(sc->sc_dev,
3924 			    "%s: failed update phy ctxt\n", __func__);
3925 			goto out;
3926 		}
3927 		in->in_phyctxt = &sc->sc_phyctxt[0];
3928 
3929 		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3930 			device_printf(sc->sc_dev,
3931 			    "%s: binding update cmd failed\n", __func__);
3932 			goto out;
3933 		}
3934 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3935 			device_printf(sc->sc_dev,
3936 			    "%s: failed to update sta\n", __func__);
3937 			goto out;
3938 		}
3939 	} else {
3940 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3941 			device_printf(sc->sc_dev,
3942 			    "%s: failed to add MAC\n", __func__);
3943 			goto out;
3944 		}
3945 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3946 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3947 			device_printf(sc->sc_dev,
3948 			    "%s: failed add phy ctxt!\n", __func__);
3949 			error = ETIMEDOUT;
3950 			goto out;
3951 		}
3952 		in->in_phyctxt = &sc->sc_phyctxt[0];
3953 
3954 		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3955 			device_printf(sc->sc_dev,
3956 			    "%s: binding add cmd failed\n", __func__);
3957 			goto out;
3958 		}
3959 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3960 			device_printf(sc->sc_dev,
3961 			    "%s: failed to add sta\n", __func__);
3962 			goto out;
3963 		}
3964 	}
3965 
3966 	/*
3967 	 * Prevent the FW from wandering off channel during association
3968 	 * by "protecting" the session with a time event.
3969 	 */
3970 	/* XXX duration is in units of TU, not MS */
3971 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3972 	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3973 	DELAY(100);
3974 
3975 	error = 0;
3976 out:
3977 	ieee80211_free_node(ni);
3978 	return (error);
3979 }
3980 
3981 static int
3982 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3983 {
3984 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
3985 	int error;
3986 
3987 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3988 		device_printf(sc->sc_dev,
3989 		    "%s: failed to update STA\n", __func__);
3990 		return error;
3991 	}
3992 
3993 	in->in_assoc = 1;
3994 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3995 		device_printf(sc->sc_dev,
3996 		    "%s: failed to update MAC\n", __func__);
3997 		return error;
3998 	}
3999 
4000 	return 0;
4001 }
4002 
4003 static int
4004 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4005 {
4006 	/*
4007 	 * Ok, so *technically* the proper set of calls for going
4008 	 * from RUN back to SCAN is:
4009 	 *
4010 	 * iwm_mvm_power_mac_disable(sc, in);
4011 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4012 	 * iwm_mvm_rm_sta(sc, in);
4013 	 * iwm_mvm_update_quotas(sc, NULL);
4014 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4015 	 * iwm_mvm_binding_remove_vif(sc, in);
4016 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4017 	 *
4018 	 * However, that freezes the device no matter which permutations
4019 	 * and modifications are attempted.  Obviously, this driver is missing
4020 	 * something since it works in the Linux driver, but figuring out what
4021 	 * is missing is a little more complicated.  Now, since we're going
4022 	 * back to nothing anyway, we'll just do a complete device reset.
4023 	 * Up yours, device!
4024 	 */
4025 	/* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
4026 	iwm_stop_device(sc);
4027 	iwm_init_hw(sc);
4028 	if (in)
4029 		in->in_assoc = 0;
4030 	return 0;
4031 
4032 #if 0
4033 	int error;
4034 
4035 	iwm_mvm_power_mac_disable(sc, in);
4036 
4037 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4038 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4039 		return error;
4040 	}
4041 
4042 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4043 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4044 		return error;
4045 	}
4047 	in->in_assoc = 0;
4048 	iwm_mvm_update_quotas(sc, NULL);
4049 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4050 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4051 		return error;
4052 	}
4053 	iwm_mvm_binding_remove_vif(sc, in);
4054 
4055 	iwm_mvm_mac_ctxt_remove(sc, in);
4056 
4057 	return error;
4058 #endif
4059 }
4060 
4061 static struct ieee80211_node *
4062 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4063 {
4064 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4065 	    M_INTWAIT | M_ZERO);
4066 }
4067 
4068 static void
4069 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4070 {
4071 	struct ieee80211_node *ni = &in->in_ni;
4072 	struct iwm_lq_cmd *lq = &in->in_lq;
4073 	int nrates = ni->ni_rates.rs_nrates;
4074 	int i, ridx, tab = 0;
4075 	int txant = 0;
4076 
4077 	if (nrates > nitems(lq->rs_table)) {
4078 		device_printf(sc->sc_dev,
4079 		    "%s: node supports %d rates, driver handles "
4080 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4081 		return;
4082 	}
4083 	if (nrates == 0) {
4084 		device_printf(sc->sc_dev,
4085 		    "%s: node supports 0 rates, odd!\n", __func__);
4086 		return;
4087 	}
4088 
4089 	/*
4090 	 * XXX .. and most of iwm_node is not initialised explicitly;
4091 	 * it's all just 0x0 passed to the firmware.
4092 	 */
4093 
4094 	/* first figure out which rates we should support */
4095 	/* XXX TODO: this isn't 11n aware /at all/ */
4096 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4097 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4098 	    "%s: nrates=%d\n", __func__, nrates);
4099 
4100 	/*
4101 	 * Loop over nrates and populate in_ridx from the highest
4102 	 * rate to the lowest rate.  Remember, in_ridx[] has
4103 	 * IEEE80211_RATE_MAXSIZE entries!
4104 	 */
4105 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4106 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4107 
4108 		/* Map 802.11 rate to HW rate index. */
4109 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4110 			if (iwm_rates[ridx].rate == rate)
4111 				break;
4112 		if (ridx > IWM_RIDX_MAX) {
4113 			device_printf(sc->sc_dev,
4114 			    "%s: WARNING: device rate for %d not found!\n",
4115 			    __func__, rate);
4116 		} else {
4117 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4118 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4119 			    __func__,
4120 			    i,
4121 			    rate,
4122 			    ridx);
4123 			in->in_ridx[i] = ridx;
4124 		}
4125 	}
4126 
4127 	/* then construct a lq_cmd based on those */
4128 	memset(lq, 0, sizeof(*lq));
4129 	lq->sta_id = IWM_STATION_ID;
4130 
4131 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4132 	if (ni->ni_flags & IEEE80211_NODE_HT)
4133 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4134 
4135 	/*
4136 	 * Are these used? (We don't do SISO or MIMO.)
4137 	 * They need to be non-zero, though, or we get an error.
4138 	 */
4139 	lq->single_stream_ant_msk = 1;
4140 	lq->dual_stream_ant_msk = 1;
4141 
4142 	/*
4143 	 * Build the actual rate selection table.
4144 	 * The lowest bits are the rates.  Additionally,
4145 	 * CCK needs bit 9 to be set.  The rest of the bits
4146 	 * we add to the table select the TX antenna.
4147 	 * Note that we add the rates with the highest rate first
4148 	 * (opposite of ni_rates).
4149 	 */
4150 	/*
4151 	 * XXX TODO: this should be looping over the min of nrates
4152 	 * and LQ_MAX_RETRY_NUM.  Sigh.
4153 	 */
4154 	for (i = 0; i < nrates; i++) {
4155 		int nextant;
4156 
4157 		if (txant == 0)
4158 			txant = iwm_fw_valid_tx_ant(sc);
4159 		nextant = 1<<(ffs(txant)-1);
4160 		txant &= ~nextant;
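		/*
		 * The two lines above rotate through the valid TX
		 * antennas: ffs() picks the lowest antenna bit still
		 * set in txant and clears it; once the mask is empty
		 * it is refilled from the firmware's valid-antenna
		 * mask.  E.g. with a mask of 0x3 successive entries
		 * alternate between the two antennas.
		 */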
4161 
4162 		/*
4163 		 * Map the rate id into a rate index into
4164 		 * our hardware table containing the
4165 		 * configuration to use for this rate.
4166 		 */
4167 		ridx = in->in_ridx[i];
4168 		tab = iwm_rates[ridx].plcp;
4169 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4170 		if (IWM_RIDX_IS_CCK(ridx))
4171 			tab |= IWM_RATE_MCS_CCK_MSK;
4172 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4173 		    "station rate i=%d, rate=%d, hw=%x\n",
4174 		    i, iwm_rates[ridx].rate, tab);
4175 		lq->rs_table[i] = htole32(tab);
4176 	}
4177 	/* then fill the rest with the lowest possible rate */
4178 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4179 		KASSERT(tab != 0, ("invalid tab"));
4180 		lq->rs_table[i] = htole32(tab);
4181 	}
4182 }
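	/*
	 * The firmware walks rs_table entry by entry on retries, so
	 * padding the unused tail with the lowest configured rate
	 * should make repeated retransmits fall back to the most
	 * robust rate (a sketch of the intent, following the Linux
	 * rate-scaling behaviour).
	 */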
4183 
4184 static int
4185 iwm_media_change(struct ifnet *ifp)
4186 {
4187 	struct ieee80211vap *vap = ifp->if_softc;
4188 	struct ieee80211com *ic = vap->iv_ic;
4189 	struct iwm_softc *sc = ic->ic_softc;
4190 	int error;
4191 
4192 	error = ieee80211_media_change(ifp);
4193 	if (error != ENETRESET)
4194 		return error;
4195 
4196 	IWM_LOCK(sc);
4197 	if (ic->ic_nrunning > 0) {
4198 		iwm_stop(sc);
4199 		iwm_init(sc);
4200 	}
4201 	IWM_UNLOCK(sc);
4202 	return error;
4203 }
4204 
4205 
4206 static int
4207 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4208 {
4209 	struct iwm_vap *ivp = IWM_VAP(vap);
4210 	struct ieee80211com *ic = vap->iv_ic;
4211 	struct iwm_softc *sc = ic->ic_softc;
4212 	struct iwm_node *in;
4213 	int error;
4214 
4215 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4216 	    "switching state %s -> %s\n",
4217 	    ieee80211_state_name[vap->iv_state],
4218 	    ieee80211_state_name[nstate]);
4219 	IEEE80211_UNLOCK(ic);
4220 	IWM_LOCK(sc);
4221 
4222 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4223 		iwm_led_blink_stop(sc);
4224 
4225 	/* disable beacon filtering if we're hopping out of RUN */
4226 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4227 		iwm_mvm_disable_beacon_filter(sc);
4228 
4229 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4230 			in->in_assoc = 0;
4231 
4232 		iwm_release(sc, NULL);
4233 
4234 		/*
4235 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4236 		 * above then the card will be completely reinitialized,
4237 		 * so the driver must do everything necessary to bring the card
4238 		 * from INIT to SCAN.
4239 		 *
4240 		 * Additionally, upon receiving deauth frame from AP,
4241 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4242 		 * state. This will also fail with this driver, so bring the FSM
4243 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4244 		 *
4245 		 * XXX TODO: fix this for FreeBSD!
4246 		 */
4247 		if (nstate == IEEE80211_S_SCAN ||
4248 		    nstate == IEEE80211_S_AUTH ||
4249 		    nstate == IEEE80211_S_ASSOC) {
4250 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4251 			    "Force transition to INIT; MGT=%d\n", arg);
4252 			IWM_UNLOCK(sc);
4253 			IEEE80211_LOCK(ic);
4254 			/* Always pass arg as -1 since we can't Tx right now. */
4255 			/*
4256 			 * XXX arg is just ignored anyway when transitioning
4257 			 *     to IEEE80211_S_INIT.
4258 			 */
4259 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4260 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4261 			    "Going INIT->SCAN\n");
4262 			nstate = IEEE80211_S_SCAN;
4263 			IEEE80211_UNLOCK(ic);
4264 			IWM_LOCK(sc);
4265 		}
4266 	}
4267 
4268 	switch (nstate) {
4269 	case IEEE80211_S_INIT:
4270 		break;
4271 
4272 	case IEEE80211_S_AUTH:
4273 		if ((error = iwm_auth(vap, sc)) != 0) {
4274 			device_printf(sc->sc_dev,
4275 			    "%s: could not move to auth state: %d\n",
4276 			    __func__, error);
4277 			break;
4278 		}
4279 		break;
4280 
4281 	case IEEE80211_S_ASSOC:
4282 		if ((error = iwm_assoc(vap, sc)) != 0) {
4283 			device_printf(sc->sc_dev,
4284 			    "%s: failed to associate: %d\n", __func__,
4285 			    error);
4286 			break;
4287 		}
4288 		break;
4289 
4290 	case IEEE80211_S_RUN:
4291 	{
4292 		struct iwm_host_cmd cmd = {
4293 			.id = IWM_LQ_CMD,
4294 			.len = { sizeof(in->in_lq), },
4295 			.flags = IWM_CMD_SYNC,
4296 		};
4297 
4298 		/* Update the association state, now we have it all */
4299 		/* (e.g. the associd comes in at this point) */
4300 		error = iwm_assoc(vap, sc);
4301 		if (error != 0) {
4302 			device_printf(sc->sc_dev,
4303 			    "%s: failed to update association state: %d\n",
4304 			    __func__,
4305 			    error);
4306 			break;
4307 		}
4308 
4309 		in = IWM_NODE(vap->iv_bss);
4310 		iwm_mvm_power_mac_update_mode(sc, in);
4311 		iwm_mvm_enable_beacon_filter(sc, in);
4312 		iwm_mvm_update_quotas(sc, in);
4313 		iwm_setrates(sc, in);
4314 
4315 		cmd.data[0] = &in->in_lq;
4316 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4317 			device_printf(sc->sc_dev,
4318 			    "%s: IWM_LQ_CMD failed\n", __func__);
4319 		}
4320 
4321 		iwm_mvm_led_enable(sc);
4322 		break;
4323 	}
4324 
4325 	default:
4326 		break;
4327 	}
4328 	IWM_UNLOCK(sc);
4329 	IEEE80211_LOCK(ic);
4330 
4331 	return (ivp->iv_newstate(vap, nstate, arg));
4332 }
4333 
4334 void
4335 iwm_endscan_cb(void *arg, int pending)
4336 {
4337 	struct iwm_softc *sc = arg;
4338 	struct ieee80211com *ic = &sc->sc_ic;
4339 
4340 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4341 	    "%s: scan ended\n",
4342 	    __func__);
4343 
4344 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4345 }
4346 
4347 /*
4348  * Aging and idle timeouts for the different possible scenarios
4349  * in default configuration
4350  */
4351 static const uint32_t
4352 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4353 	{
4354 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4355 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4356 	},
4357 	{
4358 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4359 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4360 	},
4361 	{
4362 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4363 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4364 	},
4365 	{
4366 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4367 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4368 	},
4369 	{
4370 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4371 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4372 	},
4373 };
4374 
4375 /*
4376  * Aging and idle timeouts for the different possible scenarios
4377  * in single BSS MAC configuration.
4378  */
4379 static const uint32_t
4380 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4381 	{
4382 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4383 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4384 	},
4385 	{
4386 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4387 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4388 	},
4389 	{
4390 		htole32(IWM_SF_MCAST_AGING_TIMER),
4391 		htole32(IWM_SF_MCAST_IDLE_TIMER)
4392 	},
4393 	{
4394 		htole32(IWM_SF_BA_AGING_TIMER),
4395 		htole32(IWM_SF_BA_IDLE_TIMER)
4396 	},
4397 	{
4398 		htole32(IWM_SF_TX_RE_AGING_TIMER),
4399 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4400 	},
4401 };
4402 
4403 static void
4404 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4405     struct ieee80211_node *ni)
4406 {
4407 	int i, j, watermark;
4408 
4409 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4410 
4411 	/*
4412 	 * If we are in association flow - check antenna configuration
4413 	 * capabilities of the AP station, and choose the watermark accordingly.
4414 	 */
4415 	if (ni) {
4416 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4417 #ifdef notyet
4418 			if (ni->ni_rxmcs[2] != 0)
4419 				watermark = IWM_SF_W_MARK_MIMO3;
4420 			else if (ni->ni_rxmcs[1] != 0)
4421 				watermark = IWM_SF_W_MARK_MIMO2;
4422 			else
4423 #endif
4424 				watermark = IWM_SF_W_MARK_SISO;
4425 		} else {
4426 			watermark = IWM_SF_W_MARK_LEGACY;
4427 		}
4428 	} else {
4429 		/* Default watermark value for unassociated mode. */
4430 		watermark = IWM_SF_W_MARK_MIMO2;
4431 	}
4432 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4433 
4434 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4435 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4436 			sf_cmd->long_delay_timeouts[i][j] =
4437 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4438 		}
4439 	}
4440 
4441 	if (ni) {
4442 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4443 		       sizeof(iwm_sf_full_timeout));
4444 	} else {
4445 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4446 		       sizeof(iwm_sf_full_timeout_def));
4447 	}
4448 }
4449 
4450 static int
4451 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4452 {
4453 	struct ieee80211com *ic = &sc->sc_ic;
4454 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4455 	struct iwm_sf_cfg_cmd sf_cmd = {
4456 		.state = htole32(IWM_SF_FULL_ON),
4457 	};
4458 	int ret = 0;
4459 
4460 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4461 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4462 
4463 	switch (new_state) {
4464 	case IWM_SF_UNINIT:
4465 	case IWM_SF_INIT_OFF:
4466 		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4467 		break;
4468 	case IWM_SF_FULL_ON:
4469 		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4470 		break;
4471 	default:
4472 		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4473 		    "Invalid state %d, not sending Smart Fifo cmd\n",
4474 		    new_state);
4475 		return EINVAL;
4476 	}
4477 
4478 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4479 				   sizeof(sf_cmd), &sf_cmd);
4480 	return ret;
4481 }
4482 
4483 static int
4484 iwm_send_bt_init_conf(struct iwm_softc *sc)
4485 {
4486 	struct iwm_bt_coex_cmd bt_cmd;
4487 
4488 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4489 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4490 
4491 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4492 	    &bt_cmd);
4493 }
4494 
4495 static int
4496 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4497 {
4498 	struct iwm_mcc_update_cmd mcc_cmd;
4499 	struct iwm_host_cmd hcmd = {
4500 		.id = IWM_MCC_UPDATE_CMD,
4501 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4502 		.data = { &mcc_cmd },
4503 	};
4504 	int ret;
4505 #ifdef IWM_DEBUG
4506 	struct iwm_rx_packet *pkt;
4507 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4508 	struct iwm_mcc_update_resp *mcc_resp;
4509 	int n_channels;
4510 	uint16_t mcc;
4511 #endif
4512 	int resp_v2 = isset(sc->sc_enabled_capa,
4513 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4514 
4515 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4516 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4517 	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4518 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4519 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4520 	else
4521 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4522 
4523 	if (resp_v2)
4524 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4525 	else
4526 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4527 
4528 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4529 	    "send MCC update to FW with '%c%c' src = %d\n",
4530 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4531 
4532 	ret = iwm_send_cmd(sc, &hcmd);
4533 	if (ret)
4534 		return ret;
4535 
4536 #ifdef IWM_DEBUG
4537 	pkt = hcmd.resp_pkt;
4538 
4539 	/* Extract MCC response */
4540 	if (resp_v2) {
4541 		mcc_resp = (void *)pkt->data;
4542 		mcc = mcc_resp->mcc;
4543 		n_channels =  le32toh(mcc_resp->n_channels);
4544 	} else {
4545 		mcc_resp_v1 = (void *)pkt->data;
4546 		mcc = mcc_resp_v1->mcc;
4547 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4548 	}
4549 
4550 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4551 	if (mcc == 0)
4552 		mcc = 0x3030;  /* "00" - world */
4553 
4554 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4555 	    "regulatory domain '%c%c' (%d channels available)\n",
4556 	    mcc >> 8, mcc & 0xff, n_channels);
4557 #endif
4558 	iwm_free_resp(sc, &hcmd);
4559 
4560 	return 0;
4561 }
4562 
4563 static void
4564 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4565 {
4566 	struct iwm_host_cmd cmd = {
4567 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4568 		.len = { sizeof(uint32_t), },
4569 		.data = { &backoff, },
4570 	};
4571 
4572 	if (iwm_send_cmd(sc, &cmd) != 0) {
4573 		device_printf(sc->sc_dev,
4574 		    "failed to change thermal tx backoff\n");
4575 	}
4576 }
4577 
4578 static int
4579 iwm_init_hw(struct iwm_softc *sc)
4580 {
4581 	struct ieee80211com *ic = &sc->sc_ic;
4582 	int error, i, ac;
4583 
4584 	if ((error = iwm_start_hw(sc)) != 0) {
4585 		kprintf("iwm_start_hw: failed %d\n", error);
4586 		return error;
4587 	}
4588 
4589 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4590 		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4591 		return error;
4592 	}
4593 
4594 	/*
4595 	 * Stop and start the HW since the INIT
4596 	 * image was just loaded.
4597 	 */
4598 	iwm_stop_device(sc);
4599 	if ((error = iwm_start_hw(sc)) != 0) {
4600 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4601 		return error;
4602 	}
4603 
4604 	/* Restart, this time with the regular firmware. */
4605 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4606 	if (error) {
4607 		device_printf(sc->sc_dev, "could not load firmware\n");
4608 		goto error;
4609 	}
4610 
4611 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4612 		device_printf(sc->sc_dev, "bt init conf failed\n");
4613 		goto error;
4614 	}
4615 
4616 	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4617 		device_printf(sc->sc_dev, "antenna config failed\n");
4618 		goto error;
4619 	}
4620 
4621 	/* Send phy db control command and then phy db calibration. */
4622 	if ((error = iwm_send_phy_db_data(sc)) != 0) {
4623 		device_printf(sc->sc_dev, "phy_db_data failed\n");
4624 		goto error;
4625 	}
4626 
4627 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4628 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4629 		goto error;
4630 	}
4631 
4632 	/* Add auxiliary station for scanning */
4633 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4634 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4635 		goto error;
4636 	}
4637 
4638 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4639 		/*
4640 		 * The channel used here isn't relevant as it's
4641 		 * going to be overwritten in the other flows.
4642 		 * For now use the first channel we have.
4643 		 */
4644 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4645 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4646 			goto error;
4647 	}
4648 
4649 	/* Initialize tx backoffs to the minimum. */
4650 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4651 		iwm_mvm_tt_tx_backoff(sc, 0);
4652 
4653 	error = iwm_mvm_power_update_device(sc);
4654 	if (error)
4655 		goto error;
4656 
4657 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4658 		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4659 			goto error;
4660 	}
4661 
4662 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4663 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4664 			goto error;
4665 	}
4666 
4667 	/* Enable Tx queues. */
4668 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4669 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4670 		    iwm_mvm_ac_to_tx_fifo[ac]);
4671 		if (error)
4672 			goto error;
4673 	}
4674 
4675 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4676 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4677 		goto error;
4678 	}
4679 
4680 	return 0;
4681 
4682  error:
4683 	iwm_stop_device(sc);
4684 	return error;
4685 }
4686 
4687 /* Allow multicast from our BSSID. */
4688 static int
4689 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4690 {
4691 	struct ieee80211_node *ni = vap->iv_bss;
4692 	struct iwm_mcast_filter_cmd *cmd;
4693 	size_t size;
4694 	int error;
4695 
4696 	size = roundup(sizeof(*cmd), 4);
4697 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4698 	if (cmd == NULL)
4699 		return ENOMEM;
4700 	cmd->filter_own = 1;
4701 	cmd->port_id = 0;
4702 	cmd->count = 0;
4703 	cmd->pass_all = 1;
4704 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4705 
4706 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4707 	    IWM_CMD_SYNC, size, cmd);
4708 	kfree(cmd, M_DEVBUF);
4709 
4710 	return (error);
4711 }
4712 
4713 /*
4714  * ifnet interfaces
4715  */
4716 
4717 static void
4718 iwm_init(struct iwm_softc *sc)
4719 {
4720 	int error;
4721 
4722 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4723 		return;
4724 	}
4725 	sc->sc_generation++;
4726 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4727 
4728 	if ((error = iwm_init_hw(sc)) != 0) {
4729 		kprintf("iwm_init_hw failed %d\n", error);
4730 		iwm_stop(sc);
4731 		return;
4732 	}
4733 
4734 	/*
4735 	 * Ok, firmware loaded and we are jogging
4736 	 */
4737 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4738 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4739 }
4740 
4741 static int
4742 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4743 {
4744 	struct iwm_softc *sc;
4745 	int error;
4746 
4747 	sc = ic->ic_softc;
4748 
4749 	IWM_LOCK(sc);
4750 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4751 		IWM_UNLOCK(sc);
4752 		return (ENXIO);
4753 	}
4754 	error = mbufq_enqueue(&sc->sc_snd, m);
4755 	if (error) {
4756 		IWM_UNLOCK(sc);
4757 		return (error);
4758 	}
4759 	iwm_start(sc);
4760 	IWM_UNLOCK(sc);
4761 	return (0);
4762 }
4763 
4764 /*
4765  * Dequeue packets from sendq and call send.
4766  */
4767 static void
4768 iwm_start(struct iwm_softc *sc)
4769 {
4770 	struct ieee80211_node *ni;
4771 	struct mbuf *m;
4772 	int ac = 0;
4773 
4774 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4775 	while (sc->qfullmsk == 0 &&
4776 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4777 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4778 		if (iwm_tx(sc, m, ni, ac) != 0) {
4779 			if_inc_counter(ni->ni_vap->iv_ifp,
4780 			    IFCOUNTER_OERRORS, 1);
4781 			ieee80211_free_node(ni);
4782 			continue;
4783 		}
4784 		sc->sc_tx_timer = 15;
4785 	}
4786 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4787 }
4788 
4789 static void
4790 iwm_stop(struct iwm_softc *sc)
4791 {
4792 
4793 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4794 	sc->sc_flags |= IWM_FLAG_STOPPED;
4795 	sc->sc_generation++;
4796 	iwm_led_blink_stop(sc);
4797 	sc->sc_tx_timer = 0;
4798 	iwm_stop_device(sc);
4799 }
4800 
4801 static void
4802 iwm_watchdog(void *arg)
4803 {
4804 	struct iwm_softc *sc = arg;
4805 
4806 	if (sc->sc_tx_timer > 0) {
4807 		if (--sc->sc_tx_timer == 0) {
4808 			device_printf(sc->sc_dev, "device timeout\n");
4809 #ifdef IWM_DEBUG
4810 			iwm_nic_error(sc);
4811 #endif
4812 			iwm_stop(sc);
4813 #if defined(__DragonFly__)
4814 			++sc->sc_ic.ic_oerrors;
4815 #else
4816 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4817 #endif
4818 			return;
4819 		}
4820 	}
4821 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4822 }
4823 
4824 static void
4825 iwm_parent(struct ieee80211com *ic)
4826 {
4827 	struct iwm_softc *sc = ic->ic_softc;
4828 	int startall = 0;
4829 
4830 	IWM_LOCK(sc);
4831 	if (ic->ic_nrunning > 0) {
4832 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4833 			iwm_init(sc);
4834 			startall = 1;
4835 		}
4836 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4837 		iwm_stop(sc);
4838 	IWM_UNLOCK(sc);
4839 	if (startall)
4840 		ieee80211_start_all(ic);
4841 }
4842 
4843 /*
4844  * The interrupt side of things
4845  */
4846 
4847 /*
4848  * error dumping routines are from iwlwifi/mvm/utils.c
4849  */
4850 
4851 /*
4852  * Note: This structure is read from the device with IO accesses,
4853  * and the reading already does the endian conversion. As it is
4854  * read with uint32_t-sized accesses, any members with a different size
4855  * need to be ordered correctly though!
4856  */
4857 struct iwm_error_event_table {
4858 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4859 	uint32_t error_id;		/* type of error */
4860 	uint32_t trm_hw_status0;	/* TRM HW status */
4861 	uint32_t trm_hw_status1;	/* TRM HW status */
4862 	uint32_t blink2;		/* branch link */
4863 	uint32_t ilink1;		/* interrupt link */
4864 	uint32_t ilink2;		/* interrupt link */
4865 	uint32_t data1;		/* error-specific data */
4866 	uint32_t data2;		/* error-specific data */
4867 	uint32_t data3;		/* error-specific data */
4868 	uint32_t bcon_time;		/* beacon timer */
4869 	uint32_t tsf_low;		/* network timestamp function timer */
4870 	uint32_t tsf_hi;		/* network timestamp function timer */
4871 	uint32_t gp1;		/* GP1 timer register */
4872 	uint32_t gp2;		/* GP2 timer register */
4873 	uint32_t fw_rev_type;	/* firmware revision type */
4874 	uint32_t major;		/* uCode version major */
4875 	uint32_t minor;		/* uCode version minor */
4876 	uint32_t hw_ver;		/* HW Silicon version */
4877 	uint32_t brd_ver;		/* HW board version */
4878 	uint32_t log_pc;		/* log program counter */
4879 	uint32_t frame_ptr;		/* frame pointer */
4880 	uint32_t stack_ptr;		/* stack pointer */
4881 	uint32_t hcmd;		/* last host command header */
4882 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4883 				 * rxtx_flag */
4884 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4885 				 * host_flag */
4886 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4887 				 * enc_flag */
4888 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4889 				 * time_flag */
4890 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4891 				 * wico interrupt */
4892 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4893 	uint32_t wait_event;		/* wait event() caller address */
4894 	uint32_t l2p_control;	/* L2pControlField */
4895 	uint32_t l2p_duration;	/* L2pDurationField */
4896 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4897 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4898 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4899 				 * (LMPM_PMG_SEL) */
4900 	uint32_t u_timestamp;	/* timestamp of the firmware compilation
4901 				 * date and time */
4902 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4903 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4904 
4905 /*
4906  * UMAC error struct - relevant starting from family 8000 chip.
4907  * Note: This structure is read from the device with IO accesses,
4908  * and the reading already does the endian conversion. As it is
4909  * read with u32-sized accesses, any members with a different size
4910  * need to be ordered correctly though!
4911  */
4912 struct iwm_umac_error_event_table {
4913 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4914 	uint32_t error_id;	/* type of error */
4915 	uint32_t blink1;	/* branch link */
4916 	uint32_t blink2;	/* branch link */
4917 	uint32_t ilink1;	/* interrupt link */
4918 	uint32_t ilink2;	/* interrupt link */
4919 	uint32_t data1;		/* error-specific data */
4920 	uint32_t data2;		/* error-specific data */
4921 	uint32_t data3;		/* error-specific data */
4922 	uint32_t umac_major;
4923 	uint32_t umac_minor;
4924 	uint32_t frame_pointer;	/* core register 27*/
4925 	uint32_t stack_pointer;	/* core register 28 */
4926 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4927 	uint32_t nic_isr_pref;	/* ISR status register */
4928 } __packed;
4929 
4930 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4931 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4932 
4933 #ifdef IWM_DEBUG
4934 static struct {
4935 	const char *name;
4936 	uint8_t num;
4937 } advanced_lookup[] = {
4938 	{ "NMI_INTERRUPT_WDG", 0x34 },
4939 	{ "SYSASSERT", 0x35 },
4940 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4941 	{ "BAD_COMMAND", 0x38 },
4942 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4943 	{ "FATAL_ERROR", 0x3D },
4944 	{ "NMI_TRM_HW_ERR", 0x46 },
4945 	{ "NMI_INTERRUPT_TRM", 0x4C },
4946 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4947 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4948 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4949 	{ "NMI_INTERRUPT_HOST", 0x66 },
4950 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
4951 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
4952 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4953 	{ "ADVANCED_SYSASSERT", 0 },
4954 };
4955 
4956 static const char *
4957 iwm_desc_lookup(uint32_t num)
4958 {
4959 	int i;
4960 
4961 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4962 		if (advanced_lookup[i].num == num)
4963 			return advanced_lookup[i].name;
4964 
4965 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4966 	return advanced_lookup[i].name;
4967 }
4968 
4969 static void
4970 iwm_nic_umac_error(struct iwm_softc *sc)
4971 {
4972 	struct iwm_umac_error_event_table table;
4973 	uint32_t base;
4974 
4975 	base = sc->sc_uc.uc_umac_error_event_table;
4976 
4977 	if (base < 0x800000) {
4978 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4979 		    base);
4980 		return;
4981 	}
4982 
4983 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4984 		device_printf(sc->sc_dev, "reading errlog failed\n");
4985 		return;
4986 	}
4987 
4988 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4989 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4990 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4991 		    sc->sc_flags, table.valid);
4992 	}
4993 
4994 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4995 		iwm_desc_lookup(table.error_id));
4996 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4997 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4998 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4999 	    table.ilink1);
5000 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5001 	    table.ilink2);
5002 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5003 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5004 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5005 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5006 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5007 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5008 	    table.frame_pointer);
5009 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5010 	    table.stack_pointer);
5011 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5012 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5013 	    table.nic_isr_pref);
5014 }
5015 
5016 /*
5017  * Support for dumping the error log seemed like a good idea ...
5018  * but it's mostly hex junk and the only sensible thing is the
5019  * hw/ucode revision (which we know anyway).  Since it's here,
5020  * I'll just leave it in, just in case e.g. the Intel guys want to
5021  * help us decipher some "ADVANCED_SYSASSERT" later.
5022  */
5023 static void
5024 iwm_nic_error(struct iwm_softc *sc)
5025 {
5026 	struct iwm_error_event_table table;
5027 	uint32_t base;
5028 
5029 	device_printf(sc->sc_dev, "dumping device error log\n");
5030 	base = sc->sc_uc.uc_error_event_table;
5031 	if (base < 0x800000) {
5032 		device_printf(sc->sc_dev,
5033 		    "Invalid error log pointer 0x%08x\n", base);
5034 		return;
5035 	}
5036 
5037 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5038 		device_printf(sc->sc_dev, "reading errlog failed\n");
5039 		return;
5040 	}
5041 
5042 	if (!table.valid) {
5043 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5044 		return;
5045 	}
5046 
5047 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5048 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5049 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5050 		    sc->sc_flags, table.valid);
5051 	}
5052 
5053 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5054 	    iwm_desc_lookup(table.error_id));
5055 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5056 	    table.trm_hw_status0);
5057 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5058 	    table.trm_hw_status1);
5059 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5060 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5061 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5062 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5063 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5064 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5065 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5066 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5067 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5068 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5069 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5070 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5071 	    table.fw_rev_type);
5072 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5073 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5074 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5075 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5076 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5077 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5078 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5079 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5080 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5081 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5082 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5083 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5084 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5085 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5086 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5087 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5088 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5089 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5090 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5091 
5092 	if (sc->sc_uc.uc_umac_error_event_table)
5093 		iwm_nic_umac_error(sc);
5094 }
5095 #endif
5096 
5097 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
5098 do {									\
5099 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5100 	_var_ = (void *)((_pkt_)+1);					\
5101 } while (/*CONSTCOND*/0)
5102 
5103 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
5104 do {									\
5105 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5106 	_ptr_ = (void *)((_pkt_)+1);					\
5107 } while (/*CONSTCOND*/0)
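/*
 * In both macros, (_pkt_)+1 points just past the struct iwm_rx_packet
 * header, i.e. at the response payload, and the bus_dmamap_sync()
 * makes the device's DMA writes visible to the CPU first.  Note that
 * both rely on 'ring' and 'data' being in scope at the use site.
 */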
5108 
5109 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5110 
5111 /*
5112  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5113  * Basic structure from if_iwn
5114  * Basic structure from if_iwn.
5115 static void
5116 iwm_notif_intr(struct iwm_softc *sc)
5117 {
5118 	uint16_t hw;
5119 
5120 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5121 	    BUS_DMASYNC_POSTREAD);
5122 
5123 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
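	/*
	 * closed_rb_num is the firmware's write index into the RX ring
	 * (apparently a 12-bit counter, hence the 0xfff mask); every
	 * buffer between our read index rxq.cur and hw holds a packet
	 * that is ready to be processed.
	 */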
5124 
5125 	/*
5126 	 * Process responses
5127 	 */
5128 	while (sc->rxq.cur != hw) {
5129 		struct iwm_rx_ring *ring = &sc->rxq;
5130 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5131 		struct iwm_rx_packet *pkt;
5132 		struct iwm_cmd_response *cresp;
5133 		int qid, idx, code;
5134 
5135 		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5136 		    BUS_DMASYNC_POSTREAD);
5137 		pkt = mtod(data->m, struct iwm_rx_packet *);
5138 
5139 		qid = pkt->hdr.qid & ~0x80;
5140 		idx = pkt->hdr.idx;
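		/*
		 * The top bit of the qid is a firmware flag, not part
		 * of the queue index (it seems to distinguish
		 * firmware-initiated notifications from command
		 * responses), hence the ~0x80 mask above.
		 */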
5141 
5142 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5143 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5144 		    "rx packet qid=%d idx=%d type=%x %d %d\n",
5145 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5146 
5147 		/*
5148 		 * We randomly get these from the firmware, no idea why.
5149 		 * They at least seem harmless, so just ignore them for now.
5150 		 */
5151 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5152 		    || pkt->len_n_flags == htole32(0x55550000))) {
5153 			ADVANCE_RXQ(sc);
5154 			continue;
5155 		}
5156 
5157 		switch (code) {
5158 		case IWM_REPLY_RX_PHY_CMD:
5159 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5160 			break;
5161 
5162 		case IWM_REPLY_RX_MPDU_CMD:
5163 			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5164 			break;
5165 
5166 		case IWM_TX_CMD:
5167 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
5168 			break;
5169 
5170 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5171 			struct iwm_missed_beacons_notif *resp;
5172 			int missed;
5173 
5174 			/* XXX look at mac_id to determine interface ID */
5175 			struct ieee80211com *ic = &sc->sc_ic;
5176 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5177 
5178 			SYNC_RESP_STRUCT(resp, pkt);
5179 			missed = le32toh(resp->consec_missed_beacons);
5180 
5181 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5182 			    "%s: MISSED_BEACON: mac_id=%d, "
5183 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5184 			    "num_rx=%d\n",
5185 			    __func__,
5186 			    le32toh(resp->mac_id),
5187 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5188 			    le32toh(resp->consec_missed_beacons),
5189 			    le32toh(resp->num_expected_beacons),
5190 			    le32toh(resp->num_recvd_beacons));
5191 
5192 			/* Be paranoid */
5193 			if (vap == NULL)
5194 				break;
5195 
5196 			/* XXX no net80211 locking? */
5197 			if (vap->iv_state == IEEE80211_S_RUN &&
5198 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5199 				if (missed > vap->iv_bmissthreshold) {
5200 					/* XXX bad locking; turn into task */
5201 					IWM_UNLOCK(sc);
5202 					ieee80211_beacon_miss(ic);
5203 					IWM_LOCK(sc);
5204 				}
5205 			}
5206 
5207 			break; }
5208 
5209 		case IWM_MFUART_LOAD_NOTIFICATION:
5210 			break;
5211 
5212 		case IWM_MVM_ALIVE: {
5213 			struct iwm_mvm_alive_resp_v1 *resp1;
5214 			struct iwm_mvm_alive_resp_v2 *resp2;
5215 			struct iwm_mvm_alive_resp_v3 *resp3;
5216 
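			/*
			 * The ALIVE response layout varies with firmware
			 * version; distinguish them by payload length.
			 */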
5217 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5218 				SYNC_RESP_STRUCT(resp1, pkt);
5219 				sc->sc_uc.uc_error_event_table
5220 				    = le32toh(resp1->error_event_table_ptr);
5221 				sc->sc_uc.uc_log_event_table
5222 				    = le32toh(resp1->log_event_table_ptr);
5223 				sc->sched_base = le32toh(resp1->scd_base_ptr);
5224 				if (resp1->status == IWM_ALIVE_STATUS_OK)
5225 					sc->sc_uc.uc_ok = 1;
5226 				else
5227 					sc->sc_uc.uc_ok = 0;
5228 			}
5229 
5230 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5231 				SYNC_RESP_STRUCT(resp2, pkt);
5232 				sc->sc_uc.uc_error_event_table
5233 				    = le32toh(resp2->error_event_table_ptr);
5234 				sc->sc_uc.uc_log_event_table
5235 				    = le32toh(resp2->log_event_table_ptr);
5236 				sc->sched_base = le32toh(resp2->scd_base_ptr);
5237 				sc->sc_uc.uc_umac_error_event_table
5238 				    = le32toh(resp2->error_info_addr);
5239 				if (resp2->status == IWM_ALIVE_STATUS_OK)
5240 					sc->sc_uc.uc_ok = 1;
5241 				else
5242 					sc->sc_uc.uc_ok = 0;
5243 			}
5244 
5245 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5246 				SYNC_RESP_STRUCT(resp3, pkt);
5247 				sc->sc_uc.uc_error_event_table
5248 				    = le32toh(resp3->error_event_table_ptr);
5249 				sc->sc_uc.uc_log_event_table
5250 				    = le32toh(resp3->log_event_table_ptr);
5251 				sc->sched_base = le32toh(resp3->scd_base_ptr);
5252 				sc->sc_uc.uc_umac_error_event_table
5253 				    = le32toh(resp3->error_info_addr);
5254 				if (resp3->status == IWM_ALIVE_STATUS_OK)
5255 					sc->sc_uc.uc_ok = 1;
5256 				else
5257 					sc->sc_uc.uc_ok = 0;
5258 			}
5259 
5260 			sc->sc_uc.uc_intr = 1;
5261 			wakeup(&sc->sc_uc);
5262 			break; }
5263 
5264 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
5265 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
5266 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
5267 
5268 			iwm_phy_db_set_section(sc, phy_db_notif);
5269 
5270 			break; }
5271 
5272 		case IWM_STATISTICS_NOTIFICATION: {
5273 			struct iwm_notif_statistics *stats;
5274 			SYNC_RESP_STRUCT(stats, pkt);
5275 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5276 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
5277 			break; }
5278 
5279 		case IWM_NVM_ACCESS_CMD:
5280 		case IWM_MCC_UPDATE_CMD:
5281 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5282 				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5283 				    BUS_DMASYNC_POSTREAD);
5284 				memcpy(sc->sc_cmd_resp,
5285 				    pkt, sizeof(sc->sc_cmd_resp));
5286 			}
5287 			break;
5288 
5289 		case IWM_MCC_CHUB_UPDATE_CMD: {
5290 			struct iwm_mcc_chub_notif *notif;
5291 			SYNC_RESP_STRUCT(notif, pkt);
5292 
5293 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5294 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5295 			sc->sc_fw_mcc[2] = '\0';
5296 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5297 			    "fw source %d sent CC '%s'\n",
5298 			    notif->source_id, sc->sc_fw_mcc);
5299 			break; }
5300 
5301 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5302 			break;
5303 
5304 		case IWM_PHY_CONFIGURATION_CMD:
5305 		case IWM_TX_ANT_CONFIGURATION_CMD:
5306 		case IWM_ADD_STA:
5307 		case IWM_MAC_CONTEXT_CMD:
5308 		case IWM_REPLY_SF_CFG_CMD:
5309 		case IWM_POWER_TABLE_CMD:
5310 		case IWM_PHY_CONTEXT_CMD:
5311 		case IWM_BINDING_CONTEXT_CMD:
5312 		case IWM_TIME_EVENT_CMD:
5313 		case IWM_SCAN_REQUEST_CMD:
5314 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5315 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5316 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5317 		case IWM_REPLY_BEACON_FILTERING_CMD:
5318 		case IWM_MAC_PM_POWER_TABLE:
5319 		case IWM_TIME_QUOTA_CMD:
5320 		case IWM_REMOVE_STA:
5321 		case IWM_TXPATH_FLUSH:
5322 		case IWM_LQ_CMD:
5323 		case IWM_BT_CONFIG:
5324 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5325 			SYNC_RESP_STRUCT(cresp, pkt);
5326 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5327 				memcpy(sc->sc_cmd_resp,
5328 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5329 			}
5330 			break;
5331 
5332 		/* ignore */
5333 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5334 			break;
5335 
5336 		case IWM_INIT_COMPLETE_NOTIF:
5337 			sc->sc_init_complete = 1;
5338 			wakeup(&sc->sc_init_complete);
5339 			break;
5340 
5341 		case IWM_SCAN_OFFLOAD_COMPLETE: {
5342 			struct iwm_periodic_scan_complete *notif;
5343 			SYNC_RESP_STRUCT(notif, pkt);
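			/* The notification contents are currently unused. */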
5344 
5345 			break; }
5346 
5347 		case IWM_SCAN_ITERATION_COMPLETE: {
5348 			struct iwm_lmac_scan_complete_notif *notif;
5349 			SYNC_RESP_STRUCT(notif, pkt);
5350 			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5351 			break; }
5352 
5353 		case IWM_SCAN_COMPLETE_UMAC: {
5354 			struct iwm_umac_scan_complete *notif;
5355 			SYNC_RESP_STRUCT(notif, pkt);
5356 
5357 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5358 			    "UMAC scan complete, status=0x%x\n",
5359 			    notif->status);
5360 #if 0	/* XXX This would be a duplicate scan end call */
5361 			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5362 #endif
5363 			break;
5364 		}
5365 
5366 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5367 			struct iwm_umac_scan_iter_complete_notif *notif;
5368 			SYNC_RESP_STRUCT(notif, pkt);
5369 
5370 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5371 			    "complete, status=0x%x, %d channels scanned\n",
5372 			    notif->status, notif->scanned_channels);
5373 			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5374 			break;
5375 		}
5376 
5377 		case IWM_REPLY_ERROR: {
5378 			struct iwm_error_resp *resp;
5379 			SYNC_RESP_STRUCT(resp, pkt);
5380 
5381 			device_printf(sc->sc_dev,
5382 			    "firmware error 0x%x, cmd 0x%x\n",
5383 			    le32toh(resp->error_type),
5384 			    resp->cmd_id);
5385 			break; }
5386 
5387 		case IWM_TIME_EVENT_NOTIFICATION: {
5388 			struct iwm_time_event_notif *notif;
5389 			SYNC_RESP_STRUCT(notif, pkt);
5390 
5391 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5392 			    "TE notif status = 0x%x action = 0x%x\n",
5393 			    notif->status, notif->action);
5394 			break; }
5395 
5396 		case IWM_MCAST_FILTER_CMD:
5397 			break;
5398 
5399 		case IWM_SCD_QUEUE_CFG: {
5400 			struct iwm_scd_txq_cfg_rsp *rsp;
5401 			SYNC_RESP_STRUCT(rsp, pkt);
5402 
5403 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5404 			    "queue cfg token=0x%x sta_id=%d "
5405 			    "tid=%d scd_queue=%d\n",
5406 			    rsp->token, rsp->sta_id, rsp->tid,
5407 			    rsp->scd_queue);
5408 			break;
5409 		}
5410 
5411 		default:
5412 			device_printf(sc->sc_dev,
5413 			    "frame %d/%d %x UNHANDLED (this should "
5414 			    "not happen)\n", qid, idx,
5415 			    pkt->len_n_flags);
5416 			break;
5417 		}
5418 
5419 		/*
5420 		 * Why test bit 0x80?  The Linux driver:
5421 		 *
5422 		 * There is one exception:  uCode sets bit 15 when it
5423 		 * originates the response/notification, i.e. when the
5424 		 * response/notification is not a direct response to a
5425 		 * command sent by the driver.  For example, uCode issues
5426 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5427 		 * it is not a direct response to any driver command.
5428 		 *
5429 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5430 		 * uses a slightly different format for pkt->hdr, and "qid"
5431 		 * is actually the upper byte of a two-byte field.
5432 		 */
5433 		if (!(pkt->hdr.qid & (1 << 7))) {
5434 			iwm_cmd_done(sc, pkt);
5435 		}
5436 
5437 		ADVANCE_RXQ(sc);
5438 	}
5439 
5440 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5441 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5442 
5443 	/*
5444 	 * Tell the firmware what we have processed.
5445 	 * Seems like the hardware gets upset unless we align
5446 	 * the write by 8??
5447 	 */
5448 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5449 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5450 }
5451 
5452 static void
5453 iwm_intr(void *arg)
5454 {
5455 	struct iwm_softc *sc = arg;
5456 	int handled = 0;
5457 	int r1, r2, rv = 0;
5458 	int isperiodic = 0;
5459 
5460 #if defined(__DragonFly__)
5461 	if (sc->sc_mem == NULL) {
5462 		kprintf("iwm_intr: detached\n");
5463 		return;
5464 	}
5465 #endif
5466 	IWM_LOCK(sc);
5467 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5468 
5469 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5470 		uint32_t *ict = sc->ict_dma.vaddr;
5471 		int tmp;
5472 
5473 		tmp = htole32(ict[sc->ict_cur]);
5474 		if (!tmp)
5475 			goto out_ena;
5476 
5477 		/*
5478 		 * ok, there was something.  keep plowing until we have all.
5479 		 */
5480 		r1 = r2 = 0;
5481 		while (tmp) {
5482 			r1 |= tmp;
5483 			ict[sc->ict_cur] = 0;
5484 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5485 			tmp = htole32(ict[sc->ict_cur]);
5486 		}
5487 
5488 		/* this is where the fun begins.  don't ask */
5489 		if (r1 == 0xffffffff)
5490 			r1 = 0;
5491 
5492 		/* i am not expected to understand this */
5493 		if (r1 & 0xc0000)
5494 			r1 |= 0x8000;
5495 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
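		/*
		 * XXX Apparently the ICT entries pack CSR_INT bits 0-7 and
		 * 24-31 into 16 bits; the above expands them back into the
		 * CSR_INT layout (cf. the Linux ICT handling).
		 */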
5496 	} else {
5497 		r1 = IWM_READ(sc, IWM_CSR_INT);
5498 		/* "hardware gone" (where, fishing?) */
5499 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5500 			goto out;
5501 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5502 	}
5503 	if (r1 == 0 && r2 == 0) {
5504 		goto out_ena;
5505 	}
5506 
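	/* Writing 1s to CSR_INT acks (clears) those interrupt bits. */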
5507 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5508 
5509 	/* ignored */
5510 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5511 
5512 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5513 		int i;
5514 		struct ieee80211com *ic = &sc->sc_ic;
5515 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5516 
5517 #ifdef IWM_DEBUG
5518 		iwm_nic_error(sc);
5519 #endif
5520 		/* Dump driver status (TX and RX rings) while we're here. */
5521 		device_printf(sc->sc_dev, "driver status:\n");
5522 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5523 			struct iwm_tx_ring *ring = &sc->txq[i];
5524 			device_printf(sc->sc_dev,
5525 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5526 			    "queued=%-3d\n",
5527 			    i, ring->qid, ring->cur, ring->queued);
5528 		}
5529 		device_printf(sc->sc_dev,
5530 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5531 		device_printf(sc->sc_dev,
5532 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5533 
5534 		/* Don't stop the device; just do a VAP restart */
5535 		IWM_UNLOCK(sc);
5536 
5537 		if (vap == NULL) {
5538 			kprintf("%s: null vap\n", __func__);
5539 			return;
5540 		}
5541 
5542 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5543 		    "restarting\n", __func__, vap->iv_state);
5544 
5545 		/* XXX TODO: turn this into a callout/taskqueue */
5546 		ieee80211_restart_all(ic);
5547 		return;
5548 	}
5549 
5550 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5551 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5552 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5553 		iwm_stop(sc);
5554 		rv = 1;
5555 		goto out;
5556 	}
5557 
5558 	/* firmware chunk loaded */
5559 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5560 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5561 		handled |= IWM_CSR_INT_BIT_FH_TX;
5562 		sc->sc_fw_chunk_done = 1;
5563 		wakeup(&sc->sc_fw);
5564 	}
5565 
5566 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5567 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5568 		if (iwm_check_rfkill(sc)) {
5569 			device_printf(sc->sc_dev,
5570 			    "%s: rfkill switch, disabling interface\n",
5571 			    __func__);
5572 			iwm_stop(sc);
5573 		}
5574 	}
5575 
5576 	/*
5577 	 * The Linux driver uses periodic interrupts to avoid races.
5578 	 * We cargo-cult like it's going out of fashion.
5579 	 */
5580 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5581 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5582 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5583 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5584 			IWM_WRITE_1(sc,
5585 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5586 		isperiodic = 1;
5587 	}
5588 
5589 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5590 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5591 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5592 
5593 		iwm_notif_intr(sc);
5594 
5595 		/* enable periodic interrupt, see above */
5596 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5597 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5598 			    IWM_CSR_INT_PERIODIC_ENA);
5599 	}
5600 
5601 	if (__predict_false(r1 & ~handled))
5602 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5603 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5604 	rv = 1;
5605 
5606  out_ena:
5607 	iwm_restore_interrupts(sc);
5608  out:
5609 	IWM_UNLOCK(sc);
5610 	return;
5611 }
5612 
5613 /*
5614  * Autoconf glue-sniffing
5615  */
5616 #define	PCI_VENDOR_INTEL		0x8086
5617 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5618 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5619 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5620 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5621 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5622 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5623 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5624 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5625 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5626 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5627 
5628 static const struct iwm_devices {
5629 	uint16_t	device;
5630 	const char	*name;
5631 } iwm_devices[] = {
5632 	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5633 	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5634 	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5635 	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5636 	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5637 	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5638 	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5639 	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5640 	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5641 	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5642 };
5643 
5644 static int
5645 iwm_probe(device_t dev)
5646 {
5647 	int i;
5648 
5649 	for (i = 0; i < nitems(iwm_devices); i++) {
5650 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5651 		    pci_get_device(dev) == iwm_devices[i].device) {
5652 			device_set_desc(dev, iwm_devices[i].name);
5653 			return (BUS_PROBE_DEFAULT);
5654 		}
5655 	}
5656 
5657 	return (ENXIO);
5658 }
5659 
5660 static int
5661 iwm_dev_check(device_t dev)
5662 {
5663 	struct iwm_softc *sc;
5664 
5665 	sc = device_get_softc(dev);
5666 
5667 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5668 	switch (pci_get_device(dev)) {
5669 	case PCI_PRODUCT_INTEL_WL_3160_1:
5670 	case PCI_PRODUCT_INTEL_WL_3160_2:
5671 		sc->sc_fwname = "iwm3160fw";
5672 		sc->host_interrupt_operation_mode = 1;
5673 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5674 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5675 		return (0);
5676 	case PCI_PRODUCT_INTEL_WL_3165_1:
5677 	case PCI_PRODUCT_INTEL_WL_3165_2:
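		/* XXX the 3165 is driven with the 7265 firmware image */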
5678 		sc->sc_fwname = "iwm7265fw";
5679 		sc->host_interrupt_operation_mode = 0;
5680 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5681 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5682 		return (0);
5683 	case PCI_PRODUCT_INTEL_WL_7260_1:
5684 	case PCI_PRODUCT_INTEL_WL_7260_2:
5685 		sc->sc_fwname = "iwm7260fw";
5686 		sc->host_interrupt_operation_mode = 1;
5687 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5688 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5689 		return (0);
5690 	case PCI_PRODUCT_INTEL_WL_7265_1:
5691 	case PCI_PRODUCT_INTEL_WL_7265_2:
5692 		sc->sc_fwname = "iwm7265fw";
5693 		sc->host_interrupt_operation_mode = 0;
5694 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5695 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5696 		return (0);
5697 	case PCI_PRODUCT_INTEL_WL_8260_1:
5698 	case PCI_PRODUCT_INTEL_WL_8260_2:
5699 		sc->sc_fwname = "iwm8000Cfw";
5700 		sc->host_interrupt_operation_mode = 0;
5701 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5702 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5703 		return (0);
5704 	default:
5705 		device_printf(dev, "unknown adapter type\n");
5706 		return (ENXIO);
5707 	}
5708 }
5709 
5710 static int
5711 iwm_pci_attach(device_t dev)
5712 {
5713 	struct iwm_softc *sc;
5714 	int count, error, rid;
5715 	uint16_t reg;
5716 #if defined(__DragonFly__)
5717 	int irq_flags;
5718 #endif
5719 
5720 	sc = device_get_softc(dev);
5721 
5722 	/* Clear device-specific "PCI retry timeout" register (41h). */
5723 	reg = pci_read_config(dev, 0x40, sizeof(reg));
5724 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5725 
5726 	/* Enable bus-mastering and hardware bug workaround. */
5727 	pci_enable_busmaster(dev);
5728 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5729 	/* If using INTx (no MSI), clear the pending-interrupt status bit. */
5730 	if (reg & PCIM_STATUS_INTxSTATE) {
5731 		reg &= ~PCIM_STATUS_INTxSTATE;
5732 	}
5733 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5734 
5735 	rid = PCIR_BAR(0);
5736 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5737 	    RF_ACTIVE);
5738 	if (sc->sc_mem == NULL) {
5739 		device_printf(sc->sc_dev, "can't map mem space\n");
5740 		return (ENXIO);
5741 	}
5742 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5743 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5744 
5745 	/* Install interrupt handler. */
5746 	count = 1;
5747 	rid = 0;
5748 #if defined(__DragonFly__)
5749 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5750 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5751 #else
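	/* Prefer MSI (rid 1); otherwise fall back to shared legacy INTx (rid 0). */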
5752 	if (pci_alloc_msi(dev, &count) == 0)
5753 		rid = 1;
5754 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5755 	    (rid != 0 ? 0 : RF_SHAREABLE));
5756 #endif
5757 	if (sc->sc_irq == NULL) {
5758 		device_printf(dev, "can't map interrupt\n");
5759 		return (ENXIO);
5760 	}
5761 #if defined(__DragonFly__)
5762 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5763 			       iwm_intr, sc, &sc->sc_ih,
5764 			       &wlan_global_serializer);
5765 #else
5766 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5767 	    NULL, iwm_intr, sc, &sc->sc_ih);
5768 #endif
5769 	if (sc->sc_ih == NULL) {
5770 		device_printf(dev, "can't establish interrupt\n");
5771 #if defined(__DragonFly__)
5772 		pci_release_msi(dev);
5773 #endif
5774 		return (ENXIO);
5775 	}
5776 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5777 
5778 	return (0);
5779 }
5780 
5781 static void
5782 iwm_pci_detach(device_t dev)
5783 {
5784 	struct iwm_softc *sc = device_get_softc(dev);
5785 
5786 	if (sc->sc_irq != NULL) {
5787 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5788 		bus_release_resource(dev, SYS_RES_IRQ,
5789 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5790 		pci_release_msi(dev);
5791 #if defined(__DragonFly__)
5792 		sc->sc_irq = NULL;
5793 #endif
5794 	}
5795 	if (sc->sc_mem != NULL) {
5796 		bus_release_resource(dev, SYS_RES_MEMORY,
5797 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5798 #if defined(__DragonFly__)
5799 		sc->sc_mem = NULL;
5800 #endif
5801 	}
5802 }
5803 
5804 
5805 
5806 static int
5807 iwm_attach(device_t dev)
5808 {
5809 	struct iwm_softc *sc = device_get_softc(dev);
5810 	struct ieee80211com *ic = &sc->sc_ic;
5811 	int error;
5812 	int txq_i, i;
5813 
5814 	sc->sc_dev = dev;
5815 	IWM_LOCK_INIT(sc);
5816 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5817 #if defined(__DragonFly__)
5818 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5819 #else
5820 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5821 #endif
5822 	callout_init(&sc->sc_led_blink_to);
5823 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5824 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5825 	    taskqueue_thread_enqueue, &sc->sc_tq);
5826 #if defined(__DragonFly__)
5827 	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
5828 	    -1, "iwm_taskq");
5829 #else
5830 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5831 #endif
5832 	if (error != 0) {
5833 		device_printf(dev, "can't start threads, error %d\n",
5834 		    error);
5835 		goto fail;
5836 	}
5837 
5838 	/* PCI attach */
5839 	error = iwm_pci_attach(dev);
5840 	if (error != 0)
5841 		goto fail;
5842 
5843 	sc->sc_wantresp = -1;
5844 
5845 	/* Check device type */
5846 	error = iwm_dev_check(dev);
5847 	if (error != 0)
5848 		goto fail;
5849 
5850 	/*
5851 	 * We now start fiddling with the hardware
5852 	 */
5853 	/*
5854 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5855 	 * changed, and now the revision step also includes bit 0-1 (no more
5856 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5857 	 * in the old format.
5858 	 */
5859 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5860 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5861 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5862 
5863 	if (iwm_prepare_card_hw(sc) != 0) {
5864 		device_printf(dev, "could not initialize hardware\n");
5865 		goto fail;
5866 	}
5867 
5868 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5869 		int ret;
5870 		uint32_t hw_step;
5871 
5872 		/*
5873 		 * In order to recognize C step the driver should read the
5874 		 * chip version id located at the AUX bus MISC address.
5875 		 */
5876 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5877 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5878 		DELAY(2);
5879 
5880 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5881 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5882 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5883 				   25000);
5884 		if (ret < 0) {
5885 			device_printf(sc->sc_dev,
5886 			    "Failed to wake up the nic\n");
5887 			goto fail;
5888 		}
5889 
5890 		if (iwm_nic_lock(sc)) {
5891 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5892 			hw_step |= IWM_ENABLE_WFPM;
5893 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5894 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5895 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5896 			if (hw_step == 0x3)
5897 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5898 						(IWM_SILICON_C_STEP << 2);
5899 			iwm_nic_unlock(sc);
5900 		} else {
5901 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5902 			goto fail;
5903 		}
5904 	}
5905 
5906 	/* Allocate DMA memory for firmware transfers. */
5907 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5908 		device_printf(dev, "could not allocate memory for firmware\n");
5909 		goto fail;
5910 	}
5911 
5912 	/* Allocate "Keep Warm" page. */
5913 	if ((error = iwm_alloc_kw(sc)) != 0) {
5914 		device_printf(dev, "could not allocate keep warm page\n");
5915 		goto fail;
5916 	}
5917 
5918 	/* We use ICT interrupts */
5919 	if ((error = iwm_alloc_ict(sc)) != 0) {
5920 		device_printf(dev, "could not allocate ICT table\n");
5921 		goto fail;
5922 	}
5923 
5924 	/* Allocate TX scheduler "rings". */
5925 	if ((error = iwm_alloc_sched(sc)) != 0) {
5926 		device_printf(dev, "could not allocate TX scheduler rings\n");
5927 		goto fail;
5928 	}
5929 
5930 	/* Allocate TX rings */
5931 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5932 		if ((error = iwm_alloc_tx_ring(sc,
5933 		    &sc->txq[txq_i], txq_i)) != 0) {
5934 			device_printf(dev,
5935 			    "could not allocate TX ring %d\n",
5936 			    txq_i);
5937 			goto fail;
5938 		}
5939 	}
5940 
5941 	/* Allocate RX ring. */
5942 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5943 		device_printf(dev, "could not allocate RX ring\n");
5944 		goto fail;
5945 	}
5946 
5947 	/* Clear pending interrupts. */
5948 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5949 
5950 	ic->ic_softc = sc;
5951 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5952 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5953 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5954 
5955 	/* Set device capabilities. */
5956 	ic->ic_caps =
5957 	    IEEE80211_C_STA |
5958 	    IEEE80211_C_WPA |		/* WPA/RSN */
5959 	    IEEE80211_C_WME |
5960 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5961 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5962 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5963 	    ;
5964 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5965 		sc->sc_phyctxt[i].id = i;
5966 		sc->sc_phyctxt[i].color = 0;
5967 		sc->sc_phyctxt[i].ref = 0;
5968 		sc->sc_phyctxt[i].channel = NULL;
5969 	}
5970 
5971 	/* Max RSSI */
5972 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5973 	sc->sc_preinit_hook.ich_func = iwm_preinit;
5974 	sc->sc_preinit_hook.ich_arg = sc;
5975 	sc->sc_preinit_hook.ich_desc = "iwm";
5976 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5977 		device_printf(dev, "config_intrhook_establish failed\n");
5978 		goto fail;
5979 	}
5980 
5981 #ifdef IWM_DEBUG
5982 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5983 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5984 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5985 #endif
5986 
5987 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5988 	    "<-%s\n", __func__);
5989 
5990 	return 0;
5991 
5992 	/* Free allocated memory if something failed during attachment. */
5993 fail:
5994 	iwm_detach_local(sc, 0);
5995 
5996 	return ENXIO;
5997 }
5998 
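/*
 * Reject multicast (low bit of the first octet set) and all-zero
 * addresses.
 */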
5999 static int
6000 iwm_is_valid_ether_addr(uint8_t *addr)
6001 {
6002 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6003 
6004 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6005 		return (FALSE);
6006 
6007 	return (TRUE);
6008 }
6009 
6010 static int
6011 iwm_update_edca(struct ieee80211com *ic)
6012 {
6013 	struct iwm_softc *sc = ic->ic_softc;
6014 
6015 	device_printf(sc->sc_dev, "%s: called\n", __func__);
6016 	return (0);
6017 }
6018 
6019 static void
6020 iwm_preinit(void *arg)
6021 {
6022 	struct iwm_softc *sc = arg;
6023 	device_t dev = sc->sc_dev;
6024 	struct ieee80211com *ic = &sc->sc_ic;
6025 	int error;
6026 
6027 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6028 	    "->%s\n", __func__);
6029 
6030 	IWM_LOCK(sc);
6031 	if ((error = iwm_start_hw(sc)) != 0) {
6032 		device_printf(dev, "could not initialize hardware\n");
6033 		IWM_UNLOCK(sc);
6034 		goto fail;
6035 	}
6036 
6037 	error = iwm_run_init_mvm_ucode(sc, 1);
6038 	iwm_stop_device(sc);
6039 	if (error) {
6040 		IWM_UNLOCK(sc);
6041 		goto fail;
6042 	}
6043 	device_printf(dev,
6044 	    "hw rev 0x%x, fw ver %s, address %s\n",
6045 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6046 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
6047 
6048 	/* not all hardware can do 5GHz band */
6049 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
6050 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6051 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6052 	IWM_UNLOCK(sc);
6053 
6054 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6055 	    ic->ic_channels);
6056 
6057 	/*
6058 	 * At this point we've committed - if we fail to do setup,
6059 	 * we now also have to tear down the net80211 state.
6060 	 */
6061 	ieee80211_ifattach(ic);
6062 	ic->ic_vap_create = iwm_vap_create;
6063 	ic->ic_vap_delete = iwm_vap_delete;
6064 	ic->ic_raw_xmit = iwm_raw_xmit;
6065 	ic->ic_node_alloc = iwm_node_alloc;
6066 	ic->ic_scan_start = iwm_scan_start;
6067 	ic->ic_scan_end = iwm_scan_end;
6068 	ic->ic_update_mcast = iwm_update_mcast;
6069 	ic->ic_getradiocaps = iwm_init_channel_map;
6070 	ic->ic_set_channel = iwm_set_channel;
6071 	ic->ic_scan_curchan = iwm_scan_curchan;
6072 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6073 	ic->ic_wme.wme_update = iwm_update_edca;
6074 	ic->ic_parent = iwm_parent;
6075 	ic->ic_transmit = iwm_transmit;
6076 	iwm_radiotap_attach(sc);
6077 	if (bootverbose)
6078 		ieee80211_announce(ic);
6079 
6080 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6081 	    "<-%s\n", __func__);
6082 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6083 
6084 	return;
6085 fail:
6086 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6087 	iwm_detach_local(sc, 0);
6088 }
6089 
6090 /*
6091  * Attach the interface to 802.11 radiotap.
6092  */
6093 static void
6094 iwm_radiotap_attach(struct iwm_softc *sc)
6095 {
6096 	struct ieee80211com *ic = &sc->sc_ic;
6097 
6098 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6099 	    "->%s begin\n", __func__);
6100 	ieee80211_radiotap_attach(ic,
6101 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6102 	    IWM_TX_RADIOTAP_PRESENT,
6103 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6104 	    IWM_RX_RADIOTAP_PRESENT);
6105 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6106 	    "->%s end\n", __func__);
6107 }
6108 
6109 static struct ieee80211vap *
6110 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6111     enum ieee80211_opmode opmode, int flags,
6112     const uint8_t bssid[IEEE80211_ADDR_LEN],
6113     const uint8_t mac[IEEE80211_ADDR_LEN])
6114 {
6115 	struct iwm_vap *ivp;
6116 	struct ieee80211vap *vap;
6117 
6118 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6119 		return NULL;
6120 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6121 	vap = &ivp->iv_vap;
6122 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6123 	vap->iv_bmissthreshold = 10;            /* override default */
6124 	/* Override with driver methods. */
6125 	ivp->iv_newstate = vap->iv_newstate;
6126 	vap->iv_newstate = iwm_newstate;
6127 
6128 	ieee80211_ratectl_init(vap);
6129 	/* Complete setup. */
6130 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6131 	    mac);
6132 	ic->ic_opmode = opmode;
6133 
6134 	return vap;
6135 }
6136 
6137 static void
6138 iwm_vap_delete(struct ieee80211vap *vap)
6139 {
6140 	struct iwm_vap *ivp = IWM_VAP(vap);
6141 
6142 	ieee80211_ratectl_deinit(vap);
6143 	ieee80211_vap_detach(vap);
6144 	kfree(ivp, M_80211_VAP);
6145 }
6146 
6147 static void
6148 iwm_scan_start(struct ieee80211com *ic)
6149 {
6150 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6151 	struct iwm_softc *sc = ic->ic_softc;
6152 	int error;
6153 
6154 	IWM_LOCK(sc);
6155 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6156 		error = iwm_mvm_umac_scan(sc);
6157 	else
6158 		error = iwm_mvm_lmac_scan(sc);
6159 	if (error != 0) {
6160 		device_printf(sc->sc_dev, "could not initiate scan\n");
6161 		IWM_UNLOCK(sc);
6162 		ieee80211_cancel_scan(vap);
6163 	} else {
6164 		iwm_led_blink_start(sc);
6165 		IWM_UNLOCK(sc);
6166 	}
6167 }
6168 
6169 static void
6170 iwm_scan_end(struct ieee80211com *ic)
6171 {
6172 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6173 	struct iwm_softc *sc = ic->ic_softc;
6174 
6175 	IWM_LOCK(sc);
6176 	iwm_led_blink_stop(sc);
6177 	if (vap->iv_state == IEEE80211_S_RUN)
6178 		iwm_mvm_led_enable(sc);
6179 	IWM_UNLOCK(sc);
6180 }
6181 
6182 static void
6183 iwm_update_mcast(struct ieee80211com *ic)
6184 {
6185 }
6186 
6187 static void
6188 iwm_set_channel(struct ieee80211com *ic)
6189 {
6190 }
6191 
6192 static void
6193 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6194 {
6195 }
6196 
6197 static void
6198 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6199 {
6201 }
6202 
6203 void
6204 iwm_init_task(void *arg1)
6205 {
6206 	struct iwm_softc *sc = arg1;
6207 
6208 	IWM_LOCK(sc);
6209 	while (sc->sc_flags & IWM_FLAG_BUSY) {
6210 #if defined(__DragonFly__)
6211 		iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6212 #else
6213 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6214 #endif
6215 	}
6216 	sc->sc_flags |= IWM_FLAG_BUSY;
6217 	iwm_stop(sc);
6218 	if (sc->sc_ic.ic_nrunning > 0)
6219 		iwm_init(sc);
6220 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6221 	wakeup(&sc->sc_flags);
6222 	IWM_UNLOCK(sc);
6223 }
6224 
6225 static int
6226 iwm_resume(device_t dev)
6227 {
6228 	struct iwm_softc *sc = device_get_softc(dev);
6229 	int do_reinit = 0;
6230 	uint16_t reg;
6231 
6232 	/* Clear device-specific "PCI retry timeout" register (41h). */
6233 	reg = pci_read_config(dev, 0x40, sizeof(reg));
6234 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6235 	iwm_init_task(sc);
6236 
6237 	IWM_LOCK(sc);
6238 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6239 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6240 		do_reinit = 1;
6241 	}
6242 	IWM_UNLOCK(sc);
6243 
6244 	if (do_reinit)
6245 		ieee80211_resume_all(&sc->sc_ic);
6246 
6247 	return 0;
6248 }
6249 
6250 static int
6251 iwm_suspend(device_t dev)
6252 {
6253 	int do_stop = 0;
6254 	struct iwm_softc *sc = device_get_softc(dev);
6255 
6256 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6257 
6258 	ieee80211_suspend_all(&sc->sc_ic);
6259 
6260 	if (do_stop) {
6261 		IWM_LOCK(sc);
6262 		iwm_stop(sc);
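		/*
		 * Set IWM_FLAG_SCANNING so that iwm_resume() knows to
		 * reinitialize the interface (see iwm_resume()).
		 */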
6263 		sc->sc_flags |= IWM_FLAG_SCANNING;
6264 		IWM_UNLOCK(sc);
6265 	}
6266 
6267 	return (0);
6268 }
6269 
6270 static int
6271 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6272 {
6273 	struct iwm_fw_info *fw = &sc->sc_fw;
6274 	device_t dev = sc->sc_dev;
6275 	int i;
6276 
6277 	if (sc->sc_tq) {
6278 #if defined(__DragonFly__)
6279 		/* taskqueue_drain_all() doesn't exist on DragonFly; taskqueue_free() drains pending tasks */
6280 #else
6281 		taskqueue_drain_all(sc->sc_tq);
6282 #endif
6283 		taskqueue_free(sc->sc_tq);
6284 #if defined(__DragonFly__)
6285 		sc->sc_tq = NULL;
6286 #endif
6287 	}
6288 	callout_drain(&sc->sc_led_blink_to);
6289 	callout_drain(&sc->sc_watchdog_to);
6290 	iwm_stop_device(sc);
6291 	if (do_net80211) {
6292 		ieee80211_ifdetach(&sc->sc_ic);
6293 	}
6294 
6295 	iwm_phy_db_free(sc);
6296 
6297 	/* Free descriptor rings */
6298 	iwm_free_rx_ring(sc, &sc->rxq);
6299 	for (i = 0; i < nitems(sc->txq); i++)
6300 		iwm_free_tx_ring(sc, &sc->txq[i]);
6301 
6302 	/* Free firmware */
6303 	if (fw->fw_fp != NULL)
6304 		iwm_fw_info_free(fw);
6305 
6306 	/* Free scheduler */
6307 	iwm_free_sched(sc);
6308 	if (sc->ict_dma.vaddr != NULL)
6309 		iwm_free_ict(sc);
6310 	if (sc->kw_dma.vaddr != NULL)
6311 		iwm_free_kw(sc);
6312 	if (sc->fw_dma.vaddr != NULL)
6313 		iwm_free_fwmem(sc);
6314 
6315 	/* Finished with the hardware - detach things */
6316 	iwm_pci_detach(dev);
6317 
6318 	mbufq_drain(&sc->sc_snd);
6319 	IWM_LOCK_DESTROY(sc);
6320 
6321 	return (0);
6322 }
6323 
6324 static int
6325 iwm_detach(device_t dev)
6326 {
6327 	struct iwm_softc *sc = device_get_softc(dev);
6328 
6329 	return (iwm_detach_local(sc, 1));
6330 }
6331 
6332 static device_method_t iwm_pci_methods[] = {
6333 	/* Device interface */
6334 	DEVMETHOD(device_probe,		iwm_probe),
6335 	DEVMETHOD(device_attach,	iwm_attach),
6336 	DEVMETHOD(device_detach,	iwm_detach),
6337 	DEVMETHOD(device_suspend,	iwm_suspend),
6338 	DEVMETHOD(device_resume,	iwm_resume),
6339 
6340 	DEVMETHOD_END
6341 };
6342 
6343 static driver_t iwm_pci_driver = {
6344 	"iwm",
6345 	iwm_pci_methods,
6346 	sizeof(struct iwm_softc)
6347 };
6348 
6349 static devclass_t iwm_devclass;
6350 
6351 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6352 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6353 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6354 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6355