xref: /dflybsd-src/sys/dev/netif/iwm/if_iwm.c (revision 8d378610e3b5687c707bc8aad4e11a3a96bea2fc)
1 /*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *				DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *	 changes to remove per-device network interface (DragonFly has not
110  *	 caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
114  *				specifications to M_INTWAIT.  We still don't
115  *				understand why FreeBSD uses M_NOWAIT for
116  *				critical must-not-fail kmalloc()s).
117  *	free -> kfree
118  *	printf -> kprintf
119  *	(bug fix) memset in iwm_reset_rx_ring.
120  *	(debug)   added several kprintf()s on error
121  *
122  *	header file paths (DFly allows localized path specifications).
123  *	minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *	packet counters
128  *	msleep -> lksleep
129  *	mtx -> lk  (mtx functions -> lockmgr functions)
130  *	callout differences
131  *	taskqueue differences
132  *	MSI differences
133  *	bus_setup_intr() differences
134  *	minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138 
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150 
151 #include <machine/endian.h>
152 
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155 
156 #include <net/bpf.h>
157 
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164 
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169 
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174 
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_pcie_trans.h"
189 #include "if_iwm_led.h"
190 #include "if_iwm_fw.h"
191 
192 const uint8_t iwm_nvm_channels[] = {
193 	/* 2.4 GHz */
194 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
195 	/* 5 GHz */
196 	36, 40, 44, 48, 52, 56, 60, 64,
197 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
198 	149, 153, 157, 161, 165
199 };
200 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
201     "IWM_NUM_CHANNELS is too small");
202 
203 const uint8_t iwm_nvm_channels_8000[] = {
204 	/* 2.4 GHz */
205 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
206 	/* 5 GHz */
207 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
208 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
209 	149, 153, 157, 161, 165, 169, 173, 177, 181
210 };
211 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
212     "IWM_NUM_CHANNELS_8000 is too small");
213 
214 #define IWM_NUM_2GHZ_CHANNELS	14
215 #define IWM_N_HW_ADDR_MASK	0xF
216 
217 /*
218  * XXX For now, there's simply a fixed set of rate table entries
219  * that are populated.
220  */
221 const struct iwm_rate {
222 	uint8_t rate;
223 	uint8_t plcp;
224 } iwm_rates[] = {
225 	{   2,	IWM_RATE_1M_PLCP  },
226 	{   4,	IWM_RATE_2M_PLCP  },
227 	{  11,	IWM_RATE_5M_PLCP  },
228 	{  22,	IWM_RATE_11M_PLCP },
229 	{  12,	IWM_RATE_6M_PLCP  },
230 	{  18,	IWM_RATE_9M_PLCP  },
231 	{  24,	IWM_RATE_12M_PLCP },
232 	{  36,	IWM_RATE_18M_PLCP },
233 	{  48,	IWM_RATE_24M_PLCP },
234 	{  72,	IWM_RATE_36M_PLCP },
235 	{  96,	IWM_RATE_48M_PLCP },
236 	{ 108,	IWM_RATE_54M_PLCP },
237 };
238 #define IWM_RIDX_CCK	0
239 #define IWM_RIDX_OFDM	4
240 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
241 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
242 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
243 
244 struct iwm_nvm_section {
245 	uint16_t length;
246 	uint8_t *data;
247 };
248 
249 #define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
250 #define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
251 
252 struct iwm_mvm_alive_data {
253 	int valid;
254 	uint32_t scd_base_addr;
255 };
256 
257 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
258 static int	iwm_firmware_store_section(struct iwm_softc *,
259                                            enum iwm_ucode_type,
260                                            const uint8_t *, size_t);
261 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
262 static void	iwm_fw_info_free(struct iwm_fw_info *);
263 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
264 #if !defined(__DragonFly__)
265 static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
266 #endif
267 static int	iwm_alloc_fwmem(struct iwm_softc *);
268 static int	iwm_alloc_sched(struct iwm_softc *);
269 static int	iwm_alloc_kw(struct iwm_softc *);
270 static int	iwm_alloc_ict(struct iwm_softc *);
271 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
272 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
273 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
275                                   int);
276 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
277 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 static void	iwm_enable_interrupts(struct iwm_softc *);
279 static void	iwm_restore_interrupts(struct iwm_softc *);
280 static void	iwm_disable_interrupts(struct iwm_softc *);
281 static void	iwm_ict_reset(struct iwm_softc *);
282 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
283 static void	iwm_stop_device(struct iwm_softc *);
284 static void	iwm_mvm_nic_config(struct iwm_softc *);
285 static int	iwm_nic_rx_init(struct iwm_softc *);
286 static int	iwm_nic_tx_init(struct iwm_softc *);
287 static int	iwm_nic_init(struct iwm_softc *);
288 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
289 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
290 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
291                                    uint16_t, uint8_t *, uint16_t *);
292 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
293 				     uint16_t *, uint32_t);
294 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
295 static void	iwm_add_channel_band(struct iwm_softc *,
296 		    struct ieee80211_channel[], int, int *, int, size_t,
297 		    const uint8_t[]);
298 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
299 		    struct ieee80211_channel[]);
300 static struct iwm_nvm_data *
301 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
302 			   const uint16_t *, const uint16_t *,
303 			   const uint16_t *, const uint16_t *,
304 			   const uint16_t *);
305 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
306 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
307 					       struct iwm_nvm_data *,
308 					       const uint16_t *,
309 					       const uint16_t *);
310 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
311 			    const uint16_t *);
312 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
313 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
314 				  const uint16_t *);
315 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
316 				   const uint16_t *);
317 static void	iwm_set_radio_cfg(const struct iwm_softc *,
318 				  struct iwm_nvm_data *, uint32_t);
319 static struct iwm_nvm_data *
320 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
321 static int	iwm_nvm_init(struct iwm_softc *);
322 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
323 				      const struct iwm_fw_desc *);
324 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
325 					     bus_addr_t, uint32_t);
326 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
327 						const struct iwm_fw_sects *,
328 						int, int *);
329 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
330 					   const struct iwm_fw_sects *,
331 					   int, int *);
332 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
333 					       const struct iwm_fw_sects *);
334 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
335 					  const struct iwm_fw_sects *);
336 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
337 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
338 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
339 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
340                                               enum iwm_ucode_type);
341 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
342 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
343 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
344 					    struct iwm_rx_phy_info *);
345 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
346                                       struct iwm_rx_packet *);
347 static int	iwm_get_noise(struct iwm_softc *sc,
348 		    const struct iwm_mvm_statistics_rx_non_phy *);
349 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *);
350 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
351                                          struct iwm_rx_packet *,
352 				         struct iwm_node *);
353 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
354 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
355 #if 0
356 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
357                                  uint16_t);
358 #endif
359 static const struct iwm_rate *
360 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
361 			struct ieee80211_frame *, struct iwm_tx_cmd *);
362 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
363                        struct ieee80211_node *, int);
364 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
365 			     const struct ieee80211_bpf_params *);
366 static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
367 				      uint32_t tfd_msk, uint32_t flags);
368 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
369 					        struct iwm_mvm_add_sta_cmd *,
370                                                 int *);
371 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
372                                        int);
373 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
374 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
375 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
376                                            struct iwm_int_sta *,
377 				           const uint8_t *, uint16_t, uint16_t);
378 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
379 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
380 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
381 static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
382 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
383 static struct ieee80211_node *
384 		iwm_node_alloc(struct ieee80211vap *,
385 		               const uint8_t[IEEE80211_ADDR_LEN]);
386 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
387 static int	iwm_media_change(struct ifnet *);
388 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
389 static void	iwm_endscan_cb(void *, int);
390 static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
391 					struct iwm_sf_cfg_cmd *,
392 					struct ieee80211_node *);
393 static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
394 static int	iwm_send_bt_init_conf(struct iwm_softc *);
395 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
396 static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
397 static int	iwm_init_hw(struct iwm_softc *);
398 static void	iwm_init(struct iwm_softc *);
399 static void	iwm_start(struct iwm_softc *);
400 static void	iwm_stop(struct iwm_softc *);
401 static void	iwm_watchdog(void *);
402 static void	iwm_parent(struct ieee80211com *);
403 #ifdef IWM_DEBUG
404 static const char *
405 		iwm_desc_lookup(uint32_t);
406 static void	iwm_nic_error(struct iwm_softc *);
407 static void	iwm_nic_umac_error(struct iwm_softc *);
408 #endif
409 static void	iwm_notif_intr(struct iwm_softc *);
410 static void	iwm_intr(void *);
411 static int	iwm_attach(device_t);
412 static int	iwm_is_valid_ether_addr(uint8_t *);
413 static void	iwm_preinit(void *);
414 static int	iwm_detach_local(struct iwm_softc *sc, int);
415 static void	iwm_init_task(void *);
416 static void	iwm_radiotap_attach(struct iwm_softc *);
417 static struct ieee80211vap *
418 		iwm_vap_create(struct ieee80211com *,
419 		               const char [IFNAMSIZ], int,
420 		               enum ieee80211_opmode, int,
421 		               const uint8_t [IEEE80211_ADDR_LEN],
422 		               const uint8_t [IEEE80211_ADDR_LEN]);
423 static void	iwm_vap_delete(struct ieee80211vap *);
424 static void	iwm_scan_start(struct ieee80211com *);
425 static void	iwm_scan_end(struct ieee80211com *);
426 static void	iwm_update_mcast(struct ieee80211com *);
427 static void	iwm_set_channel(struct ieee80211com *);
428 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
429 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
430 static int	iwm_detach(device_t);
431 
432 #if defined(__DragonFly__)
433 static int	iwm_msi_enable = 1;
434 
435 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
436 
437 #endif
438 
439 /*
440  * Firmware parser.
441  */
442 
443 static int
444 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
445 {
446 	const struct iwm_fw_cscheme_list *l = (const void *)data;
447 
448 	if (dlen < sizeof(*l) ||
449 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
450 		return EINVAL;
451 
452 	/* we don't actually store anything for now, always use s/w crypto */
453 
454 	return 0;
455 }
456 
457 static int
458 iwm_firmware_store_section(struct iwm_softc *sc,
459     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
460 {
461 	struct iwm_fw_sects *fws;
462 	struct iwm_fw_desc *fwone;
463 
464 	if (type >= IWM_UCODE_TYPE_MAX)
465 		return EINVAL;
466 	if (dlen < sizeof(uint32_t))
467 		return EINVAL;
468 
469 	fws = &sc->sc_fw.fw_sects[type];
470 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
471 		return EINVAL;
472 
473 	fwone = &fws->fw_sect[fws->fw_count];
474 
475 	/* first 32bit are device load offset */
476 	memcpy(&fwone->offset, data, sizeof(uint32_t));
477 
478 	/* rest is data */
479 	fwone->data = data + sizeof(uint32_t);
480 	fwone->len = dlen - sizeof(uint32_t);
481 
482 	fws->fw_count++;
483 
484 	return 0;
485 }
486 
487 #define IWM_DEFAULT_SCAN_CHANNELS 40
488 
489 struct iwm_tlv_calib_data {
490 	uint32_t ucode_type;
491 	struct iwm_tlv_calib_ctrl calib;
492 } __packed;
493 
494 static int
495 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
496 {
497 	const struct iwm_tlv_calib_data *def_calib = data;
498 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
499 
500 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
501 		device_printf(sc->sc_dev,
502 		    "Wrong ucode_type %u for default "
503 		    "calibration.\n", ucode_type);
504 		return EINVAL;
505 	}
506 
507 	sc->sc_default_calib[ucode_type].flow_trigger =
508 	    def_calib->calib.flow_trigger;
509 	sc->sc_default_calib[ucode_type].event_trigger =
510 	    def_calib->calib.event_trigger;
511 
512 	return 0;
513 }
514 
515 static int
516 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
517 			struct iwm_ucode_capabilities *capa)
518 {
519 	const struct iwm_ucode_api *ucode_api = (const void *)data;
520 	uint32_t api_index = le32toh(ucode_api->api_index);
521 	uint32_t api_flags = le32toh(ucode_api->api_flags);
522 	int i;
523 
524 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
525 		device_printf(sc->sc_dev,
526 		    "api flags index %d larger than supported by driver\n",
527 		    api_index);
528 		/* don't return an error so we can load FW that has more bits */
529 		return 0;
530 	}
531 
532 	for (i = 0; i < 32; i++) {
533 		if (api_flags & (1U << i))
534 			setbit(capa->enabled_api, i + 32 * api_index);
535 	}
536 
537 	return 0;
538 }
539 
540 static int
541 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
542 			   struct iwm_ucode_capabilities *capa)
543 {
544 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
545 	uint32_t api_index = le32toh(ucode_capa->api_index);
546 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
547 	int i;
548 
549 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
550 		device_printf(sc->sc_dev,
551 		    "capa flags index %d larger than supported by driver\n",
552 		    api_index);
553 		/* don't return an error so we can load FW that has more bits */
554 		return 0;
555 	}
556 
557 	for (i = 0; i < 32; i++) {
558 		if (api_flags & (1U << i))
559 			setbit(capa->enabled_capa, i + 32 * api_index);
560 	}
561 
562 	return 0;
563 }
564 
565 static void
566 iwm_fw_info_free(struct iwm_fw_info *fw)
567 {
568 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
569 	fw->fw_fp = NULL;
570 	/* don't touch fw->fw_status */
571 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
572 }
573 
574 static int
575 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
576 {
577 	struct iwm_fw_info *fw = &sc->sc_fw;
578 	const struct iwm_tlv_ucode_header *uhdr;
579 	const struct iwm_ucode_tlv *tlv;
580 	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
581 	enum iwm_ucode_tlv_type tlv_type;
582 	const struct firmware *fwp;
583 	const uint8_t *data;
584 	uint32_t tlv_len;
585 	uint32_t usniffer_img;
586 	const uint8_t *tlv_data;
587 	uint32_t paging_mem_size;
588 	int num_of_cpus;
589 	int error = 0;
590 	size_t len;
591 
592 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
593 	    ucode_type != IWM_UCODE_INIT)
594 		return 0;
595 
596 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
597 #if defined(__DragonFly__)
598 		lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
599 #else
600 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
601 #endif
602 	}
603 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
604 
605 	if (fw->fw_fp != NULL)
606 		iwm_fw_info_free(fw);
607 
608 	/*
609 	 * Load firmware into driver memory.
610 	 * fw_fp will be set.
611 	 */
612 	IWM_UNLOCK(sc);
613 	fwp = firmware_get(sc->cfg->fw_name);
614 	IWM_LOCK(sc);
615 	if (fwp == NULL) {
616 		device_printf(sc->sc_dev,
617 		    "could not read firmware %s (error %d)\n",
618 		    sc->cfg->fw_name, error);
619 		goto out;
620 	}
621 	fw->fw_fp = fwp;
622 
623 	/* (Re-)Initialize default values. */
624 	capa->flags = 0;
625 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
626 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
627 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
628 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
629 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
630 
631 	/*
632 	 * Parse firmware contents
633 	 */
634 
635 	uhdr = (const void *)fw->fw_fp->data;
636 	if (*(const uint32_t *)fw->fw_fp->data != 0
637 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
638 		device_printf(sc->sc_dev, "invalid firmware %s\n",
639 		    sc->cfg->fw_name);
640 		error = EINVAL;
641 		goto out;
642 	}
643 
644 	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
645 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
646 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
647 	    IWM_UCODE_API(le32toh(uhdr->ver)));
648 	data = uhdr->data;
649 	len = fw->fw_fp->datasize - sizeof(*uhdr);
650 
651 	while (len >= sizeof(*tlv)) {
652 		len -= sizeof(*tlv);
653 		tlv = (const void *)data;
654 
655 		tlv_len = le32toh(tlv->length);
656 		tlv_type = le32toh(tlv->type);
657 		tlv_data = tlv->data;
658 
659 		if (len < tlv_len) {
660 			device_printf(sc->sc_dev,
661 			    "firmware too short: %zu bytes\n",
662 			    len);
663 			error = EINVAL;
664 			goto parse_out;
665 		}
666 		len -= roundup2(tlv_len, 4);
667 		data += sizeof(tlv) + roundup2(tlv_len, 4);
668 
669 		switch ((int)tlv_type) {
670 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
671 			if (tlv_len != sizeof(uint32_t)) {
672 				device_printf(sc->sc_dev,
673 				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
674 				    __func__,
675 				    (int) tlv_len);
676 				error = EINVAL;
677 				goto parse_out;
678 			}
679 			capa->max_probe_length =
680 			    le32_to_cpup((const uint32_t *)tlv_data);
681 			/* limit it to something sensible */
682 			if (capa->max_probe_length >
683 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
684 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
685 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
686 				    "ridiculous\n", __func__);
687 				error = EINVAL;
688 				goto parse_out;
689 			}
690 			break;
691 		case IWM_UCODE_TLV_PAN:
692 			if (tlv_len) {
693 				device_printf(sc->sc_dev,
694 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
695 				    __func__,
696 				    (int) tlv_len);
697 				error = EINVAL;
698 				goto parse_out;
699 			}
700 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
701 			break;
702 		case IWM_UCODE_TLV_FLAGS:
703 			if (tlv_len < sizeof(uint32_t)) {
704 				device_printf(sc->sc_dev,
705 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
706 				    __func__,
707 				    (int) tlv_len);
708 				error = EINVAL;
709 				goto parse_out;
710 			}
711 			if (tlv_len % sizeof(uint32_t)) {
712 				device_printf(sc->sc_dev,
713 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
714 				    __func__,
715 				    (int) tlv_len);
716 				error = EINVAL;
717 				goto parse_out;
718 			}
719 			/*
720 			 * Apparently there can be many flags, but Linux driver
721 			 * parses only the first one, and so do we.
722 			 *
723 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
724 			 * Intentional or a bug?  Observations from
725 			 * current firmware file:
726 			 *  1) TLV_PAN is parsed first
727 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
728 			 * ==> this resets TLV_PAN to itself... hnnnk
729 			 */
730 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
731 			break;
732 		case IWM_UCODE_TLV_CSCHEME:
733 			if ((error = iwm_store_cscheme(sc,
734 			    tlv_data, tlv_len)) != 0) {
735 				device_printf(sc->sc_dev,
736 				    "%s: iwm_store_cscheme(): returned %d\n",
737 				    __func__,
738 				    error);
739 				goto parse_out;
740 			}
741 			break;
742 		case IWM_UCODE_TLV_NUM_OF_CPU:
743 			if (tlv_len != sizeof(uint32_t)) {
744 				device_printf(sc->sc_dev,
745 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
746 				    __func__,
747 				    (int) tlv_len);
748 				error = EINVAL;
749 				goto parse_out;
750 			}
751 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
752 			if (num_of_cpus == 2) {
753 				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
754 					TRUE;
755 				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
756 					TRUE;
757 				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
758 					TRUE;
759 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
760 				device_printf(sc->sc_dev,
761 				    "%s: Driver supports only 1 or 2 CPUs\n",
762 				    __func__);
763 				error = EINVAL;
764 				goto parse_out;
765 			}
766 			break;
767 		case IWM_UCODE_TLV_SEC_RT:
768 			if ((error = iwm_firmware_store_section(sc,
769 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
770 				device_printf(sc->sc_dev,
771 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
772 				    __func__,
773 				    error);
774 				goto parse_out;
775 			}
776 			break;
777 		case IWM_UCODE_TLV_SEC_INIT:
778 			if ((error = iwm_firmware_store_section(sc,
779 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
780 				device_printf(sc->sc_dev,
781 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
782 				    __func__,
783 				    error);
784 				goto parse_out;
785 			}
786 			break;
787 		case IWM_UCODE_TLV_SEC_WOWLAN:
788 			if ((error = iwm_firmware_store_section(sc,
789 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
790 				device_printf(sc->sc_dev,
791 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
792 				    __func__,
793 				    error);
794 				goto parse_out;
795 			}
796 			break;
797 		case IWM_UCODE_TLV_DEF_CALIB:
798 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
799 				device_printf(sc->sc_dev,
800 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
801 				    __func__,
802 				    (int) tlv_len,
803 				    (int) sizeof(struct iwm_tlv_calib_data));
804 				error = EINVAL;
805 				goto parse_out;
806 			}
807 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
808 				device_printf(sc->sc_dev,
809 				    "%s: iwm_set_default_calib() failed: %d\n",
810 				    __func__,
811 				    error);
812 				goto parse_out;
813 			}
814 			break;
815 		case IWM_UCODE_TLV_PHY_SKU:
816 			if (tlv_len != sizeof(uint32_t)) {
817 				error = EINVAL;
818 				device_printf(sc->sc_dev,
819 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
820 				    __func__,
821 				    (int) tlv_len);
822 				goto parse_out;
823 			}
824 			sc->sc_fw.phy_config =
825 			    le32_to_cpup((const uint32_t *)tlv_data);
826 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
827 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
828 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
829 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
830 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
831 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
832 			break;
833 
834 		case IWM_UCODE_TLV_API_CHANGES_SET: {
835 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
836 				error = EINVAL;
837 				goto parse_out;
838 			}
839 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
840 				error = EINVAL;
841 				goto parse_out;
842 			}
843 			break;
844 		}
845 
846 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
847 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
848 				error = EINVAL;
849 				goto parse_out;
850 			}
851 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
852 				error = EINVAL;
853 				goto parse_out;
854 			}
855 			break;
856 		}
857 
858 		case 48: /* undocumented TLV */
859 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
860 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
861 			/* ignore, not used by current driver */
862 			break;
863 
864 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
865 			if ((error = iwm_firmware_store_section(sc,
866 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
867 			    tlv_len)) != 0)
868 				goto parse_out;
869 			break;
870 
871 		case IWM_UCODE_TLV_PAGING:
872 			if (tlv_len != sizeof(uint32_t)) {
873 				error = EINVAL;
874 				goto parse_out;
875 			}
876 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
877 
878 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
879 			    "%s: Paging: paging enabled (size = %u bytes)\n",
880 			    __func__, paging_mem_size);
881 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
882 				device_printf(sc->sc_dev,
883 					"%s: Paging: driver supports up to %u bytes for paging image\n",
884 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
885 				error = EINVAL;
886 				goto out;
887 			}
888 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
889 				device_printf(sc->sc_dev,
890 				    "%s: Paging: image isn't multiple %u\n",
891 				    __func__, IWM_FW_PAGING_SIZE);
892 				error = EINVAL;
893 				goto out;
894 			}
895 
896 			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
897 			    paging_mem_size;
898 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
899 			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
900 			    paging_mem_size;
901 			break;
902 
903 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
904 			if (tlv_len != sizeof(uint32_t)) {
905 				error = EINVAL;
906 				goto parse_out;
907 			}
908 			capa->n_scan_channels =
909 			    le32_to_cpup((const uint32_t *)tlv_data);
910 			break;
911 
912 		case IWM_UCODE_TLV_FW_VERSION:
913 			if (tlv_len != sizeof(uint32_t) * 3) {
914 				error = EINVAL;
915 				goto parse_out;
916 			}
917 			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
918 			    "%d.%d.%d",
919 			    le32toh(((const uint32_t *)tlv_data)[0]),
920 			    le32toh(((const uint32_t *)tlv_data)[1]),
921 			    le32toh(((const uint32_t *)tlv_data)[2]));
922 			break;
923 
924 		case IWM_UCODE_TLV_FW_MEM_SEG:
925 			break;
926 
927 		default:
928 			device_printf(sc->sc_dev,
929 			    "%s: unknown firmware section %d, abort\n",
930 			    __func__, tlv_type);
931 			error = EINVAL;
932 			goto parse_out;
933 		}
934 	}
935 
936 	KASSERT(error == 0, ("unhandled error"));
937 
938  parse_out:
939 	if (error) {
940 		device_printf(sc->sc_dev, "firmware parse error %d, "
941 		    "section type %d\n", error, tlv_type);
942 	}
943 
944  out:
945 	if (error) {
946 		fw->fw_status = IWM_FW_STATUS_NONE;
947 		if (fw->fw_fp != NULL)
948 			iwm_fw_info_free(fw);
949 	} else
950 		fw->fw_status = IWM_FW_STATUS_DONE;
951 	wakeup(&sc->sc_fw);
952 
953 	return error;
954 }
955 
956 /*
957  * DMA resource routines
958  */
959 
960 /* fwmem is used to load firmware onto the card */
961 static int
962 iwm_alloc_fwmem(struct iwm_softc *sc)
963 {
964 	/* Must be aligned on a 16-byte boundary. */
965 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
966 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
967 }
968 
969 /* tx scheduler rings.  not used? */
970 static int
971 iwm_alloc_sched(struct iwm_softc *sc)
972 {
973 	/* TX scheduler rings must be aligned on a 1KB boundary. */
974 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
975 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
976 }
977 
978 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
979 static int
980 iwm_alloc_kw(struct iwm_softc *sc)
981 {
982 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
983 }
984 
985 /* interrupt cause table */
986 static int
987 iwm_alloc_ict(struct iwm_softc *sc)
988 {
989 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
990 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
991 }
992 
993 static int
994 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
995 {
996 	bus_size_t size;
997 	int i, error;
998 
999 	ring->cur = 0;
1000 
1001 	/* Allocate RX descriptors (256-byte aligned). */
1002 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1003 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1004 	if (error != 0) {
1005 		device_printf(sc->sc_dev,
1006 		    "could not allocate RX ring DMA memory\n");
1007 		goto fail;
1008 	}
1009 	ring->desc = ring->desc_dma.vaddr;
1010 
1011 	/* Allocate RX status area (16-byte aligned). */
1012 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1013 	    sizeof(*ring->stat), 16);
1014 	if (error != 0) {
1015 		device_printf(sc->sc_dev,
1016 		    "could not allocate RX status DMA memory\n");
1017 		goto fail;
1018 	}
1019 	ring->stat = ring->stat_dma.vaddr;
1020 
1021         /* Create RX buffer DMA tag. */
1022 #if defined(__DragonFly__)
1023         error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1024 				   0,
1025 				   BUS_SPACE_MAXADDR_32BIT,
1026 				   BUS_SPACE_MAXADDR,
1027 				   NULL, NULL,
1028 				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
1029 				   BUS_DMA_NOWAIT, &ring->data_dmat);
1030 #else
1031         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1032             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1033             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1034 #endif
1035         if (error != 0) {
1036                 device_printf(sc->sc_dev,
1037                     "%s: could not create RX buf DMA tag, error %d\n",
1038                     __func__, error);
1039                 goto fail;
1040         }
1041 
1042 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1043 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1044 	if (error != 0) {
1045 		device_printf(sc->sc_dev,
1046 		    "%s: could not create RX buf DMA map, error %d\n",
1047 		    __func__, error);
1048 		goto fail;
1049 	}
1050 	/*
1051 	 * Allocate and map RX buffers.
1052 	 */
1053 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1054 		struct iwm_rx_data *data = &ring->data[i];
1055 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1056 		if (error != 0) {
1057 			device_printf(sc->sc_dev,
1058 			    "%s: could not create RX buf DMA map, error %d\n",
1059 			    __func__, error);
1060 			goto fail;
1061 		}
1062 		data->m = NULL;
1063 
1064 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1065 			goto fail;
1066 		}
1067 	}
1068 	return 0;
1069 
1070 fail:	iwm_free_rx_ring(sc, ring);
1071 	return error;
1072 }
1073 
1074 static void
1075 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1076 {
1077 	/* Reset the ring state */
1078 	ring->cur = 0;
1079 
1080 	/*
1081 	 * The hw rx ring index in shared memory must also be cleared,
1082 	 * otherwise the discrepancy can cause reprocessing chaos.
1083 	 */
1084 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1085 }
1086 
1087 static void
1088 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1089 {
1090 	int i;
1091 
1092 	iwm_dma_contig_free(&ring->desc_dma);
1093 	iwm_dma_contig_free(&ring->stat_dma);
1094 
1095 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1096 		struct iwm_rx_data *data = &ring->data[i];
1097 
1098 		if (data->m != NULL) {
1099 			bus_dmamap_sync(ring->data_dmat, data->map,
1100 			    BUS_DMASYNC_POSTREAD);
1101 			bus_dmamap_unload(ring->data_dmat, data->map);
1102 			m_freem(data->m);
1103 			data->m = NULL;
1104 		}
1105 		if (data->map != NULL) {
1106 			bus_dmamap_destroy(ring->data_dmat, data->map);
1107 			data->map = NULL;
1108 		}
1109 	}
1110 	if (ring->spare_map != NULL) {
1111 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1112 		ring->spare_map = NULL;
1113 	}
1114 	if (ring->data_dmat != NULL) {
1115 		bus_dma_tag_destroy(ring->data_dmat);
1116 		ring->data_dmat = NULL;
1117 	}
1118 }
1119 
1120 static int
1121 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1122 {
1123 	bus_addr_t paddr;
1124 	bus_size_t size;
1125 	size_t maxsize;
1126 	int nsegments;
1127 	int i, error;
1128 
1129 	ring->qid = qid;
1130 	ring->queued = 0;
1131 	ring->cur = 0;
1132 
1133 	/* Allocate TX descriptors (256-byte aligned). */
1134 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1135 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1136 	if (error != 0) {
1137 		device_printf(sc->sc_dev,
1138 		    "could not allocate TX ring DMA memory\n");
1139 		goto fail;
1140 	}
1141 	ring->desc = ring->desc_dma.vaddr;
1142 
1143 	/*
1144 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1145 	 * to allocate commands space for other rings.
1146 	 */
1147 	if (qid > IWM_MVM_CMD_QUEUE)
1148 		return 0;
1149 
1150 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1151 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1152 	if (error != 0) {
1153 		device_printf(sc->sc_dev,
1154 		    "could not allocate TX cmd DMA memory\n");
1155 		goto fail;
1156 	}
1157 	ring->cmd = ring->cmd_dma.vaddr;
1158 
1159 	/* FW commands may require more mapped space than packets. */
1160 	if (qid == IWM_MVM_CMD_QUEUE) {
1161 		maxsize = IWM_RBUF_SIZE;
1162 		nsegments = 1;
1163 	} else {
1164 		maxsize = MCLBYTES;
1165 		nsegments = IWM_MAX_SCATTER - 2;
1166 	}
1167 
1168 #if defined(__DragonFly__)
1169 	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1170 				   0,
1171 				   BUS_SPACE_MAXADDR_32BIT,
1172 				   BUS_SPACE_MAXADDR,
1173 				   NULL, NULL,
1174 				   maxsize, nsegments, maxsize,
1175 				   BUS_DMA_NOWAIT, &ring->data_dmat);
1176 #else
1177 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1178 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1179             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1180 #endif
1181 	if (error != 0) {
1182 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1183 		goto fail;
1184 	}
1185 
1186 	paddr = ring->cmd_dma.paddr;
1187 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1188 		struct iwm_tx_data *data = &ring->data[i];
1189 
1190 		data->cmd_paddr = paddr;
1191 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1192 		    + offsetof(struct iwm_tx_cmd, scratch);
1193 		paddr += sizeof(struct iwm_device_cmd);
1194 
1195 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1196 		if (error != 0) {
1197 			device_printf(sc->sc_dev,
1198 			    "could not create TX buf DMA map\n");
1199 			goto fail;
1200 		}
1201 	}
1202 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1203 	    ("invalid physical address"));
1204 	return 0;
1205 
1206 fail:	iwm_free_tx_ring(sc, ring);
1207 	return error;
1208 }
1209 
1210 static void
1211 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1212 {
1213 	int i;
1214 
1215 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1216 		struct iwm_tx_data *data = &ring->data[i];
1217 
1218 		if (data->m != NULL) {
1219 			bus_dmamap_sync(ring->data_dmat, data->map,
1220 			    BUS_DMASYNC_POSTWRITE);
1221 			bus_dmamap_unload(ring->data_dmat, data->map);
1222 			m_freem(data->m);
1223 			data->m = NULL;
1224 		}
1225 	}
1226 	/* Clear TX descriptors. */
1227 	memset(ring->desc, 0, ring->desc_dma.size);
1228 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1229 	    BUS_DMASYNC_PREWRITE);
1230 	sc->qfullmsk &= ~(1 << ring->qid);
1231 	ring->queued = 0;
1232 	ring->cur = 0;
1233 
1234 	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1235 		iwm_pcie_clear_cmd_in_flight(sc);
1236 }
1237 
1238 static void
1239 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1240 {
1241 	int i;
1242 
1243 	iwm_dma_contig_free(&ring->desc_dma);
1244 	iwm_dma_contig_free(&ring->cmd_dma);
1245 
1246 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1247 		struct iwm_tx_data *data = &ring->data[i];
1248 
1249 		if (data->m != NULL) {
1250 			bus_dmamap_sync(ring->data_dmat, data->map,
1251 			    BUS_DMASYNC_POSTWRITE);
1252 			bus_dmamap_unload(ring->data_dmat, data->map);
1253 			m_freem(data->m);
1254 			data->m = NULL;
1255 		}
1256 		if (data->map != NULL) {
1257 			bus_dmamap_destroy(ring->data_dmat, data->map);
1258 			data->map = NULL;
1259 		}
1260 	}
1261 	if (ring->data_dmat != NULL) {
1262 		bus_dma_tag_destroy(ring->data_dmat);
1263 		ring->data_dmat = NULL;
1264 	}
1265 }
1266 
1267 /*
1268  * High-level hardware frobbing routines
1269  */
1270 
1271 static void
1272 iwm_enable_interrupts(struct iwm_softc *sc)
1273 {
1274 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1275 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1276 }
1277 
1278 static void
1279 iwm_restore_interrupts(struct iwm_softc *sc)
1280 {
1281 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1282 }
1283 
1284 static void
1285 iwm_disable_interrupts(struct iwm_softc *sc)
1286 {
1287 	/* disable interrupts */
1288 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1289 
1290 	/* acknowledge all interrupts */
1291 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1292 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1293 }
1294 
1295 static void
1296 iwm_ict_reset(struct iwm_softc *sc)
1297 {
1298 	iwm_disable_interrupts(sc);
1299 
1300 	/* Reset ICT table. */
1301 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1302 	sc->ict_cur = 0;
1303 
1304 	/* Set physical address of ICT table (4KB aligned). */
1305 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1306 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1307 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1308 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1309 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1310 
1311 	/* Switch to ICT interrupt mode in driver. */
1312 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1313 
1314 	/* Re-enable interrupts. */
1315 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1316 	iwm_enable_interrupts(sc);
1317 }
1318 
1319 /*
1320  * Since this .. hard-resets things, it's time to actually
1321  * mark the first vap (if any) as having no mac context.
1322  * It's annoying, but since the driver is potentially being
1323  * stop/start'ed whilst active (thanks openbsd port!) we
1324  * have to correctly track this.
1325  */
1326 static void
1327 iwm_stop_device(struct iwm_softc *sc)
1328 {
1329 	struct ieee80211com *ic = &sc->sc_ic;
1330 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1331 	int chnl, qid;
1332 	uint32_t mask = 0;
1333 
1334 	/* tell the device to stop sending interrupts */
1335 	iwm_disable_interrupts(sc);
1336 
1337 	/*
1338 	 * FreeBSD-local: mark the first vap as not-uploaded,
1339 	 * so the next transition through auth/assoc
1340 	 * will correctly populate the MAC context.
1341 	 */
1342 	if (vap) {
1343 		struct iwm_vap *iv = IWM_VAP(vap);
1344 		iv->phy_ctxt = NULL;
1345 		iv->is_uploaded = 0;
1346 	}
1347 
1348 	/* device going down, Stop using ICT table */
1349 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1350 
1351 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1352 
1353 	if (iwm_nic_lock(sc)) {
1354 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1355 
1356 		/* Stop each Tx DMA channel */
1357 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1358 			IWM_WRITE(sc,
1359 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1360 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1361 		}
1362 
1363 		/* Wait for DMA channels to be idle */
1364 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1365 		    5000)) {
1366 			device_printf(sc->sc_dev,
1367 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1368 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1369 		}
1370 		iwm_nic_unlock(sc);
1371 	}
1372 	iwm_pcie_rx_stop(sc);
1373 
1374 	/* Stop RX ring. */
1375 	iwm_reset_rx_ring(sc, &sc->rxq);
1376 
1377 	/* Reset all TX rings. */
1378 	for (qid = 0; qid < nitems(sc->txq); qid++)
1379 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1380 
1381 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1382 		/* Power-down device's busmaster DMA clocks */
1383 		if (iwm_nic_lock(sc)) {
1384 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1385 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1386 			iwm_nic_unlock(sc);
1387 		}
1388 		DELAY(5);
1389 	}
1390 
1391 	/* Make sure (redundant) we've released our request to stay awake */
1392 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1393 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1394 
1395 	/* Stop the device, and put it in low power state */
1396 	iwm_apm_stop(sc);
1397 
1398 	/* stop and reset the on-board processor */
1399 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1400 	DELAY(1000);
1401 
1402 	/*
1403 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1404 	 * This is a bug in certain verions of the hardware.
1405 	 * Certain devices also keep sending HW RF kill interrupt all
1406 	 * the time, unless the interrupt is ACKed even if the interrupt
1407 	 * should be masked. Re-ACK all the interrupts here.
1408 	 */
1409 	iwm_disable_interrupts(sc);
1410 
1411 	/*
1412 	 * Even if we stop the HW, we still want the RF kill
1413 	 * interrupt
1414 	 */
1415 	iwm_enable_rfkill_int(sc);
1416 	iwm_check_rfkill(sc);
1417 }
1418 
1419 static void
1420 iwm_mvm_nic_config(struct iwm_softc *sc)
1421 {
1422 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1423 	uint32_t reg_val = 0;
1424 	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1425 
1426 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1427 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1428 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1429 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1430 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1431 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1432 
1433 	/* SKU control */
1434 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1435 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1436 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1437 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1438 
1439 	/* radio configuration */
1440 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1441 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1442 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1443 
1444 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1445 
1446 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1447 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1448 	    radio_cfg_step, radio_cfg_dash);
1449 
1450 	/*
1451 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1452 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1453 	 * to lose ownership and not being able to obtain it back.
1454 	 */
1455 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1456 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1457 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1458 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1459 	}
1460 }
1461 
1462 static int
1463 iwm_nic_rx_init(struct iwm_softc *sc)
1464 {
1465 	/*
1466 	 * Initialize RX ring.  This is from the iwn driver.
1467 	 */
1468 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1469 
1470 	/* Stop Rx DMA */
1471 	iwm_pcie_rx_stop(sc);
1472 
1473 	if (!iwm_nic_lock(sc))
1474 		return EBUSY;
1475 
1476 	/* reset and flush pointers */
1477 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1478 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1479 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1480 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1481 
1482 	/* Set physical address of RX ring (256-byte aligned). */
1483 	IWM_WRITE(sc,
1484 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1485 
1486 	/* Set physical address of RX status (16-byte aligned). */
1487 	IWM_WRITE(sc,
1488 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1489 
1490 #if defined(__DragonFly__)
1491 	/* Force serialization (probably not needed but don't trust the HW) */
1492 	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1493 #endif
1494 
1495 	/* Enable RX. */
1496 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1497 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1498 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1499 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1500 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
1501 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1502 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1503 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1504 
1505 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1506 
1507 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1508 	if (sc->cfg->host_interrupt_operation_mode)
1509 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1510 
1511 	/*
1512 	 * Thus sayeth el jefe (iwlwifi) via a comment:
1513 	 *
1514 	 * This value should initially be 0 (before preparing any
1515 	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1516 	 */
1517 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1518 
1519 	iwm_nic_unlock(sc);
1520 
1521 	return 0;
1522 }
1523 
1524 static int
1525 iwm_nic_tx_init(struct iwm_softc *sc)
1526 {
1527 	int qid;
1528 
1529 	if (!iwm_nic_lock(sc))
1530 		return EBUSY;
1531 
1532 	/* Deactivate TX scheduler. */
1533 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1534 
1535 	/* Set physical address of "keep warm" page (16-byte aligned). */
1536 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1537 
1538 	/* Initialize TX rings. */
1539 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1540 		struct iwm_tx_ring *txq = &sc->txq[qid];
1541 
1542 		/* Set physical address of TX ring (256-byte aligned). */
1543 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1544 		    txq->desc_dma.paddr >> 8);
1545 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1546 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1547 		    __func__,
1548 		    qid, txq->desc,
1549 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1550 	}
1551 
1552 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1553 
1554 	iwm_nic_unlock(sc);
1555 
1556 	return 0;
1557 }
1558 
1559 static int
1560 iwm_nic_init(struct iwm_softc *sc)
1561 {
1562 	int error;
1563 
1564 	iwm_apm_init(sc);
1565 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1566 		iwm_set_pwr(sc);
1567 
1568 	iwm_mvm_nic_config(sc);
1569 
1570 	if ((error = iwm_nic_rx_init(sc)) != 0)
1571 		return error;
1572 
1573 	/*
1574 	 * Ditto for TX, from iwn
1575 	 */
1576 	if ((error = iwm_nic_tx_init(sc)) != 0)
1577 		return error;
1578 
1579 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1580 	    "%s: shadow registers enabled\n", __func__);
1581 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1582 
1583 	return 0;
1584 }
1585 
1586 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1587 	IWM_MVM_TX_FIFO_VO,
1588 	IWM_MVM_TX_FIFO_VI,
1589 	IWM_MVM_TX_FIFO_BE,
1590 	IWM_MVM_TX_FIFO_BK,
1591 };
1592 
1593 static int
1594 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1595 {
1596 	if (!iwm_nic_lock(sc)) {
1597 		device_printf(sc->sc_dev,
1598 		    "%s: cannot enable txq %d\n",
1599 		    __func__,
1600 		    qid);
1601 		return EBUSY;
1602 	}
1603 
1604 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1605 
1606 	if (qid == IWM_MVM_CMD_QUEUE) {
1607 		/* unactivate before configuration */
1608 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1609 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1610 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1611 
1612 		iwm_nic_unlock(sc);
1613 
1614 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1615 
1616 		if (!iwm_nic_lock(sc)) {
1617 			device_printf(sc->sc_dev,
1618 			    "%s: cannot enable txq %d\n", __func__, qid);
1619 			return EBUSY;
1620 		}
1621 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1622 		iwm_nic_unlock(sc);
1623 
1624 		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1625 		/* Set scheduler window size and frame limit. */
1626 		iwm_write_mem32(sc,
1627 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1628 		    sizeof(uint32_t),
1629 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1630 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1631 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1632 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1633 
1634 		if (!iwm_nic_lock(sc)) {
1635 			device_printf(sc->sc_dev,
1636 			    "%s: cannot enable txq %d\n", __func__, qid);
1637 			return EBUSY;
1638 		}
1639 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1640 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1641 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1642 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1643 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1644 	} else {
1645 		struct iwm_scd_txq_cfg_cmd cmd;
1646 		int error;
1647 
1648 		iwm_nic_unlock(sc);
1649 
1650 		memset(&cmd, 0, sizeof(cmd));
1651 		cmd.scd_queue = qid;
1652 		cmd.enable = 1;
1653 		cmd.sta_id = sta_id;
1654 		cmd.tx_fifo = fifo;
1655 		cmd.aggregate = 0;
1656 		cmd.window = IWM_FRAME_LIMIT;
1657 
1658 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1659 		    sizeof(cmd), &cmd);
1660 		if (error) {
1661 			device_printf(sc->sc_dev,
1662 			    "cannot enable txq %d\n", qid);
1663 			return error;
1664 		}
1665 
1666 		if (!iwm_nic_lock(sc))
1667 			return EBUSY;
1668 	}
1669 
1670 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1671 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1672 
1673 	iwm_nic_unlock(sc);
1674 
1675 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1676 	    __func__, qid, fifo);
1677 
1678 	return 0;
1679 }
1680 
1681 static int
1682 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1683 {
1684 	int error, chnl;
1685 
1686 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1687 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1688 
1689 	if (!iwm_nic_lock(sc))
1690 		return EBUSY;
1691 
1692 	iwm_ict_reset(sc);
1693 
1694 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1695 	if (scd_base_addr != 0 &&
1696 	    scd_base_addr != sc->scd_base_addr) {
1697 		device_printf(sc->sc_dev,
1698 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1699 		    __func__, sc->scd_base_addr, scd_base_addr);
1700 	}
1701 
1702 	iwm_nic_unlock(sc);
1703 
1704 	/* reset context data, TX status and translation data */
1705 	error = iwm_write_mem(sc,
1706 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1707 	    NULL, clear_dwords);
1708 	if (error)
1709 		return EBUSY;
1710 
1711 	if (!iwm_nic_lock(sc))
1712 		return EBUSY;
1713 
1714 	/* Set physical address of TX scheduler rings (1KB aligned). */
1715 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1716 
1717 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1718 
1719 	iwm_nic_unlock(sc);
1720 
1721 	/* enable command channel */
1722 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1723 	if (error)
1724 		return error;
1725 
1726 	if (!iwm_nic_lock(sc))
1727 		return EBUSY;
1728 
1729 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1730 
1731 	/* Enable DMA channels. */
1732 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1733 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1734 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1735 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1736 	}
1737 
1738 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1739 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1740 
1741 	iwm_nic_unlock(sc);
1742 
1743 	/* Enable L1-Active */
1744 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1745 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1746 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1747 	}
1748 
1749 	return error;
1750 }
1751 
1752 /*
1753  * NVM read access and content parsing.  We do not support
1754  * external NVM or writing NVM.
1755  * iwlwifi/mvm/nvm.c
1756  */
1757 
1758 /* Default NVM size to read */
1759 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1760 
1761 #define IWM_NVM_WRITE_OPCODE 1
1762 #define IWM_NVM_READ_OPCODE 0
1763 
1764 /* load nvm chunk response */
1765 enum {
1766 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1767 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1768 };
1769 
1770 static int
1771 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1772 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1773 {
1774 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1775 		.offset = htole16(offset),
1776 		.length = htole16(length),
1777 		.type = htole16(section),
1778 		.op_code = IWM_NVM_READ_OPCODE,
1779 	};
1780 	struct iwm_nvm_access_resp *nvm_resp;
1781 	struct iwm_rx_packet *pkt;
1782 	struct iwm_host_cmd cmd = {
1783 		.id = IWM_NVM_ACCESS_CMD,
1784 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1785 		.data = { &nvm_access_cmd, },
1786 	};
1787 	int ret, bytes_read, offset_read;
1788 	uint8_t *resp_data;
1789 
1790 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1791 
1792 	ret = iwm_send_cmd(sc, &cmd);
1793 	if (ret) {
1794 		device_printf(sc->sc_dev,
1795 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1796 		return ret;
1797 	}
1798 
1799 	pkt = cmd.resp_pkt;
1800 
1801 	/* Extract NVM response */
1802 	nvm_resp = (void *)pkt->data;
1803 	ret = le16toh(nvm_resp->status);
1804 	bytes_read = le16toh(nvm_resp->length);
1805 	offset_read = le16toh(nvm_resp->offset);
1806 	resp_data = nvm_resp->data;
1807 	if (ret) {
1808 		if ((offset != 0) &&
1809 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1810 			/*
1811 			 * meaning of NOT_VALID_ADDRESS:
1812 			 * driver try to read chunk from address that is
1813 			 * multiple of 2K and got an error since addr is empty.
1814 			 * meaning of (offset != 0): driver already
1815 			 * read valid data from another chunk so this case
1816 			 * is not an error.
1817 			 */
1818 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1819 				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1820 				    offset);
1821 			*len = 0;
1822 			ret = 0;
1823 		} else {
1824 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825 				    "NVM access command failed with status %d\n", ret);
1826 			ret = EIO;
1827 		}
1828 		goto exit;
1829 	}
1830 
1831 	if (offset_read != offset) {
1832 		device_printf(sc->sc_dev,
1833 		    "NVM ACCESS response with invalid offset %d\n",
1834 		    offset_read);
1835 		ret = EINVAL;
1836 		goto exit;
1837 	}
1838 
1839 	if (bytes_read > length) {
1840 		device_printf(sc->sc_dev,
1841 		    "NVM ACCESS response with too much data "
1842 		    "(%d bytes requested, %d bytes received)\n",
1843 		    length, bytes_read);
1844 		ret = EINVAL;
1845 		goto exit;
1846 	}
1847 
1848 	/* Write data to NVM */
1849 	memcpy(data + offset, resp_data, bytes_read);
1850 	*len = bytes_read;
1851 
1852  exit:
1853 	iwm_free_resp(sc, &cmd);
1854 	return ret;
1855 }
1856 
1857 /*
1858  * Reads an NVM section completely.
1859  * NICs prior to 7000 family don't have a real NVM, but just read
1860  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1861  * by uCode, we need to manually check in this case that we don't
1862  * overflow and try to read more than the EEPROM size.
1863  * For 7000 family NICs, we supply the maximal size we can read, and
1864  * the uCode fills the response with as much data as we can,
1865  * without overflowing, so no check is needed.
1866  */
1867 static int
1868 iwm_nvm_read_section(struct iwm_softc *sc,
1869 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1870 {
1871 	uint16_t seglen, length, offset = 0;
1872 	int ret;
1873 
1874 	/* Set nvm section read length */
1875 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1876 
1877 	seglen = length;
1878 
1879 	/* Read the NVM until exhausted (reading less than requested) */
1880 	while (seglen == length) {
1881 		/* Check no memory assumptions fail and cause an overflow */
1882 		if ((size_read + offset + length) >
1883 		    sc->cfg->eeprom_size) {
1884 			device_printf(sc->sc_dev,
1885 			    "EEPROM size is too small for NVM\n");
1886 			return ENOBUFS;
1887 		}
1888 
1889 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1890 		if (ret) {
1891 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1892 				    "Cannot read NVM from section %d offset %d, length %d\n",
1893 				    section, offset, length);
1894 			return ret;
1895 		}
1896 		offset += seglen;
1897 	}
1898 
1899 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1900 		    "NVM section %d read completed\n", section);
1901 	*len = offset;
1902 	return 0;
1903 }
1904 
1905 /* NVM offsets (in words) definitions */
1906 enum iwm_nvm_offsets {
1907 	/* NVM HW-Section offset (in words) definitions */
1908 	IWM_HW_ADDR = 0x15,
1909 
1910 /* NVM SW-Section offset (in words) definitions */
1911 	IWM_NVM_SW_SECTION = 0x1C0,
1912 	IWM_NVM_VERSION = 0,
1913 	IWM_RADIO_CFG = 1,
1914 	IWM_SKU = 2,
1915 	IWM_N_HW_ADDRS = 3,
1916 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1917 
1918 /* NVM calibration section offset (in words) definitions */
1919 	IWM_NVM_CALIB_SECTION = 0x2B8,
1920 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1921 };
1922 
1923 enum iwm_8000_nvm_offsets {
1924 	/* NVM HW-Section offset (in words) definitions */
1925 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1926 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1927 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1928 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1929 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1930 
1931 	/* NVM SW-Section offset (in words) definitions */
1932 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1933 	IWM_NVM_VERSION_8000 = 0,
1934 	IWM_RADIO_CFG_8000 = 0,
1935 	IWM_SKU_8000 = 2,
1936 	IWM_N_HW_ADDRS_8000 = 3,
1937 
1938 	/* NVM REGULATORY -Section offset (in words) definitions */
1939 	IWM_NVM_CHANNELS_8000 = 0,
1940 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1941 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1942 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1943 
1944 	/* NVM calibration section offset (in words) definitions */
1945 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1946 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1947 };
1948 
1949 /* SKU Capabilities (actual values from NVM definition) */
1950 enum nvm_sku_bits {
1951 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1952 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1953 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1954 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1955 };
1956 
1957 /* radio config bits (actual values from NVM definition) */
1958 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1959 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1960 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1961 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1962 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1963 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1964 
1965 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1966 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1967 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1968 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1969 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1970 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1971 
1972 #define DEFAULT_MAX_TX_POWER 16
1973 
1974 /**
1975  * enum iwm_nvm_channel_flags - channel flags in NVM
1976  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1977  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1978  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1979  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1980  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1981  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1982  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1983  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1984  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1985  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1986  */
1987 enum iwm_nvm_channel_flags {
1988 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1989 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1990 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1991 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1992 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1993 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1994 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1995 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1996 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1997 };
1998 
1999 /*
2000  * Translate EEPROM flags to net80211.
2001  */
2002 static uint32_t
2003 iwm_eeprom_channel_flags(uint16_t ch_flags)
2004 {
2005 	uint32_t nflags;
2006 
2007 	nflags = 0;
2008 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2009 		nflags |= IEEE80211_CHAN_PASSIVE;
2010 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2011 		nflags |= IEEE80211_CHAN_NOADHOC;
2012 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2013 		nflags |= IEEE80211_CHAN_DFS;
2014 		/* Just in case. */
2015 		nflags |= IEEE80211_CHAN_NOADHOC;
2016 	}
2017 
2018 	return (nflags);
2019 }
2020 
2021 static void
2022 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2023     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2024     const uint8_t bands[])
2025 {
2026 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2027 	uint32_t nflags;
2028 	uint16_t ch_flags;
2029 	uint8_t ieee;
2030 	int error;
2031 
2032 	for (; ch_idx < ch_num; ch_idx++) {
2033 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2034 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2035 			ieee = iwm_nvm_channels[ch_idx];
2036 		else
2037 			ieee = iwm_nvm_channels_8000[ch_idx];
2038 
2039 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2040 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2041 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2042 			    ieee, ch_flags,
2043 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2044 			    "5.2" : "2.4");
2045 			continue;
2046 		}
2047 
2048 		nflags = iwm_eeprom_channel_flags(ch_flags);
2049 		error = ieee80211_add_channel(chans, maxchans, nchans,
2050 		    ieee, 0, 0, nflags, bands);
2051 		if (error != 0)
2052 			break;
2053 
2054 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2055 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2056 		    ieee, ch_flags,
2057 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2058 		    "5.2" : "2.4");
2059 	}
2060 }
2061 
2062 static void
2063 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2064     struct ieee80211_channel chans[])
2065 {
2066 	struct iwm_softc *sc = ic->ic_softc;
2067 	struct iwm_nvm_data *data = sc->nvm_data;
2068 	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2069 	size_t ch_num;
2070 
2071 	memset(bands, 0, sizeof(bands));
2072 	/* 1-13: 11b/g channels. */
2073 	setbit(bands, IEEE80211_MODE_11B);
2074 	setbit(bands, IEEE80211_MODE_11G);
2075 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2076 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2077 
2078 	/* 14: 11b channel only. */
2079 	clrbit(bands, IEEE80211_MODE_11G);
2080 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2081 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2082 
2083 	if (data->sku_cap_band_52GHz_enable) {
2084 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2085 			ch_num = nitems(iwm_nvm_channels);
2086 		else
2087 			ch_num = nitems(iwm_nvm_channels_8000);
2088 		memset(bands, 0, sizeof(bands));
2089 		setbit(bands, IEEE80211_MODE_11A);
2090 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2091 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2092 	}
2093 }
2094 
2095 static void
2096 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2097 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2098 {
2099 	const uint8_t *hw_addr;
2100 
2101 	if (mac_override) {
2102 		static const uint8_t reserved_mac[] = {
2103 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2104 		};
2105 
2106 		hw_addr = (const uint8_t *)(mac_override +
2107 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2108 
2109 		/*
2110 		 * Store the MAC address from MAO section.
2111 		 * No byte swapping is required in MAO section
2112 		 */
2113 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2114 
2115 		/*
2116 		 * Force the use of the OTP MAC address in case of reserved MAC
2117 		 * address in the NVM, or if address is given but invalid.
2118 		 */
2119 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2120 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2121 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2122 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2123 			return;
2124 
2125 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2126 		    "%s: mac address from nvm override section invalid\n",
2127 		    __func__);
2128 	}
2129 
2130 	if (nvm_hw) {
2131 		/* read the mac address from WFMP registers */
2132 		uint32_t mac_addr0 =
2133 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2134 		uint32_t mac_addr1 =
2135 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2136 
2137 		hw_addr = (const uint8_t *)&mac_addr0;
2138 		data->hw_addr[0] = hw_addr[3];
2139 		data->hw_addr[1] = hw_addr[2];
2140 		data->hw_addr[2] = hw_addr[1];
2141 		data->hw_addr[3] = hw_addr[0];
2142 
2143 		hw_addr = (const uint8_t *)&mac_addr1;
2144 		data->hw_addr[4] = hw_addr[1];
2145 		data->hw_addr[5] = hw_addr[0];
2146 
2147 		return;
2148 	}
2149 
2150 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2151 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2152 }
2153 
2154 static int
2155 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2156 	    const uint16_t *phy_sku)
2157 {
2158 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2159 		return le16_to_cpup(nvm_sw + IWM_SKU);
2160 
2161 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2162 }
2163 
2164 static int
2165 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2166 {
2167 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2168 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2169 	else
2170 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2171 						IWM_NVM_VERSION_8000));
2172 }
2173 
2174 static int
2175 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2176 		  const uint16_t *phy_sku)
2177 {
2178         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2179                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2180 
2181         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2182 }
2183 
2184 static int
2185 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2186 {
2187 	int n_hw_addr;
2188 
2189 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2190 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2191 
2192 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2193 
2194         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2195 }
2196 
2197 static void
2198 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2199 		  uint32_t radio_cfg)
2200 {
2201 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2202 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2203 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2204 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2205 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2206 		return;
2207 	}
2208 
2209 	/* set the radio configuration for family 8000 */
2210 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2211 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2212 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2213 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2214 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2215 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2216 }
2217 
2218 static int
2219 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2220 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2221 {
2222 #ifdef notyet /* for FAMILY 9000 */
2223 	if (cfg->mac_addr_from_csr) {
2224 		iwm_set_hw_address_from_csr(sc, data);
2225         } else
2226 #endif
2227 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2228 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2229 
2230 		/* The byte order is little endian 16 bit, meaning 214365 */
2231 		data->hw_addr[0] = hw_addr[1];
2232 		data->hw_addr[1] = hw_addr[0];
2233 		data->hw_addr[2] = hw_addr[3];
2234 		data->hw_addr[3] = hw_addr[2];
2235 		data->hw_addr[4] = hw_addr[5];
2236 		data->hw_addr[5] = hw_addr[4];
2237 	} else {
2238 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2239 	}
2240 
2241 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2242 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2243 		return EINVAL;
2244 	}
2245 
2246 	return 0;
2247 }
2248 
2249 static struct iwm_nvm_data *
2250 iwm_parse_nvm_data(struct iwm_softc *sc,
2251 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2252 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2253 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2254 {
2255 	struct iwm_nvm_data *data;
2256 	uint32_t sku, radio_cfg;
2257 
2258 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2259 		data = kmalloc(sizeof(*data) +
2260 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2261 		    M_DEVBUF, M_WAITOK | M_ZERO);
2262 	} else {
2263 		data = kmalloc(sizeof(*data) +
2264 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2265 		    M_DEVBUF, M_WAITOK | M_ZERO);
2266 	}
2267 	if (!data)
2268 		return NULL;
2269 
2270 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2271 
2272 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2273 	iwm_set_radio_cfg(sc, data, radio_cfg);
2274 
2275 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2276 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2277 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2278 	data->sku_cap_11n_enable = 0;
2279 
2280 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2281 
2282 	/* If no valid mac address was found - bail out */
2283 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2284 		kfree(data, M_DEVBUF);
2285 		return NULL;
2286 	}
2287 
2288 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2289 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2290 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2291 	} else {
2292 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2293 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2294 	}
2295 
2296 	return data;
2297 }
2298 
2299 static void
2300 iwm_free_nvm_data(struct iwm_nvm_data *data)
2301 {
2302 	if (data != NULL)
2303 		kfree(data, M_DEVBUF);
2304 }
2305 
2306 static struct iwm_nvm_data *
2307 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2308 {
2309 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2310 
2311 	/* Checking for required sections */
2312 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2313 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2314 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2315 			device_printf(sc->sc_dev,
2316 			    "Can't parse empty OTP/NVM sections\n");
2317 			return NULL;
2318 		}
2319 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2320 		/* SW and REGULATORY sections are mandatory */
2321 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2322 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2323 			device_printf(sc->sc_dev,
2324 			    "Can't parse empty OTP/NVM sections\n");
2325 			return NULL;
2326 		}
2327 		/* MAC_OVERRIDE or at least HW section must exist */
2328 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2329 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2330 			device_printf(sc->sc_dev,
2331 			    "Can't parse mac_address, empty sections\n");
2332 			return NULL;
2333 		}
2334 
2335 		/* PHY_SKU section is mandatory in B0 */
2336 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2337 			device_printf(sc->sc_dev,
2338 			    "Can't parse phy_sku in B0, empty sections\n");
2339 			return NULL;
2340 		}
2341 	} else {
2342 		panic("unknown device family %d\n", sc->cfg->device_family);
2343 	}
2344 
2345 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2346 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2347 	calib = (const uint16_t *)
2348 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2349 	regulatory = (const uint16_t *)
2350 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2351 	mac_override = (const uint16_t *)
2352 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2353 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2354 
2355 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2356 	    phy_sku, regulatory);
2357 }
2358 
2359 static int
2360 iwm_nvm_init(struct iwm_softc *sc)
2361 {
2362 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2363 	int i, ret, section;
2364 	uint32_t size_read = 0;
2365 	uint8_t *nvm_buffer, *temp;
2366 	uint16_t len;
2367 
2368 	memset(nvm_sections, 0, sizeof(nvm_sections));
2369 
2370 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2371 		return EINVAL;
2372 
2373 	/* load NVM values from nic */
2374 	/* Read From FW NVM */
2375 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2376 
2377 	nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2378 	    M_INTWAIT | M_ZERO);
2379 	if (!nvm_buffer)
2380 		return ENOMEM;
2381 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2382 		/* we override the constness for initial read */
2383 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2384 					   &len, size_read);
2385 		if (ret)
2386 			continue;
2387 		size_read += len;
2388 		temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2389 		if (!temp) {
2390 			ret = ENOMEM;
2391 			break;
2392 		}
2393 		memcpy(temp, nvm_buffer, len);
2394 
2395 		nvm_sections[section].data = temp;
2396 		nvm_sections[section].length = len;
2397 	}
2398 	if (!size_read)
2399 		device_printf(sc->sc_dev, "OTP is blank\n");
2400 	kfree(nvm_buffer, M_DEVBUF);
2401 
2402 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2403 	if (!sc->nvm_data)
2404 		return EINVAL;
2405 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2406 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2407 
2408 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2409 		if (nvm_sections[i].data != NULL)
2410 			kfree(nvm_sections[i].data, M_DEVBUF);
2411 	}
2412 
2413 	return 0;
2414 }
2415 
2416 static int
2417 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2418 	const struct iwm_fw_desc *section)
2419 {
2420 	struct iwm_dma_info *dma = &sc->fw_dma;
2421 	uint8_t *v_addr;
2422 	bus_addr_t p_addr;
2423 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2424 	int ret = 0;
2425 
2426 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2427 		    "%s: [%d] uCode section being loaded...\n",
2428 		    __func__, section_num);
2429 
2430 	v_addr = dma->vaddr;
2431 	p_addr = dma->paddr;
2432 
2433 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2434 		uint32_t copy_size, dst_addr;
2435 		int extended_addr = FALSE;
2436 
2437 		copy_size = MIN(chunk_sz, section->len - offset);
2438 		dst_addr = section->offset + offset;
2439 
2440 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2441 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2442 			extended_addr = TRUE;
2443 
2444 		if (extended_addr)
2445 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2446 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2447 
2448 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2449 		    copy_size);
2450 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2451 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2452 						   copy_size);
2453 
2454 		if (extended_addr)
2455 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2456 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2457 
2458 		if (ret) {
2459 			device_printf(sc->sc_dev,
2460 			    "%s: Could not load the [%d] uCode section\n",
2461 			    __func__, section_num);
2462 			break;
2463 		}
2464 	}
2465 
2466 	return ret;
2467 }
2468 
2469 /*
2470  * ucode
2471  */
2472 static int
2473 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2474 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2475 {
2476 	int ret;
2477 
2478 	sc->sc_fw_chunk_done = 0;
2479 
2480 	if (!iwm_nic_lock(sc))
2481 		return EBUSY;
2482 
2483 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2484 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2485 
2486 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2487 	    dst_addr);
2488 
2489 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2490 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2491 
2492 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2493 	    (iwm_get_dma_hi_addr(phy_addr)
2494 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2495 
2496 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2497 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2498 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2499 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2500 
2501 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2502 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2503 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2504 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2505 
2506 	iwm_nic_unlock(sc);
2507 
2508 	/* wait up to 5s for this segment to load */
2509 	ret = 0;
2510 	while (!sc->sc_fw_chunk_done) {
2511 #if defined(__DragonFly__)
2512 		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2513 #else
2514 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2515 #endif
2516 		if (ret)
2517 			break;
2518 	}
2519 
2520 	if (ret != 0) {
2521 		device_printf(sc->sc_dev,
2522 		    "fw chunk addr 0x%x len %d failed to load\n",
2523 		    dst_addr, byte_cnt);
2524 		return ETIMEDOUT;
2525 	}
2526 
2527 	return 0;
2528 }
2529 
2530 static int
2531 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2532 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2533 {
2534 	int shift_param;
2535 	int i, ret = 0, sec_num = 0x1;
2536 	uint32_t val, last_read_idx = 0;
2537 
2538 	if (cpu == 1) {
2539 		shift_param = 0;
2540 		*first_ucode_section = 0;
2541 	} else {
2542 		shift_param = 16;
2543 		(*first_ucode_section)++;
2544 	}
2545 
2546 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2547 		last_read_idx = i;
2548 
2549 		/*
2550 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2551 		 * CPU1 to CPU2.
2552 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2553 		 * CPU2 non paged to CPU2 paging sec.
2554 		 */
2555 		if (!image->fw_sect[i].data ||
2556 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2557 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2558 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2559 				    "Break since Data not valid or Empty section, sec = %d\n",
2560 				    i);
2561 			break;
2562 		}
2563 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2564 		if (ret)
2565 			return ret;
2566 
2567 		/* Notify the ucode of the loaded section number and status */
2568 		if (iwm_nic_lock(sc)) {
2569 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2570 			val = val | (sec_num << shift_param);
2571 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2572 			sec_num = (sec_num << 1) | 0x1;
2573 			iwm_nic_unlock(sc);
2574 		}
2575 	}
2576 
2577 	*first_ucode_section = last_read_idx;
2578 
2579 	iwm_enable_interrupts(sc);
2580 
2581 	if (iwm_nic_lock(sc)) {
2582 		if (cpu == 1)
2583 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2584 		else
2585 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2586 		iwm_nic_unlock(sc);
2587 	}
2588 
2589 	return 0;
2590 }
2591 
2592 static int
2593 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2594 	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2595 {
2596 	int shift_param;
2597 	int i, ret = 0;
2598 	uint32_t last_read_idx = 0;
2599 
2600 	if (cpu == 1) {
2601 		shift_param = 0;
2602 		*first_ucode_section = 0;
2603 	} else {
2604 		shift_param = 16;
2605 		(*first_ucode_section)++;
2606 	}
2607 
2608 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2609 		last_read_idx = i;
2610 
2611 		/*
2612 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2613 		 * CPU1 to CPU2.
2614 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2615 		 * CPU2 non paged to CPU2 paging sec.
2616 		 */
2617 		if (!image->fw_sect[i].data ||
2618 		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2619 		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2620 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2621 				    "Break since Data not valid or Empty section, sec = %d\n",
2622 				     i);
2623 			break;
2624 		}
2625 
2626 		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2627 		if (ret)
2628 			return ret;
2629 	}
2630 
2631 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2632 		iwm_set_bits_prph(sc,
2633 				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2634 				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2635 				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2636 				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2637 					shift_param);
2638 
2639 	*first_ucode_section = last_read_idx;
2640 
2641 	return 0;
2642 
2643 }
2644 
2645 static int
2646 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2647 	const struct iwm_fw_sects *image)
2648 {
2649 	int ret = 0;
2650 	int first_ucode_section;
2651 
2652 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2653 		     image->is_dual_cpus ? "Dual" : "Single");
2654 
2655 	/* load to FW the binary non secured sections of CPU1 */
2656 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2657 	if (ret)
2658 		return ret;
2659 
2660 	if (image->is_dual_cpus) {
2661 		/* set CPU2 header address */
2662 		if (iwm_nic_lock(sc)) {
2663 			iwm_write_prph(sc,
2664 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2665 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2666 			iwm_nic_unlock(sc);
2667 		}
2668 
2669 		/* load to FW the binary sections of CPU2 */
2670 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2671 						 &first_ucode_section);
2672 		if (ret)
2673 			return ret;
2674 	}
2675 
2676 	iwm_enable_interrupts(sc);
2677 
2678 	/* release CPU reset */
2679 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2680 
2681 	return 0;
2682 }
2683 
2684 int
2685 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2686 	const struct iwm_fw_sects *image)
2687 {
2688 	int ret = 0;
2689 	int first_ucode_section;
2690 
2691 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2692 		    image->is_dual_cpus ? "Dual" : "Single");
2693 
2694 	/* configure the ucode to be ready to get the secured image */
2695 	/* release CPU reset */
2696 	if (iwm_nic_lock(sc)) {
2697 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2698 		    IWM_RELEASE_CPU_RESET_BIT);
2699 		iwm_nic_unlock(sc);
2700 	}
2701 
2702 	/* load to FW the binary Secured sections of CPU1 */
2703 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2704 	    &first_ucode_section);
2705 	if (ret)
2706 		return ret;
2707 
2708 	/* load to FW the binary sections of CPU2 */
2709 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2710 	    &first_ucode_section);
2711 }
2712 
2713 /* XXX Get rid of this definition */
2714 static inline void
2715 iwm_enable_fw_load_int(struct iwm_softc *sc)
2716 {
2717 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2718 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2719 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2720 }
2721 
2722 /* XXX Add proper rfkill support code */
2723 static int
2724 iwm_start_fw(struct iwm_softc *sc,
2725 	const struct iwm_fw_sects *fw)
2726 {
2727 	int ret;
2728 
2729 	/* This may fail if AMT took ownership of the device */
2730 	if (iwm_prepare_card_hw(sc)) {
2731 		device_printf(sc->sc_dev,
2732 		    "%s: Exit HW not ready\n", __func__);
2733 		ret = EIO;
2734 		goto out;
2735 	}
2736 
2737 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2738 
2739 	iwm_disable_interrupts(sc);
2740 
2741 	/* make sure rfkill handshake bits are cleared */
2742 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2743 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2744 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2745 
2746 	/* clear (again), then enable host interrupts */
2747 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2748 
2749 	ret = iwm_nic_init(sc);
2750 	if (ret) {
2751 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2752 		goto out;
2753 	}
2754 
2755 	/*
2756 	 * Now, we load the firmware and don't want to be interrupted, even
2757 	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2758 	 * FH_TX interrupt which is needed to load the firmware). If the
2759 	 * RF-Kill switch is toggled, we will find out after having loaded
2760 	 * the firmware and return the proper value to the caller.
2761 	 */
2762 	iwm_enable_fw_load_int(sc);
2763 
2764 	/* really make sure rfkill handshake bits are cleared */
2765 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2766 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2767 
2768 	/* Load the given image to the HW */
2769 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2770 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2771 	else
2772 		ret = iwm_pcie_load_given_ucode(sc, fw);
2773 
2774 	/* XXX re-check RF-Kill state */
2775 
2776 out:
2777 	return ret;
2778 }
2779 
2780 static int
2781 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2782 {
2783 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2784 		.valid = htole32(valid_tx_ant),
2785 	};
2786 
2787 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2788 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2789 }
2790 
2791 static int
2792 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2793 {
2794 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2795 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2796 
2797 	/* Set parameters */
2798 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2799 	phy_cfg_cmd.calib_control.event_trigger =
2800 	    sc->sc_default_calib[ucode_type].event_trigger;
2801 	phy_cfg_cmd.calib_control.flow_trigger =
2802 	    sc->sc_default_calib[ucode_type].flow_trigger;
2803 
2804 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2805 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2806 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2807 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2808 }
2809 
2810 static int
2811 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2812 {
2813 	struct iwm_mvm_alive_data *alive_data = data;
2814 	struct iwm_mvm_alive_resp_ver1 *palive1;
2815 	struct iwm_mvm_alive_resp_ver2 *palive2;
2816 	struct iwm_mvm_alive_resp *palive;
2817 
2818 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2819 		palive1 = (void *)pkt->data;
2820 
2821 		sc->support_umac_log = FALSE;
2822                 sc->error_event_table =
2823                         le32toh(palive1->error_event_table_ptr);
2824                 sc->log_event_table =
2825                         le32toh(palive1->log_event_table_ptr);
2826                 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2827 
2828                 alive_data->valid = le16toh(palive1->status) ==
2829                                     IWM_ALIVE_STATUS_OK;
2830                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2831 			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2832 			     le16toh(palive1->status), palive1->ver_type,
2833                              palive1->ver_subtype, palive1->flags);
2834 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2835 		palive2 = (void *)pkt->data;
2836 		sc->error_event_table =
2837 			le32toh(palive2->error_event_table_ptr);
2838 		sc->log_event_table =
2839 			le32toh(palive2->log_event_table_ptr);
2840 		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2841 		sc->umac_error_event_table =
2842                         le32toh(palive2->error_info_addr);
2843 
2844 		alive_data->valid = le16toh(palive2->status) ==
2845 				    IWM_ALIVE_STATUS_OK;
2846 		if (sc->umac_error_event_table)
2847 			sc->support_umac_log = TRUE;
2848 
2849 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2850 			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2851 			    le16toh(palive2->status), palive2->ver_type,
2852 			    palive2->ver_subtype, palive2->flags);
2853 
2854 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2855 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2856 			    palive2->umac_major, palive2->umac_minor);
2857 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2858 		palive = (void *)pkt->data;
2859 
2860 		sc->error_event_table =
2861 			le32toh(palive->error_event_table_ptr);
2862 		sc->log_event_table =
2863 			le32toh(palive->log_event_table_ptr);
2864 		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2865 		sc->umac_error_event_table =
2866 			le32toh(palive->error_info_addr);
2867 
2868 		alive_data->valid = le16toh(palive->status) ==
2869 				    IWM_ALIVE_STATUS_OK;
2870 		if (sc->umac_error_event_table)
2871 			sc->support_umac_log = TRUE;
2872 
2873 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2874 			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2875 			    le16toh(palive->status), palive->ver_type,
2876 			    palive->ver_subtype, palive->flags);
2877 
2878 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2879 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2880 			    le32toh(palive->umac_major),
2881 			    le32toh(palive->umac_minor));
2882 	}
2883 
2884 	return TRUE;
2885 }
2886 
2887 static int
2888 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2889 	struct iwm_rx_packet *pkt, void *data)
2890 {
2891 	struct iwm_phy_db *phy_db = data;
2892 
2893 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2894 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2895 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2896 			    __func__, pkt->hdr.code);
2897 		}
2898 		return TRUE;
2899 	}
2900 
2901 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2902 		device_printf(sc->sc_dev,
2903 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2904 	}
2905 
2906 	return FALSE;
2907 }
2908 
2909 static int
2910 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2911 	enum iwm_ucode_type ucode_type)
2912 {
2913 	struct iwm_notification_wait alive_wait;
2914 	struct iwm_mvm_alive_data alive_data;
2915 	const struct iwm_fw_sects *fw;
2916 	enum iwm_ucode_type old_type = sc->cur_ucode;
2917 	int error;
2918 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2919 
2920 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2921 		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2922 			error);
2923 		return error;
2924 	}
2925 	fw = &sc->sc_fw.fw_sects[ucode_type];
2926 	sc->cur_ucode = ucode_type;
2927 	sc->ucode_loaded = FALSE;
2928 
2929 	memset(&alive_data, 0, sizeof(alive_data));
2930 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2931 				   alive_cmd, NELEM(alive_cmd),
2932 				   iwm_alive_fn, &alive_data);
2933 
2934 	error = iwm_start_fw(sc, fw);
2935 	if (error) {
2936 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2937 		sc->cur_ucode = old_type;
2938 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2939 		return error;
2940 	}
2941 
2942 	/*
2943 	 * Some things may run in the background now, but we
2944 	 * just wait for the ALIVE notification here.
2945 	 */
2946 	IWM_UNLOCK(sc);
2947 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2948 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2949 	IWM_LOCK(sc);
2950 	if (error) {
2951 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2952 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2953 			if (iwm_nic_lock(sc)) {
2954 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2955 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2956 				iwm_nic_unlock(sc);
2957 			}
2958 			device_printf(sc->sc_dev,
2959 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2960 			    a, b);
2961 		}
2962 		sc->cur_ucode = old_type;
2963 		return error;
2964 	}
2965 
2966 	if (!alive_data.valid) {
2967 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2968 		    __func__);
2969 		sc->cur_ucode = old_type;
2970 		return EIO;
2971 	}
2972 
2973 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2974 
2975 	/*
2976 	 * configure and operate fw paging mechanism.
2977 	 * driver configures the paging flow only once, CPU2 paging image
2978 	 * included in the IWM_UCODE_INIT image.
2979 	 */
2980 	if (fw->paging_mem_size) {
2981 		error = iwm_save_fw_paging(sc, fw);
2982 		if (error) {
2983 			device_printf(sc->sc_dev,
2984 			    "%s: failed to save the FW paging image\n",
2985 			    __func__);
2986 			return error;
2987 		}
2988 
2989 		error = iwm_send_paging_cmd(sc, fw);
2990 		if (error) {
2991 			device_printf(sc->sc_dev,
2992 			    "%s: failed to send the paging cmd\n", __func__);
2993 			iwm_free_fw_paging(sc);
2994 			return error;
2995 		}
2996 	}
2997 
2998 	if (!error)
2999 		sc->ucode_loaded = TRUE;
3000 	return error;
3001 }
3002 
3003 /*
3004  * mvm misc bits
3005  */
3006 
3007 static int
3008 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3009 {
3010 	struct iwm_notification_wait calib_wait;
3011 	static const uint16_t init_complete[] = {
3012 		IWM_INIT_COMPLETE_NOTIF,
3013 		IWM_CALIB_RES_NOTIF_PHY_DB
3014 	};
3015 	int ret;
3016 
3017 	/* do not operate with rfkill switch turned on */
3018 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3019 		device_printf(sc->sc_dev,
3020 		    "radio is disabled by hardware switch\n");
3021 		return EPERM;
3022 	}
3023 
3024 	iwm_init_notification_wait(sc->sc_notif_wait,
3025 				   &calib_wait,
3026 				   init_complete,
3027 				   NELEM(init_complete),
3028 				   iwm_wait_phy_db_entry,
3029 				   sc->sc_phy_db);
3030 
3031 	/* Will also start the device */
3032 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3033 	if (ret) {
3034 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3035 		    ret);
3036 		goto error;
3037 	}
3038 
3039 	if (justnvm) {
3040 		/* Read nvm */
3041 		ret = iwm_nvm_init(sc);
3042 		if (ret) {
3043 			device_printf(sc->sc_dev, "failed to read nvm\n");
3044 			goto error;
3045 		}
3046 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3047 		goto error;
3048 	}
3049 
3050 	ret = iwm_send_bt_init_conf(sc);
3051 	if (ret) {
3052 		device_printf(sc->sc_dev,
3053 		    "failed to send bt coex configuration: %d\n", ret);
3054 		goto error;
3055 	}
3056 
3057 	/* Init Smart FIFO. */
3058 	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
3059 	if (ret)
3060 		goto error;
3061 
3062 	/* Send TX valid antennas before triggering calibrations */
3063 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3064 	if (ret) {
3065 		device_printf(sc->sc_dev,
3066 		    "failed to send antennas before calibration: %d\n", ret);
3067 		goto error;
3068 	}
3069 
3070 	/*
3071 	 * Send phy configurations command to init uCode
3072 	 * to start the 16.0 uCode init image internal calibrations.
3073 	 */
3074 	ret = iwm_send_phy_cfg_cmd(sc);
3075 	if (ret) {
3076 		device_printf(sc->sc_dev,
3077 		    "%s: Failed to run INIT calibrations: %d\n",
3078 		    __func__, ret);
3079 		goto error;
3080 	}
3081 
3082 	/*
3083 	 * Nothing to do but wait for the init complete notification
3084 	 * from the firmware.
3085 	 */
3086 	IWM_UNLOCK(sc);
3087 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3088 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3089 	IWM_LOCK(sc);
3090 
3091 
3092 	goto out;
3093 
3094 error:
3095 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3096 out:
3097 	return ret;
3098 }
3099 
3100 /*
3101  * receive side
3102  */
3103 
3104 /* (re)stock rx ring, called at init-time and at runtime */
3105 static int
3106 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3107 {
3108 	struct iwm_rx_ring *ring = &sc->rxq;
3109 	struct iwm_rx_data *data = &ring->data[idx];
3110 	struct mbuf *m;
3111 	bus_dmamap_t dmamap;
3112 	bus_dma_segment_t seg;
3113 	int nsegs, error;
3114 
3115 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3116 	if (m == NULL)
3117 		return ENOBUFS;
3118 
3119 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3120 #if defined(__DragonFly__)
3121 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3122 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3123 #else
3124 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3125 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3126 #endif
3127 	if (error != 0) {
3128 		device_printf(sc->sc_dev,
3129 		    "%s: can't map mbuf, error %d\n", __func__, error);
3130 		m_freem(m);
3131 		return error;
3132 	}
3133 
3134 	if (data->m != NULL)
3135 		bus_dmamap_unload(ring->data_dmat, data->map);
3136 
3137 	/* Swap ring->spare_map with data->map */
3138 	dmamap = data->map;
3139 	data->map = ring->spare_map;
3140 	ring->spare_map = dmamap;
3141 
3142 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3143 	data->m = m;
3144 
3145 	/* Update RX descriptor. */
3146 	KKASSERT((seg.ds_addr & 255) == 0);
3147 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3148 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3149 	    BUS_DMASYNC_PREWRITE);
3150 
3151 	return 0;
3152 }
3153 
3154 /*
3155  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3156  * values are reported by the fw as positive values - need to negate
3157  * to obtain their dBM.  Account for missing antennas by replacing 0
3158  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3159  */
3160 static int
3161 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3162 {
3163 	int energy_a, energy_b, energy_c, max_energy;
3164 	uint32_t val;
3165 
3166 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3167 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3168 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3169 	energy_a = energy_a ? -energy_a : -256;
3170 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3171 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3172 	energy_b = energy_b ? -energy_b : -256;
3173 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3174 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3175 	energy_c = energy_c ? -energy_c : -256;
3176 	max_energy = MAX(energy_a, energy_b);
3177 	max_energy = MAX(max_energy, energy_c);
3178 
3179 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3180 	    "energy In A %d B %d C %d , and max %d\n",
3181 	    energy_a, energy_b, energy_c, max_energy);
3182 
3183 	return max_energy;
3184 }
3185 
3186 static void
3187 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3188 {
3189 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3190 
3191 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3192 
3193 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3194 }
3195 
3196 /*
3197  * Retrieve the average noise (in dBm) among receivers.
3198  */
3199 static int
3200 iwm_get_noise(struct iwm_softc *sc,
3201 	const struct iwm_mvm_statistics_rx_non_phy *stats)
3202 {
3203 	int i, total, nbant, noise;
3204 
3205 	total = nbant = noise = 0;
3206 	for (i = 0; i < 3; i++) {
3207 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3208 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3209 		    __func__, i, noise);
3210 
3211 		if (noise) {
3212 			total += noise;
3213 			nbant++;
3214 		}
3215 	}
3216 
3217 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3218 	    __func__, nbant, total);
3219 #if 0
3220 	/* There should be at least one antenna but check anyway. */
3221 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3222 #else
3223 	/* For now, just hard-code it to -96 to be safe */
3224 	return (-96);
3225 #endif
3226 }
3227 
3228 /*
3229  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3230  *
3231  * Handles the actual data of the Rx packet from the fw
3232  */
3233 static void
3234 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m)
3235 {
3236 	struct ieee80211com *ic = &sc->sc_ic;
3237 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3238 	struct ieee80211_frame *wh;
3239 	struct ieee80211_node *ni;
3240 	struct ieee80211_rx_stats rxs;
3241 	struct iwm_rx_phy_info *phy_info;
3242 	struct iwm_rx_mpdu_res_start *rx_res;
3243 	struct iwm_rx_packet *pkt = mtod(m, struct iwm_rx_packet *);
3244 	uint32_t len;
3245 	uint32_t rx_pkt_status;
3246 	int rssi;
3247 
3248 	phy_info = &sc->sc_last_phy_info;
3249 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3250 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3251 	len = le16toh(rx_res->byte_count);
3252 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3253 
3254 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3255 		device_printf(sc->sc_dev,
3256 		    "dsp size out of range [0,20]: %d\n",
3257 		    phy_info->cfg_phy_cnt);
3258 		return;
3259 	}
3260 
3261 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3262 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3263 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3264 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3265 		return; /* drop */
3266 	}
3267 
3268 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3269 	/* Note: RSSI is absolute (ie a -ve value) */
3270 	if (rssi < IWM_MIN_DBM)
3271 		rssi = IWM_MIN_DBM;
3272 	else if (rssi > IWM_MAX_DBM)
3273 		rssi = IWM_MAX_DBM;
3274 
3275 	/* Map it to relative value */
3276 	rssi = rssi - sc->sc_noise;
3277 
3278 	/* replenish ring for the buffer we're going to feed to the sharks */
3279 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3280 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3281 		    __func__);
3282 		return;
3283 	}
3284 
3285 	m->m_data = pkt->data + sizeof(*rx_res);
3286 	m->m_pkthdr.len = m->m_len = len;
3287 
3288 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3289 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3290 
3291 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3292 
3293 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3294 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3295 	    __func__,
3296 	    le16toh(phy_info->channel),
3297 	    le16toh(phy_info->phy_flags));
3298 
3299 	/*
3300 	 * Populate an RX state struct with the provided information.
3301 	 */
3302 	bzero(&rxs, sizeof(rxs));
3303 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3304 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3305 	rxs.c_ieee = le16toh(phy_info->channel);
3306 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3307 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3308 	} else {
3309 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3310 	}
3311 	/* rssi is in 1/2db units */
3312 	rxs.rssi = rssi * 2;
3313 	rxs.nf = sc->sc_noise;
3314 
3315 	if (ieee80211_radiotap_active_vap(vap)) {
3316 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3317 
3318 		tap->wr_flags = 0;
3319 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3320 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3321 		tap->wr_chan_freq = htole16(rxs.c_freq);
3322 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3323 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3324 		tap->wr_dbm_antsignal = (int8_t)rssi;
3325 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3326 		tap->wr_tsft = phy_info->system_timestamp;
3327 		switch (phy_info->rate) {
3328 		/* CCK rates. */
3329 		case  10: tap->wr_rate =   2; break;
3330 		case  20: tap->wr_rate =   4; break;
3331 		case  55: tap->wr_rate =  11; break;
3332 		case 110: tap->wr_rate =  22; break;
3333 		/* OFDM rates. */
3334 		case 0xd: tap->wr_rate =  12; break;
3335 		case 0xf: tap->wr_rate =  18; break;
3336 		case 0x5: tap->wr_rate =  24; break;
3337 		case 0x7: tap->wr_rate =  36; break;
3338 		case 0x9: tap->wr_rate =  48; break;
3339 		case 0xb: tap->wr_rate =  72; break;
3340 		case 0x1: tap->wr_rate =  96; break;
3341 		case 0x3: tap->wr_rate = 108; break;
3342 		/* Unknown rate: should not happen. */
3343 		default:  tap->wr_rate =   0;
3344 		}
3345 	}
3346 
3347 	IWM_UNLOCK(sc);
3348 	if (ni != NULL) {
3349 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3350 		ieee80211_input_mimo(ni, m, &rxs);
3351 		ieee80211_free_node(ni);
3352 	} else {
3353 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3354 		ieee80211_input_mimo_all(ic, m, &rxs);
3355 	}
3356 	IWM_LOCK(sc);
3357 }
3358 
3359 static int
3360 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3361 	struct iwm_node *in)
3362 {
3363 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3364 	struct ieee80211_node *ni = &in->in_ni;
3365 	struct ieee80211vap *vap = ni->ni_vap;
3366 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3367 	int failack = tx_resp->failure_frame;
3368 
3369 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3370 
3371 	/* Update rate control statistics. */
3372 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3373 	    __func__,
3374 	    (int) le16toh(tx_resp->status.status),
3375 	    (int) le16toh(tx_resp->status.sequence),
3376 	    tx_resp->frame_count,
3377 	    tx_resp->bt_kill_count,
3378 	    tx_resp->failure_rts,
3379 	    tx_resp->failure_frame,
3380 	    le32toh(tx_resp->initial_rate),
3381 	    (int) le16toh(tx_resp->wireless_media_time));
3382 
3383 	if (status != IWM_TX_STATUS_SUCCESS &&
3384 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3385 		ieee80211_ratectl_tx_complete(vap, ni,
3386 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3387 		return (1);
3388 	} else {
3389 		ieee80211_ratectl_tx_complete(vap, ni,
3390 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3391 		return (0);
3392 	}
3393 }
3394 
3395 static void
3396 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3397 {
3398 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3399 	int idx = cmd_hdr->idx;
3400 	int qid = cmd_hdr->qid;
3401 	struct iwm_tx_ring *ring = &sc->txq[qid];
3402 	struct iwm_tx_data *txd = &ring->data[idx];
3403 	struct iwm_node *in = txd->in;
3404 	struct mbuf *m = txd->m;
3405 	int status;
3406 
3407 	KASSERT(txd->done == 0, ("txd not done"));
3408 	KASSERT(txd->in != NULL, ("txd without node"));
3409 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3410 
3411 	sc->sc_tx_timer = 0;
3412 
3413 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3414 
3415 	/* Unmap and free mbuf. */
3416 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3417 	bus_dmamap_unload(ring->data_dmat, txd->map);
3418 
3419 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3420 	    "free txd %p, in %p\n", txd, txd->in);
3421 	txd->done = 1;
3422 	txd->m = NULL;
3423 	txd->in = NULL;
3424 
3425 	ieee80211_tx_complete(&in->in_ni, m, status);
3426 
3427 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3428 		sc->qfullmsk &= ~(1 << ring->qid);
3429 		if (sc->qfullmsk == 0) {
3430 			iwm_start(sc);
3431 		}
3432 	}
3433 }
3434 
3435 /*
3436  * transmit side
3437  */
3438 
3439 /*
3440  * Process a "command done" firmware notification.  This is where we wakeup
3441  * processes waiting for a synchronous command completion.
3442  * from if_iwn
3443  */
3444 static void
3445 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3446 {
3447 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3448 	struct iwm_tx_data *data;
3449 
3450 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3451 		return;	/* Not a command ack. */
3452 	}
3453 
3454 	data = &ring->data[pkt->hdr.idx];
3455 
3456 	/* If the command was mapped in an mbuf, free it. */
3457 	if (data->m != NULL) {
3458 		bus_dmamap_sync(ring->data_dmat, data->map,
3459 		    BUS_DMASYNC_POSTWRITE);
3460 		bus_dmamap_unload(ring->data_dmat, data->map);
3461 		m_freem(data->m);
3462 		data->m = NULL;
3463 	}
3464 	wakeup(&ring->desc[pkt->hdr.idx]);
3465 
3466 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3467 		device_printf(sc->sc_dev,
3468 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3469 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3470 		/* XXX call iwm_force_nmi() */
3471 	}
3472 
3473 	KKASSERT(ring->queued > 0);
3474 	ring->queued--;
3475 	if (ring->queued == 0)
3476 		iwm_pcie_clear_cmd_in_flight(sc);
3477 }
3478 
3479 #if 0
3480 /*
3481  * necessary only for block ack mode
3482  */
3483 void
3484 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3485 	uint16_t len)
3486 {
3487 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3488 	uint16_t w_val;
3489 
3490 	scd_bc_tbl = sc->sched_dma.vaddr;
3491 
3492 	len += 8; /* magic numbers came naturally from paris */
3493 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3494 		len = roundup(len, 4) / 4;
3495 
3496 	w_val = htole16(sta_id << 12 | len);
3497 
3498 	/* Update TX scheduler. */
3499 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3500 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3501 	    BUS_DMASYNC_PREWRITE);
3502 
3503 	/* I really wonder what this is ?!? */
3504 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3505 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3506 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3507 		    BUS_DMASYNC_PREWRITE);
3508 	}
3509 }
3510 #endif
3511 
3512 /*
3513  * Take an 802.11 (non-n) rate, find the relevant rate
3514  * table entry.  return the index into in_ridx[].
3515  *
3516  * The caller then uses that index back into in_ridx
3517  * to figure out the rate index programmed /into/
3518  * the firmware for this given node.
3519  */
3520 static int
3521 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3522     uint8_t rate)
3523 {
3524 	int i;
3525 	uint8_t r;
3526 
3527 	for (i = 0; i < nitems(in->in_ridx); i++) {
3528 		r = iwm_rates[in->in_ridx[i]].rate;
3529 		if (rate == r)
3530 			return (i);
3531 	}
3532 	/* XXX Return the first */
3533 	/* XXX TODO: have it return the /lowest/ */
3534 	return (0);
3535 }
3536 
3537 /*
3538  * Fill in the rate related information for a transmit command.
3539  */
3540 static const struct iwm_rate *
3541 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3542 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3543 {
3544 	struct ieee80211com *ic = &sc->sc_ic;
3545 	struct ieee80211_node *ni = &in->in_ni;
3546 	const struct iwm_rate *rinfo;
3547 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3548 	int ridx, rate_flags;
3549 
3550 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3551 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3552 
3553 	/*
3554 	 * XXX TODO: everything about the rate selection here is terrible!
3555 	 */
3556 
3557 	if (type == IEEE80211_FC0_TYPE_DATA) {
3558 		int i;
3559 		/* for data frames, use RS table */
3560 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3561 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3562 		ridx = in->in_ridx[i];
3563 
3564 		/* This is the index into the programmed table */
3565 		tx->initial_rate_index = i;
3566 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3567 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3568 		    "%s: start with i=%d, txrate %d\n",
3569 		    __func__, i, iwm_rates[ridx].rate);
3570 	} else {
3571 		/*
3572 		 * For non-data, use the lowest supported rate for the given
3573 		 * operational mode.
3574 		 *
3575 		 * Note: there may not be any rate control information available.
3576 		 * This driver currently assumes if we're transmitting data
3577 		 * frames, use the rate control table.  Grr.
3578 		 *
3579 		 * XXX TODO: use the configured rate for the traffic type!
3580 		 * XXX TODO: this should be per-vap, not curmode; as we later
3581 		 * on we'll want to handle off-channel stuff (eg TDLS).
3582 		 */
3583 		if (ic->ic_curmode == IEEE80211_MODE_11A) {
3584 			/*
3585 			 * XXX this assumes the mode is either 11a or not 11a;
3586 			 * definitely won't work for 11n.
3587 			 */
3588 			ridx = IWM_RIDX_OFDM;
3589 		} else {
3590 			ridx = IWM_RIDX_CCK;
3591 		}
3592 	}
3593 
3594 	rinfo = &iwm_rates[ridx];
3595 
3596 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3597 	    __func__, ridx,
3598 	    rinfo->rate,
3599 	    !! (IWM_RIDX_IS_CCK(ridx))
3600 	    );
3601 
3602 	/* XXX TODO: hard-coded TX antenna? */
3603 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3604 	if (IWM_RIDX_IS_CCK(ridx))
3605 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3606 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3607 
3608 	return rinfo;
3609 }
3610 
3611 #define TB0_SIZE 16
3612 static int
3613 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3614 {
3615 	struct ieee80211com *ic = &sc->sc_ic;
3616 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3617 	struct iwm_node *in = IWM_NODE(ni);
3618 	struct iwm_tx_ring *ring;
3619 	struct iwm_tx_data *data;
3620 	struct iwm_tfd *desc;
3621 	struct iwm_device_cmd *cmd;
3622 	struct iwm_tx_cmd *tx;
3623 	struct ieee80211_frame *wh;
3624 	struct ieee80211_key *k = NULL;
3625 #if !defined(__DragonFly__)
3626 	struct mbuf *m1;
3627 #endif
3628 	const struct iwm_rate *rinfo;
3629 	uint32_t flags;
3630 	u_int hdrlen;
3631 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3632 	int nsegs;
3633 	uint8_t tid, type;
3634 	int i, totlen, error, pad;
3635 
3636 	wh = mtod(m, struct ieee80211_frame *);
3637 	hdrlen = ieee80211_anyhdrsize(wh);
3638 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3639 	tid = 0;
3640 	ring = &sc->txq[ac];
3641 	desc = &ring->desc[ring->cur];
3642 	memset(desc, 0, sizeof(*desc));
3643 	data = &ring->data[ring->cur];
3644 
3645 	/* Fill out iwm_tx_cmd to send to the firmware */
3646 	cmd = &ring->cmd[ring->cur];
3647 	cmd->hdr.code = IWM_TX_CMD;
3648 	cmd->hdr.flags = 0;
3649 	cmd->hdr.qid = ring->qid;
3650 	cmd->hdr.idx = ring->cur;
3651 
3652 	tx = (void *)cmd->data;
3653 	memset(tx, 0, sizeof(*tx));
3654 
3655 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3656 
3657 	/* Encrypt the frame if need be. */
3658 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3659 		/* Retrieve key for TX && do software encryption. */
3660 		k = ieee80211_crypto_encap(ni, m);
3661 		if (k == NULL) {
3662 			m_freem(m);
3663 			return (ENOBUFS);
3664 		}
3665 		/* 802.11 header may have moved. */
3666 		wh = mtod(m, struct ieee80211_frame *);
3667 	}
3668 
3669 	if (ieee80211_radiotap_active_vap(vap)) {
3670 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3671 
3672 		tap->wt_flags = 0;
3673 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3674 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3675 		tap->wt_rate = rinfo->rate;
3676 		if (k != NULL)
3677 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3678 		ieee80211_radiotap_tx(vap, m);
3679 	}
3680 
3681 
3682 	totlen = m->m_pkthdr.len;
3683 
3684 	flags = 0;
3685 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3686 		flags |= IWM_TX_CMD_FLG_ACK;
3687 	}
3688 
3689 	if (type == IEEE80211_FC0_TYPE_DATA
3690 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3691 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3692 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3693 	}
3694 
3695 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3696 	    type != IEEE80211_FC0_TYPE_DATA)
3697 		tx->sta_id = sc->sc_aux_sta.sta_id;
3698 	else
3699 		tx->sta_id = IWM_STATION_ID;
3700 
3701 	if (type == IEEE80211_FC0_TYPE_MGT) {
3702 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3703 
3704 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3705 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3706 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3707 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3708 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3709 		} else {
3710 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3711 		}
3712 	} else {
3713 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3714 	}
3715 
3716 	if (hdrlen & 3) {
3717 		/* First segment length must be a multiple of 4. */
3718 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3719 		pad = 4 - (hdrlen & 3);
3720 	} else
3721 		pad = 0;
3722 
3723 	tx->driver_txop = 0;
3724 	tx->next_frame_len = 0;
3725 
3726 	tx->len = htole16(totlen);
3727 	tx->tid_tspec = tid;
3728 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3729 
3730 	/* Set physical address of "scratch area". */
3731 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3732 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3733 
3734 	/* Copy 802.11 header in TX command. */
3735 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3736 
3737 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3738 
3739 	tx->sec_ctl = 0;
3740 	tx->tx_flags |= htole32(flags);
3741 
3742 	/* Trim 802.11 header. */
3743 	m_adj(m, hdrlen);
3744 #if defined(__DragonFly__)
3745 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3746 					    segs, IWM_MAX_SCATTER - 2,
3747 					    &nsegs, BUS_DMA_NOWAIT);
3748 #else
3749 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3750 	    segs, &nsegs, BUS_DMA_NOWAIT);
3751 #endif
3752 	if (error != 0) {
3753 #if defined(__DragonFly__)
3754 		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3755 		    error);
3756 		m_freem(m);
3757 		return error;
3758 #else
3759 		if (error != EFBIG) {
3760 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3761 			    error);
3762 			m_freem(m);
3763 			return error;
3764 		}
3765 		/* Too many DMA segments, linearize mbuf. */
3766 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3767 		if (m1 == NULL) {
3768 			device_printf(sc->sc_dev,
3769 			    "%s: could not defrag mbuf\n", __func__);
3770 			m_freem(m);
3771 			return (ENOBUFS);
3772 		}
3773 		m = m1;
3774 
3775 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3776 		    segs, &nsegs, BUS_DMA_NOWAIT);
3777 		if (error != 0) {
3778 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3779 			    error);
3780 			m_freem(m);
3781 			return error;
3782 		}
3783 #endif
3784 	}
3785 	data->m = m;
3786 	data->in = in;
3787 	data->done = 0;
3788 
3789 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3790 	    "sending txd %p, in %p\n", data, data->in);
3791 	KASSERT(data->in != NULL, ("node is NULL"));
3792 
3793 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3794 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3795 	    ring->qid, ring->cur, totlen, nsegs,
3796 	    le32toh(tx->tx_flags),
3797 	    le32toh(tx->rate_n_flags),
3798 	    tx->initial_rate_index
3799 	    );
3800 
3801 	/* Fill TX descriptor. */
3802 	desc->num_tbs = 2 + nsegs;
3803 
3804 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3805 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3806 	    (TB0_SIZE << 4);
3807 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3808 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3809 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3810 	      + hdrlen + pad - TB0_SIZE) << 4);
3811 
3812 	/* Other DMA segments are for data payload. */
3813 	for (i = 0; i < nsegs; i++) {
3814 		seg = &segs[i];
3815 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3816 		desc->tbs[i+2].hi_n_len = \
3817 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3818 		    | ((seg->ds_len) << 4);
3819 	}
3820 
3821 	bus_dmamap_sync(ring->data_dmat, data->map,
3822 	    BUS_DMASYNC_PREWRITE);
3823 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3824 	    BUS_DMASYNC_PREWRITE);
3825 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3826 	    BUS_DMASYNC_PREWRITE);
3827 
3828 #if 0
3829 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3830 #endif
3831 
3832 	/* Kick TX ring. */
3833 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3834 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3835 
3836 	/* Mark TX ring as full if we reach a certain threshold. */
3837 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3838 		sc->qfullmsk |= 1 << ring->qid;
3839 	}
3840 
3841 	return 0;
3842 }
3843 
3844 static int
3845 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3846     const struct ieee80211_bpf_params *params)
3847 {
3848 	struct ieee80211com *ic = ni->ni_ic;
3849 	struct iwm_softc *sc = ic->ic_softc;
3850 	int error = 0;
3851 
3852 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3853 	    "->%s begin\n", __func__);
3854 
3855 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3856 		m_freem(m);
3857 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3858 		    "<-%s not RUNNING\n", __func__);
3859 		return (ENETDOWN);
3860         }
3861 
3862 	IWM_LOCK(sc);
3863 	/* XXX fix this */
3864         if (params == NULL) {
3865 		error = iwm_tx(sc, m, ni, 0);
3866 	} else {
3867 		error = iwm_tx(sc, m, ni, 0);
3868 	}
3869 	sc->sc_tx_timer = 5;
3870 	IWM_UNLOCK(sc);
3871 
3872         return (error);
3873 }
3874 
3875 /*
3876  * mvm/tx.c
3877  */
3878 
3879 /*
3880  * Note that there are transports that buffer frames before they reach
3881  * the firmware. This means that after flush_tx_path is called, the
3882  * queue might not be empty. The race-free way to handle this is to:
3883  * 1) set the station as draining
3884  * 2) flush the Tx path
3885  * 3) wait for the transport queues to be empty
3886  */
3887 static int
3888 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3889 {
3890 	int ret;
3891 	struct iwm_tx_path_flush_cmd flush_cmd = {
3892 		.queues_ctl = htole32(tfd_msk),
3893 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3894 	};
3895 
3896 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3897 	    sizeof(flush_cmd), &flush_cmd);
3898 	if (ret)
3899                 device_printf(sc->sc_dev,
3900 		    "Flushing tx queue failed: %d\n", ret);
3901 	return ret;
3902 }
3903 
3904 static int
3905 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3906 	struct iwm_mvm_add_sta_cmd *cmd, int *status)
3907 {
3908 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3909 	    cmd, status);
3910 }
3911 
3912 /* send station add/update command to firmware */
3913 static int
3914 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3915 {
3916 	struct iwm_vap *ivp = IWM_VAP(in->in_ni.ni_vap);
3917 	struct iwm_mvm_add_sta_cmd add_sta_cmd;
3918 	int ret;
3919 	uint32_t status;
3920 
3921 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3922 
3923 	add_sta_cmd.sta_id = IWM_STATION_ID;
3924 	add_sta_cmd.mac_id_n_color
3925 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(ivp->id, ivp->color));
3926 	if (!update) {
3927 		int ac;
3928 		for (ac = 0; ac < WME_NUM_AC; ac++) {
3929 			add_sta_cmd.tfd_queue_msk |=
3930 			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3931 		}
3932 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3933 	}
3934 	add_sta_cmd.add_modify = update ? 1 : 0;
3935 	add_sta_cmd.station_flags_msk
3936 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3937 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3938 	if (update)
3939 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3940 
3941 	status = IWM_ADD_STA_SUCCESS;
3942 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3943 	if (ret)
3944 		return ret;
3945 
3946 	switch (status & IWM_ADD_STA_STATUS_MASK) {
3947 	case IWM_ADD_STA_SUCCESS:
3948 		break;
3949 	default:
3950 		ret = EIO;
3951 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3952 		break;
3953 	}
3954 
3955 	return ret;
3956 }
3957 
3958 static int
3959 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3960 {
3961 	return iwm_mvm_sta_send_to_fw(sc, in, 0);
3962 }
3963 
3964 static int
3965 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3966 {
3967 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
3968 }
3969 
3970 static int
3971 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3972 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3973 {
3974 	struct iwm_mvm_add_sta_cmd cmd;
3975 	int ret;
3976 	uint32_t status;
3977 
3978 	memset(&cmd, 0, sizeof(cmd));
3979 	cmd.sta_id = sta->sta_id;
3980 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3981 
3982 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3983 	cmd.tid_disable_tx = htole16(0xffff);
3984 
3985 	if (addr)
3986 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3987 
3988 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3989 	if (ret)
3990 		return ret;
3991 
3992 	switch (status & IWM_ADD_STA_STATUS_MASK) {
3993 	case IWM_ADD_STA_SUCCESS:
3994 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3995 		    "%s: Internal station added.\n", __func__);
3996 		return 0;
3997 	default:
3998 		device_printf(sc->sc_dev,
3999 		    "%s: Add internal station failed, status=0x%x\n",
4000 		    __func__, status);
4001 		ret = EIO;
4002 		break;
4003 	}
4004 	return ret;
4005 }
4006 
4007 static int
4008 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4009 {
4010 	int ret;
4011 
4012 	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4013 	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4014 
4015 	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4016 	if (ret)
4017 		return ret;
4018 
4019 	ret = iwm_mvm_add_int_sta_common(sc,
4020 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4021 
4022 	if (ret)
4023 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4024 	return ret;
4025 }
4026 
4027 static int
4028 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4029 {
4030 	struct iwm_time_quota_cmd cmd;
4031 	int i, idx, ret, num_active_macs, quota, quota_rem;
4032 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4033 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4034 	uint16_t id;
4035 
4036 	memset(&cmd, 0, sizeof(cmd));
4037 
4038 	/* currently, PHY ID == binding ID */
4039 	if (ivp) {
4040 		id = ivp->phy_ctxt->id;
4041 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4042 		colors[id] = ivp->phy_ctxt->color;
4043 
4044 		if (1)
4045 			n_ifs[id] = 1;
4046 	}
4047 
4048 	/*
4049 	 * The FW's scheduling session consists of
4050 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4051 	 * equally between all the bindings that require quota
4052 	 */
4053 	num_active_macs = 0;
4054 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4055 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4056 		num_active_macs += n_ifs[i];
4057 	}
4058 
4059 	quota = 0;
4060 	quota_rem = 0;
4061 	if (num_active_macs) {
4062 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4063 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4064 	}
4065 
4066 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4067 		if (colors[i] < 0)
4068 			continue;
4069 
4070 		cmd.quotas[idx].id_and_color =
4071 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4072 
4073 		if (n_ifs[i] <= 0) {
4074 			cmd.quotas[idx].quota = htole32(0);
4075 			cmd.quotas[idx].max_duration = htole32(0);
4076 		} else {
4077 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4078 			cmd.quotas[idx].max_duration = htole32(0);
4079 		}
4080 		idx++;
4081 	}
4082 
4083 	/* Give the remainder of the session to the first binding */
4084 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4085 
4086 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4087 	    sizeof(cmd), &cmd);
4088 	if (ret)
4089 		device_printf(sc->sc_dev,
4090 		    "%s: Failed to send quota: %d\n", __func__, ret);
4091 	return ret;
4092 }
4093 
4094 /*
4095  * ieee80211 routines
4096  */
4097 
4098 /*
4099  * Change to AUTH state in 80211 state machine.  Roughly matches what
4100  * Linux does in bss_info_changed().
4101  */
4102 static int
4103 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4104 {
4105 	struct ieee80211_node *ni;
4106 	struct iwm_node *in;
4107 	struct iwm_vap *iv = IWM_VAP(vap);
4108 	uint32_t duration;
4109 	int error;
4110 
4111 	/*
4112 	 * XXX i have a feeling that the vap node is being
4113 	 * freed from underneath us. Grr.
4114 	 */
4115 	ni = ieee80211_ref_node(vap->iv_bss);
4116 	in = IWM_NODE(ni);
4117 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4118 	    "%s: called; vap=%p, bss ni=%p\n",
4119 	    __func__,
4120 	    vap,
4121 	    ni);
4122 
4123 	in->in_assoc = 0;
4124 
4125 	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4126 	if (error != 0)
4127 		return error;
4128 
4129 	error = iwm_allow_mcast(vap, sc);
4130 	if (error) {
4131 		device_printf(sc->sc_dev,
4132 		    "%s: failed to set multicast\n", __func__);
4133 		goto out;
4134 	}
4135 
4136 	/*
4137 	 * This is where it deviates from what Linux does.
4138 	 *
4139 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4140 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4141 	 * and always does a mac_ctx_changed().
4142 	 *
4143 	 * The openbsd port doesn't attempt to do that - it reset things
4144 	 * at odd states and does the add here.
4145 	 *
4146 	 * So, until the state handling is fixed (ie, we never reset
4147 	 * the NIC except for a firmware failure, which should drag
4148 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4149 	 * contexts that are required), let's do a dirty hack here.
4150 	 */
4151 	if (iv->is_uploaded) {
4152 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4153 			device_printf(sc->sc_dev,
4154 			    "%s: failed to update MAC\n", __func__);
4155 			goto out;
4156 		}
4157 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4158 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4159 			device_printf(sc->sc_dev,
4160 			    "%s: failed update phy ctxt\n", __func__);
4161 			goto out;
4162 		}
4163 		iv->phy_ctxt = &sc->sc_phyctxt[0];
4164 
4165 		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4166 			device_printf(sc->sc_dev,
4167 			    "%s: binding update cmd\n", __func__);
4168 			goto out;
4169 		}
4170 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4171 			device_printf(sc->sc_dev,
4172 			    "%s: failed to update sta\n", __func__);
4173 			goto out;
4174 		}
4175 	} else {
4176 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4177 			device_printf(sc->sc_dev,
4178 			    "%s: failed to add MAC\n", __func__);
4179 			goto out;
4180 		}
4181 		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4182 			device_printf(sc->sc_dev,
4183 			    "%s: failed to update power management\n",
4184 			    __func__);
4185 			goto out;
4186 		}
4187 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4188 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4189 			device_printf(sc->sc_dev,
4190 			    "%s: failed add phy ctxt!\n", __func__);
4191 			error = ETIMEDOUT;
4192 			goto out;
4193 		}
4194 		iv->phy_ctxt = &sc->sc_phyctxt[0];
4195 
4196 		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4197 			device_printf(sc->sc_dev,
4198 			    "%s: binding add cmd\n", __func__);
4199 			goto out;
4200 		}
4201 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4202 			device_printf(sc->sc_dev,
4203 			    "%s: failed to add sta\n", __func__);
4204 			goto out;
4205 		}
4206 	}
4207 
4208 	/*
4209 	 * Prevent the FW from wandering off channel during association
4210 	 * by "protecting" the session with a time event.
4211 	 */
4212 	/* XXX duration is in units of TU, not MS */
4213 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4214 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4215 	DELAY(100);
4216 
4217 	error = 0;
4218 out:
4219 	ieee80211_free_node(ni);
4220 	return (error);
4221 }
4222 
4223 static int
4224 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4225 {
4226 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4227 	int error;
4228 
4229 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4230 		device_printf(sc->sc_dev,
4231 		    "%s: failed to update STA\n", __func__);
4232 		return error;
4233 	}
4234 
4235 	in->in_assoc = 1;
4236 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4237 		device_printf(sc->sc_dev,
4238 		    "%s: failed to update MAC\n", __func__);
4239 		return error;
4240 	}
4241 
4242 	return 0;
4243 }
4244 
4245 static int
4246 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4247 {
4248 	uint32_t tfd_msk;
4249 
4250 	/*
4251 	 * Ok, so *technically* the proper set of calls for going
4252 	 * from RUN back to SCAN is:
4253 	 *
4254 	 * iwm_mvm_power_mac_disable(sc, in);
4255 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4256 	 * iwm_mvm_rm_sta(sc, in);
4257 	 * iwm_mvm_update_quotas(sc, NULL);
4258 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4259 	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4260 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4261 	 *
4262 	 * However, that freezes the device not matter which permutations
4263 	 * and modifications are attempted.  Obviously, this driver is missing
4264 	 * something since it works in the Linux driver, but figuring out what
4265 	 * is missing is a little more complicated.  Now, since we're going
4266 	 * back to nothing anyway, we'll just do a complete device reset.
4267 	 * Up your's, device!
4268 	 */
4269 	/*
4270 	 * Just using 0xf for the queues mask is fine as long as we only
4271 	 * get here from RUN state.
4272 	 */
4273 	tfd_msk = 0xf;
4274 	mbufq_drain(&sc->sc_snd);
4275 	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4276 	/*
4277 	 * We seem to get away with just synchronously sending the
4278 	 * IWM_TXPATH_FLUSH command.
4279 	 */
4280 //	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4281 	iwm_stop_device(sc);
4282 	iwm_init_hw(sc);
4283 	if (in)
4284 		in->in_assoc = 0;
4285 	return 0;
4286 
4287 #if 0
4288 	int error;
4289 
4290 	iwm_mvm_power_mac_disable(sc, in);
4291 
4292 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4293 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4294 		return error;
4295 	}
4296 
4297 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4298 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4299 		return error;
4300 	}
4301 	error = iwm_mvm_rm_sta(sc, in);
4302 	in->in_assoc = 0;
4303 	iwm_mvm_update_quotas(sc, NULL);
4304 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4305 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4306 		return error;
4307 	}
4308 	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4309 
4310 	iwm_mvm_mac_ctxt_remove(sc, in);
4311 
4312 	return error;
4313 #endif
4314 }
4315 
4316 static struct ieee80211_node *
4317 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4318 {
4319 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4320 	    M_INTWAIT | M_ZERO);
4321 }
4322 
4323 uint8_t
4324 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4325 {
4326 	int i;
4327 	uint8_t rval;
4328 
4329 	for (i = 0; i < rs->rs_nrates; i++) {
4330 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4331 		if (rval == iwm_rates[ridx].rate)
4332 			return rs->rs_rates[i];
4333 	}
4334 
4335 	return 0;
4336 }
4337 
4338 static void
4339 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4340 {
4341 	struct ieee80211_node *ni = &in->in_ni;
4342 	struct iwm_lq_cmd *lq = &in->in_lq;
4343 	int nrates = ni->ni_rates.rs_nrates;
4344 	int i, ridx, tab = 0;
4345 	int txant = 0;
4346 
4347 	if (nrates > nitems(lq->rs_table)) {
4348 		device_printf(sc->sc_dev,
4349 		    "%s: node supports %d rates, driver handles "
4350 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4351 		return;
4352 	}
4353 	if (nrates == 0) {
4354 		device_printf(sc->sc_dev,
4355 		    "%s: node supports 0 rates, odd!\n", __func__);
4356 		return;
4357 	}
4358 
4359 	/*
4360 	 * XXX .. and most of iwm_node is not initialised explicitly;
4361 	 * it's all just 0x0 passed to the firmware.
4362 	 */
4363 
4364 	/* first figure out which rates we should support */
4365 	/* XXX TODO: this isn't 11n aware /at all/ */
4366 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4367 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4368 	    "%s: nrates=%d\n", __func__, nrates);
4369 
4370 	/*
4371 	 * Loop over nrates and populate in_ridx from the highest
4372 	 * rate to the lowest rate.  Remember, in_ridx[] has
4373 	 * IEEE80211_RATE_MAXSIZE entries!
4374 	 */
4375 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4376 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4377 
4378 		/* Map 802.11 rate to HW rate index. */
4379 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4380 			if (iwm_rates[ridx].rate == rate)
4381 				break;
4382 		if (ridx > IWM_RIDX_MAX) {
4383 			device_printf(sc->sc_dev,
4384 			    "%s: WARNING: device rate for %d not found!\n",
4385 			    __func__, rate);
4386 		} else {
4387 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4388 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4389 			    __func__,
4390 			    i,
4391 			    rate,
4392 			    ridx);
4393 			in->in_ridx[i] = ridx;
4394 		}
4395 	}
4396 
4397 	/* then construct a lq_cmd based on those */
4398 	memset(lq, 0, sizeof(*lq));
4399 	lq->sta_id = IWM_STATION_ID;
4400 
4401 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4402 	if (ni->ni_flags & IEEE80211_NODE_HT)
4403 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4404 
4405 	/*
4406 	 * are these used? (we don't do SISO or MIMO)
4407 	 * need to set them to non-zero, though, or we get an error.
4408 	 */
4409 	lq->single_stream_ant_msk = 1;
4410 	lq->dual_stream_ant_msk = 1;
4411 
4412 	/*
4413 	 * Build the actual rate selection table.
4414 	 * The lowest bits are the rates.  Additionally,
4415 	 * CCK needs bit 9 to be set.  The rest of the bits
4416 	 * we add to the table select the tx antenna
4417 	 * Note that we add the rates in the highest rate first
4418 	 * (opposite of ni_rates).
4419 	 */
4420 	/*
4421 	 * XXX TODO: this should be looping over the min of nrates
4422 	 * and LQ_MAX_RETRY_NUM.  Sigh.
4423 	 */
4424 	for (i = 0; i < nrates; i++) {
4425 		int nextant;
4426 
4427 		if (txant == 0)
4428 			txant = iwm_mvm_get_valid_tx_ant(sc);
4429 		nextant = 1<<(ffs(txant)-1);
4430 		txant &= ~nextant;
4431 
4432 		/*
4433 		 * Map the rate id into a rate index into
4434 		 * our hardware table containing the
4435 		 * configuration to use for this rate.
4436 		 */
4437 		ridx = in->in_ridx[i];
4438 		tab = iwm_rates[ridx].plcp;
4439 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4440 		if (IWM_RIDX_IS_CCK(ridx))
4441 			tab |= IWM_RATE_MCS_CCK_MSK;
4442 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4443 		    "station rate i=%d, rate=%d, hw=%x\n",
4444 		    i, iwm_rates[ridx].rate, tab);
4445 		lq->rs_table[i] = htole32(tab);
4446 	}
4447 	/* then fill the rest with the lowest possible rate */
4448 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4449 		KASSERT(tab != 0, ("invalid tab"));
4450 		lq->rs_table[i] = htole32(tab);
4451 	}
4452 }
4453 
4454 static int
4455 iwm_media_change(struct ifnet *ifp)
4456 {
4457 	struct ieee80211vap *vap = ifp->if_softc;
4458 	struct ieee80211com *ic = vap->iv_ic;
4459 	struct iwm_softc *sc = ic->ic_softc;
4460 	int error;
4461 
4462 	error = ieee80211_media_change(ifp);
4463 	if (error != ENETRESET)
4464 		return error;
4465 
4466 	IWM_LOCK(sc);
4467 	if (ic->ic_nrunning > 0) {
4468 		iwm_stop(sc);
4469 		iwm_init(sc);
4470 	}
4471 	IWM_UNLOCK(sc);
4472 	return error;
4473 }
4474 
4475 
4476 static int
4477 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4478 {
4479 	struct iwm_vap *ivp = IWM_VAP(vap);
4480 	struct ieee80211com *ic = vap->iv_ic;
4481 	struct iwm_softc *sc = ic->ic_softc;
4482 	struct iwm_node *in;
4483 	int error;
4484 
4485 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4486 	    "switching state %s -> %s\n",
4487 	    ieee80211_state_name[vap->iv_state],
4488 	    ieee80211_state_name[nstate]);
4489 	IEEE80211_UNLOCK(ic);
4490 	IWM_LOCK(sc);
4491 
4492 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4493 		iwm_led_blink_stop(sc);
4494 
4495 	/* disable beacon filtering if we're hopping out of RUN */
4496 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4497 		iwm_mvm_disable_beacon_filter(sc);
4498 
4499 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4500 			in->in_assoc = 0;
4501 
4502 		if (nstate == IEEE80211_S_INIT) {
4503 			IWM_UNLOCK(sc);
4504 			IEEE80211_LOCK(ic);
4505 			error = ivp->iv_newstate(vap, nstate, arg);
4506 			IEEE80211_UNLOCK(ic);
4507 			IWM_LOCK(sc);
4508 			iwm_release(sc, NULL);
4509 			IWM_UNLOCK(sc);
4510 			IEEE80211_LOCK(ic);
4511 			return error;
4512 		}
4513 
4514 		/*
4515 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4516 		 * above then the card will be completely reinitialized,
4517 		 * so the driver must do everything necessary to bring the card
4518 		 * from INIT to SCAN.
4519 		 *
4520 		 * Additionally, upon receiving deauth frame from AP,
4521 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4522 		 * state. This will also fail with this driver, so bring the FSM
4523 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4524 		 *
4525 		 * XXX TODO: fix this for FreeBSD!
4526 		 */
4527 		if (nstate == IEEE80211_S_SCAN ||
4528 		    nstate == IEEE80211_S_AUTH ||
4529 		    nstate == IEEE80211_S_ASSOC) {
4530 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4531 			    "Force transition to INIT; MGT=%d\n", arg);
4532 			IWM_UNLOCK(sc);
4533 			IEEE80211_LOCK(ic);
4534 			/* Always pass arg as -1 since we can't Tx right now. */
4535 			/*
4536 			 * XXX arg is just ignored anyway when transitioning
4537 			 *     to IEEE80211_S_INIT.
4538 			 */
4539 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4540 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4541 			    "Going INIT->SCAN\n");
4542 			nstate = IEEE80211_S_SCAN;
4543 			IEEE80211_UNLOCK(ic);
4544 			IWM_LOCK(sc);
4545 		}
4546 	}
4547 
4548 	switch (nstate) {
4549 	case IEEE80211_S_INIT:
4550 		break;
4551 
4552 	case IEEE80211_S_AUTH:
4553 		if ((error = iwm_auth(vap, sc)) != 0) {
4554 			device_printf(sc->sc_dev,
4555 			    "%s: could not move to auth state: %d\n",
4556 			    __func__, error);
4557 			break;
4558 		}
4559 		break;
4560 
4561 	case IEEE80211_S_ASSOC:
4562 		/*
4563 		 * EBS may be disabled due to previous failures reported by FW.
4564 		 * Reset EBS status here assuming environment has been changed.
4565 		 */
4566                 sc->last_ebs_successful = TRUE;
4567 		if ((error = iwm_assoc(vap, sc)) != 0) {
4568 			device_printf(sc->sc_dev,
4569 			    "%s: failed to associate: %d\n", __func__,
4570 			    error);
4571 			break;
4572 		}
4573 		break;
4574 
4575 	case IEEE80211_S_RUN:
4576 	{
4577 		struct iwm_host_cmd cmd = {
4578 			.id = IWM_LQ_CMD,
4579 			.len = { sizeof(in->in_lq), },
4580 			.flags = IWM_CMD_SYNC,
4581 		};
4582 
4583 		/* Update the association state, now we have it all */
4584 		/* (eg associd comes in at this point */
4585 		error = iwm_assoc(vap, sc);
4586 		if (error != 0) {
4587 			device_printf(sc->sc_dev,
4588 			    "%s: failed to update association state: %d\n",
4589 			    __func__,
4590 			    error);
4591 			break;
4592 		}
4593 
4594 		in = IWM_NODE(vap->iv_bss);
4595 		iwm_mvm_enable_beacon_filter(sc, in);
4596 		iwm_mvm_power_update_mac(sc);
4597 		iwm_mvm_update_quotas(sc, ivp);
4598 		iwm_setrates(sc, in);
4599 
4600 		cmd.data[0] = &in->in_lq;
4601 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4602 			device_printf(sc->sc_dev,
4603 			    "%s: IWM_LQ_CMD failed\n", __func__);
4604 		}
4605 
4606 		iwm_mvm_led_enable(sc);
4607 		break;
4608 	}
4609 
4610 	default:
4611 		break;
4612 	}
4613 	IWM_UNLOCK(sc);
4614 	IEEE80211_LOCK(ic);
4615 
4616 	return (ivp->iv_newstate(vap, nstate, arg));
4617 }
4618 
4619 void
4620 iwm_endscan_cb(void *arg, int pending)
4621 {
4622 	struct iwm_softc *sc = arg;
4623 	struct ieee80211com *ic = &sc->sc_ic;
4624 
4625 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4626 	    "%s: scan ended\n",
4627 	    __func__);
4628 
4629 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4630 }
4631 
4632 /*
4633  * Aging and idle timeouts for the different possible scenarios
4634  * in default configuration
4635  */
4636 static const uint32_t
4637 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4638 	{
4639 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4640 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4641 	},
4642 	{
4643 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4644 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4645 	},
4646 	{
4647 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4648 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4649 	},
4650 	{
4651 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4652 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4653 	},
4654 	{
4655 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4656 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4657 	},
4658 };
4659 
4660 /*
4661  * Aging and idle timeouts for the different possible scenarios
4662  * in single BSS MAC configuration.
4663  */
4664 static const uint32_t
4665 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4666 	{
4667 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4668 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4669 	},
4670 	{
4671 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4672 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4673 	},
4674 	{
4675 		htole32(IWM_SF_MCAST_AGING_TIMER),
4676 		htole32(IWM_SF_MCAST_IDLE_TIMER)
4677 	},
4678 	{
4679 		htole32(IWM_SF_BA_AGING_TIMER),
4680 		htole32(IWM_SF_BA_IDLE_TIMER)
4681 	},
4682 	{
4683 		htole32(IWM_SF_TX_RE_AGING_TIMER),
4684 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4685 	},
4686 };
4687 
4688 static void
4689 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4690     struct ieee80211_node *ni)
4691 {
4692 	int i, j, watermark;
4693 
4694 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4695 
4696 	/*
4697 	 * If we are in association flow - check antenna configuration
4698 	 * capabilities of the AP station, and choose the watermark accordingly.
4699 	 */
4700 	if (ni) {
4701 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4702 #ifdef notyet
4703 			if (ni->ni_rxmcs[2] != 0)
4704 				watermark = IWM_SF_W_MARK_MIMO3;
4705 			else if (ni->ni_rxmcs[1] != 0)
4706 				watermark = IWM_SF_W_MARK_MIMO2;
4707 			else
4708 #endif
4709 				watermark = IWM_SF_W_MARK_SISO;
4710 		} else {
4711 			watermark = IWM_SF_W_MARK_LEGACY;
4712 		}
4713 	/* default watermark value for unassociated mode. */
4714 	} else {
4715 		watermark = IWM_SF_W_MARK_MIMO2;
4716 	}
4717 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4718 
4719 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4720 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4721 			sf_cmd->long_delay_timeouts[i][j] =
4722 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4723 		}
4724 	}
4725 
4726 	if (ni) {
4727 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4728 		       sizeof(iwm_sf_full_timeout));
4729 	} else {
4730 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4731 		       sizeof(iwm_sf_full_timeout_def));
4732 	}
4733 }
4734 
4735 static int
4736 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4737 {
4738 	struct ieee80211com *ic = &sc->sc_ic;
4739 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4740 	struct iwm_sf_cfg_cmd sf_cmd = {
4741 		.state = htole32(IWM_SF_FULL_ON),
4742 	};
4743 	int ret = 0;
4744 
4745 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4746 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4747 
4748 	switch (new_state) {
4749 	case IWM_SF_UNINIT:
4750 	case IWM_SF_INIT_OFF:
4751 		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4752 		break;
4753 	case IWM_SF_FULL_ON:
4754 		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4755 		break;
4756 	default:
4757 		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4758 		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4759 			  new_state);
4760 		return EINVAL;
4761 	}
4762 
4763 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4764 				   sizeof(sf_cmd), &sf_cmd);
4765 	return ret;
4766 }
4767 
4768 static int
4769 iwm_send_bt_init_conf(struct iwm_softc *sc)
4770 {
4771 	struct iwm_bt_coex_cmd bt_cmd;
4772 
4773 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4774 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4775 
4776 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4777 	    &bt_cmd);
4778 }
4779 
4780 static int
4781 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4782 {
4783 	struct iwm_mcc_update_cmd mcc_cmd;
4784 	struct iwm_host_cmd hcmd = {
4785 		.id = IWM_MCC_UPDATE_CMD,
4786 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4787 		.data = { &mcc_cmd },
4788 	};
4789 	int ret;
4790 #ifdef IWM_DEBUG
4791 	struct iwm_rx_packet *pkt;
4792 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4793 	struct iwm_mcc_update_resp *mcc_resp;
4794 	int n_channels;
4795 	uint16_t mcc;
4796 #endif
4797 	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4798 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4799 
4800 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4801 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4802 	if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4803 	    fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4804 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4805 	else
4806 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4807 
4808 	if (resp_v2)
4809 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4810 	else
4811 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4812 
4813 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4814 	    "send MCC update to FW with '%c%c' src = %d\n",
4815 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4816 
4817 	ret = iwm_send_cmd(sc, &hcmd);
4818 	if (ret)
4819 		return ret;
4820 
4821 #ifdef IWM_DEBUG
4822 	pkt = hcmd.resp_pkt;
4823 
4824 	/* Extract MCC response */
4825 	if (resp_v2) {
4826 		mcc_resp = (void *)pkt->data;
4827 		mcc = mcc_resp->mcc;
4828 		n_channels =  le32toh(mcc_resp->n_channels);
4829 	} else {
4830 		mcc_resp_v1 = (void *)pkt->data;
4831 		mcc = mcc_resp_v1->mcc;
4832 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4833 	}
4834 
4835 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4836 	if (mcc == 0)
4837 		mcc = 0x3030;  /* "00" - world */
4838 
4839 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4840 	    "regulatory domain '%c%c' (%d channels available)\n",
4841 	    mcc >> 8, mcc & 0xff, n_channels);
4842 #endif
4843 	iwm_free_resp(sc, &hcmd);
4844 
4845 	return 0;
4846 }
4847 
4848 static void
4849 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4850 {
4851 	struct iwm_host_cmd cmd = {
4852 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4853 		.len = { sizeof(uint32_t), },
4854 		.data = { &backoff, },
4855 	};
4856 
4857 	if (iwm_send_cmd(sc, &cmd) != 0) {
4858 		device_printf(sc->sc_dev,
4859 		    "failed to change thermal tx backoff\n");
4860 	}
4861 }
4862 
4863 static int
4864 iwm_init_hw(struct iwm_softc *sc)
4865 {
4866 	struct ieee80211com *ic = &sc->sc_ic;
4867 	int error, i, ac;
4868 
4869 	if ((error = iwm_start_hw(sc)) != 0) {
4870 		kprintf("iwm_start_hw: failed %d\n", error);
4871 		return error;
4872 	}
4873 
4874 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4875 		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4876 		return error;
4877 	}
4878 
4879 	/*
4880 	 * should stop and start HW since that INIT
4881 	 * image just loaded
4882 	 */
4883 	iwm_stop_device(sc);
4884 	sc->sc_ps_disabled = FALSE;
4885 	if ((error = iwm_start_hw(sc)) != 0) {
4886 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4887 		return error;
4888 	}
4889 
4890 	/* omstart, this time with the regular firmware */
4891 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4892 	if (error) {
4893 		device_printf(sc->sc_dev, "could not load firmware\n");
4894 		goto error;
4895 	}
4896 
4897 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4898 		device_printf(sc->sc_dev, "bt init conf failed\n");
4899 		goto error;
4900 	}
4901 
4902 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4903 	if (error != 0) {
4904 		device_printf(sc->sc_dev, "antenna config failed\n");
4905 		goto error;
4906 	}
4907 
4908 	/* Send phy db control command and then phy db calibration */
4909 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4910 		goto error;
4911 
4912 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4913 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4914 		goto error;
4915 	}
4916 
4917 	/* Add auxiliary station for scanning */
4918 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4919 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4920 		goto error;
4921 	}
4922 
4923 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4924 		/*
4925 		 * The channel used here isn't relevant as it's
4926 		 * going to be overwritten in the other flows.
4927 		 * For now use the first channel we have.
4928 		 */
4929 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4930 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4931 			goto error;
4932 	}
4933 
4934 	/* Initialize tx backoffs to the minimum. */
4935 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4936 		iwm_mvm_tt_tx_backoff(sc, 0);
4937 
4938 	error = iwm_mvm_power_update_device(sc);
4939 	if (error)
4940 		goto error;
4941 
4942 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4943 		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4944 			goto error;
4945 	}
4946 
4947 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4948 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4949 			goto error;
4950 	}
4951 
4952 	/* Enable Tx queues. */
4953 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4954 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4955 		    iwm_mvm_ac_to_tx_fifo[ac]);
4956 		if (error)
4957 			goto error;
4958 	}
4959 
4960 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4961 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4962 		goto error;
4963 	}
4964 
4965 	return 0;
4966 
4967  error:
4968 	iwm_stop_device(sc);
4969 	return error;
4970 }
4971 
4972 /* Allow multicast from our BSSID. */
4973 static int
4974 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4975 {
4976 	struct ieee80211_node *ni = vap->iv_bss;
4977 	struct iwm_mcast_filter_cmd *cmd;
4978 	size_t size;
4979 	int error;
4980 
4981 	size = roundup(sizeof(*cmd), 4);
4982 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4983 	if (cmd == NULL)
4984 		return ENOMEM;
4985 	cmd->filter_own = 1;
4986 	cmd->port_id = 0;
4987 	cmd->count = 0;
4988 	cmd->pass_all = 1;
4989 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4990 
4991 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4992 	    IWM_CMD_SYNC, size, cmd);
4993 	kfree(cmd, M_DEVBUF);
4994 
4995 	return (error);
4996 }
4997 
4998 /*
4999  * ifnet interfaces
5000  */
5001 
5002 static void
5003 iwm_init(struct iwm_softc *sc)
5004 {
5005 	int error;
5006 
5007 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5008 		return;
5009 	}
5010 	sc->sc_generation++;
5011 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5012 
5013 	if ((error = iwm_init_hw(sc)) != 0) {
5014 		kprintf("iwm_init_hw failed %d\n", error);
5015 		iwm_stop(sc);
5016 		return;
5017 	}
5018 
5019 	/*
5020 	 * Ok, firmware loaded and we are jogging
5021 	 */
5022 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5023 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5024 }
5025 
5026 static int
5027 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5028 {
5029 	struct iwm_softc *sc;
5030 	int error;
5031 
5032 	sc = ic->ic_softc;
5033 
5034 	IWM_LOCK(sc);
5035 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5036 		IWM_UNLOCK(sc);
5037 		return (ENXIO);
5038 	}
5039 	error = mbufq_enqueue(&sc->sc_snd, m);
5040 	if (error) {
5041 		IWM_UNLOCK(sc);
5042 		return (error);
5043 	}
5044 	iwm_start(sc);
5045 	IWM_UNLOCK(sc);
5046 	return (0);
5047 }
5048 
5049 /*
5050  * Dequeue packets from sendq and call send.
5051  */
5052 static void
5053 iwm_start(struct iwm_softc *sc)
5054 {
5055 	struct ieee80211_node *ni;
5056 	struct mbuf *m;
5057 	int ac = 0;
5058 
5059 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5060 	while (sc->qfullmsk == 0 &&
5061 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5062 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5063 		if (iwm_tx(sc, m, ni, ac) != 0) {
5064 			if_inc_counter(ni->ni_vap->iv_ifp,
5065 			    IFCOUNTER_OERRORS, 1);
5066 			ieee80211_free_node(ni);
5067 			continue;
5068 		}
5069 		sc->sc_tx_timer = 15;
5070 	}
5071 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5072 }
5073 
5074 static void
5075 iwm_stop(struct iwm_softc *sc)
5076 {
5077 
5078 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5079 	sc->sc_flags |= IWM_FLAG_STOPPED;
5080 	sc->sc_generation++;
5081 	iwm_led_blink_stop(sc);
5082 	sc->sc_tx_timer = 0;
5083 	iwm_stop_device(sc);
5084 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5085 }
5086 
5087 static void
5088 iwm_watchdog(void *arg)
5089 {
5090 	struct iwm_softc *sc = arg;
5091 
5092 	if (sc->sc_tx_timer > 0) {
5093 		if (--sc->sc_tx_timer == 0) {
5094 			device_printf(sc->sc_dev, "device timeout\n");
5095 #ifdef IWM_DEBUG
5096 			iwm_nic_error(sc);
5097 #endif
5098 			iwm_stop(sc);
5099 #if defined(__DragonFly__)
5100 			++sc->sc_ic.ic_oerrors;
5101 #else
5102 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5103 #endif
5104 			return;
5105 		}
5106 	}
5107 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5108 }
5109 
5110 static void
5111 iwm_parent(struct ieee80211com *ic)
5112 {
5113 	struct iwm_softc *sc = ic->ic_softc;
5114 	int startall = 0;
5115 
5116 	IWM_LOCK(sc);
5117 	if (ic->ic_nrunning > 0) {
5118 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5119 			iwm_init(sc);
5120 			startall = 1;
5121 		}
5122 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5123 		iwm_stop(sc);
5124 	IWM_UNLOCK(sc);
5125 	if (startall)
5126 		ieee80211_start_all(ic);
5127 }
5128 
5129 /*
5130  * The interrupt side of things
5131  */
5132 
5133 /*
5134  * error dumping routines are from iwlwifi/mvm/utils.c
5135  */
5136 
5137 /*
5138  * Note: This structure is read from the device with IO accesses,
5139  * and the reading already does the endian conversion. As it is
5140  * read with uint32_t-sized accesses, any members with a different size
5141  * need to be ordered correctly though!
5142  */
5143 struct iwm_error_event_table {
5144 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5145 	uint32_t error_id;		/* type of error */
5146 	uint32_t trm_hw_status0;	/* TRM HW status */
5147 	uint32_t trm_hw_status1;	/* TRM HW status */
5148 	uint32_t blink2;		/* branch link */
5149 	uint32_t ilink1;		/* interrupt link */
5150 	uint32_t ilink2;		/* interrupt link */
5151 	uint32_t data1;		/* error-specific data */
5152 	uint32_t data2;		/* error-specific data */
5153 	uint32_t data3;		/* error-specific data */
5154 	uint32_t bcon_time;		/* beacon timer */
5155 	uint32_t tsf_low;		/* network timestamp function timer */
5156 	uint32_t tsf_hi;		/* network timestamp function timer */
5157 	uint32_t gp1;		/* GP1 timer register */
5158 	uint32_t gp2;		/* GP2 timer register */
5159 	uint32_t fw_rev_type;	/* firmware revision type */
5160 	uint32_t major;		/* uCode version major */
5161 	uint32_t minor;		/* uCode version minor */
5162 	uint32_t hw_ver;		/* HW Silicon version */
5163 	uint32_t brd_ver;		/* HW board version */
5164 	uint32_t log_pc;		/* log program counter */
5165 	uint32_t frame_ptr;		/* frame pointer */
5166 	uint32_t stack_ptr;		/* stack pointer */
5167 	uint32_t hcmd;		/* last host command header */
5168 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5169 				 * rxtx_flag */
5170 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5171 				 * host_flag */
5172 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5173 				 * enc_flag */
5174 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5175 				 * time_flag */
5176 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5177 				 * wico interrupt */
5178 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5179 	uint32_t wait_event;		/* wait event() caller address */
5180 	uint32_t l2p_control;	/* L2pControlField */
5181 	uint32_t l2p_duration;	/* L2pDurationField */
5182 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5183 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5184 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5185 				 * (LMPM_PMG_SEL) */
5186 	uint32_t u_timestamp;	/* indicate when the date and time of the
5187 				 * compilation */
5188 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5189 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5190 
5191 /*
5192  * UMAC error struct - relevant starting from family 8000 chip.
5193  * Note: This structure is read from the device with IO accesses,
5194  * and the reading already does the endian conversion. As it is
5195  * read with u32-sized accesses, any members with a different size
5196  * need to be ordered correctly though!
5197  */
5198 struct iwm_umac_error_event_table {
5199 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5200 	uint32_t error_id;	/* type of error */
5201 	uint32_t blink1;	/* branch link */
5202 	uint32_t blink2;	/* branch link */
5203 	uint32_t ilink1;	/* interrupt link */
5204 	uint32_t ilink2;	/* interrupt link */
5205 	uint32_t data1;		/* error-specific data */
5206 	uint32_t data2;		/* error-specific data */
5207 	uint32_t data3;		/* error-specific data */
5208 	uint32_t umac_major;
5209 	uint32_t umac_minor;
5210 	uint32_t frame_pointer;	/* core register 27*/
5211 	uint32_t stack_pointer;	/* core register 28 */
5212 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5213 	uint32_t nic_isr_pref;	/* ISR status register */
5214 } __packed;
5215 
5216 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5217 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5218 
5219 #ifdef IWM_DEBUG
5220 struct {
5221 	const char *name;
5222 	uint8_t num;
5223 } advanced_lookup[] = {
5224 	{ "NMI_INTERRUPT_WDG", 0x34 },
5225 	{ "SYSASSERT", 0x35 },
5226 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5227 	{ "BAD_COMMAND", 0x38 },
5228 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5229 	{ "FATAL_ERROR", 0x3D },
5230 	{ "NMI_TRM_HW_ERR", 0x46 },
5231 	{ "NMI_INTERRUPT_TRM", 0x4C },
5232 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5233 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5234 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5235 	{ "NMI_INTERRUPT_HOST", 0x66 },
5236 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5237 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5238 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5239 	{ "ADVANCED_SYSASSERT", 0 },
5240 };
5241 
5242 static const char *
5243 iwm_desc_lookup(uint32_t num)
5244 {
5245 	int i;
5246 
5247 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5248 		if (advanced_lookup[i].num == num)
5249 			return advanced_lookup[i].name;
5250 
5251 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5252 	return advanced_lookup[i].name;
5253 }
5254 
5255 static void
5256 iwm_nic_umac_error(struct iwm_softc *sc)
5257 {
5258 	struct iwm_umac_error_event_table table;
5259 	uint32_t base;
5260 
5261 	base = sc->umac_error_event_table;
5262 
5263 	if (base < 0x800000) {
5264 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5265 		    base);
5266 		return;
5267 	}
5268 
5269 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5270 		device_printf(sc->sc_dev, "reading errlog failed\n");
5271 		return;
5272 	}
5273 
5274 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5275 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5276 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5277 		    sc->sc_flags, table.valid);
5278 	}
5279 
5280 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5281 		iwm_desc_lookup(table.error_id));
5282 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5283 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5284 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5285 	    table.ilink1);
5286 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5287 	    table.ilink2);
5288 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5289 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5290 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5291 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5292 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5293 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5294 	    table.frame_pointer);
5295 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5296 	    table.stack_pointer);
5297 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5298 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5299 	    table.nic_isr_pref);
5300 }
5301 
5302 /*
5303  * Support for dumping the error log seemed like a good idea ...
5304  * but it's mostly hex junk and the only sensible thing is the
5305  * hw/ucode revision (which we know anyway).  Since it's here,
5306  * I'll just leave it in, just in case e.g. the Intel guys want to
5307  * help us decipher some "ADVANCED_SYSASSERT" later.
5308  */
5309 static void
5310 iwm_nic_error(struct iwm_softc *sc)
5311 {
5312 	struct iwm_error_event_table table;
5313 	uint32_t base;
5314 
5315 	device_printf(sc->sc_dev, "dumping device error log\n");
5316 	base = sc->error_event_table;
5317 	if (base < 0x800000) {
5318 		device_printf(sc->sc_dev,
5319 		    "Invalid error log pointer 0x%08x\n", base);
5320 		return;
5321 	}
5322 
5323 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5324 		device_printf(sc->sc_dev, "reading errlog failed\n");
5325 		return;
5326 	}
5327 
5328 	if (!table.valid) {
5329 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5330 		return;
5331 	}
5332 
5333 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5334 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5335 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5336 		    sc->sc_flags, table.valid);
5337 	}
5338 
5339 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5340 	    iwm_desc_lookup(table.error_id));
5341 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5342 	    table.trm_hw_status0);
5343 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5344 	    table.trm_hw_status1);
5345 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5346 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5347 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5348 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5349 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5350 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5351 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5352 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5353 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5354 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5355 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5356 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5357 	    table.fw_rev_type);
5358 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5359 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5360 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5361 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5362 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5363 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5364 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5365 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5366 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5367 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5368 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5369 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5370 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5371 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5372 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5373 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5374 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5375 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5376 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5377 
5378 	if (sc->umac_error_event_table)
5379 		iwm_nic_umac_error(sc);
5380 }
5381 #endif
5382 
5383 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5384 
5385 /*
5386  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5387  * Basic structure from if_iwn
5388  */
5389 static void
5390 iwm_notif_intr(struct iwm_softc *sc)
5391 {
5392 	struct ieee80211com *ic = &sc->sc_ic;
5393 	uint16_t hw;
5394 
5395 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5396 	    BUS_DMASYNC_POSTREAD);
5397 
5398 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5399 
5400 	/*
5401 	 * Process responses
5402 	 */
5403 	while (sc->rxq.cur != hw) {
5404 		struct iwm_rx_ring *ring = &sc->rxq;
5405 		struct iwm_rx_data *data = &ring->data[ring->cur];
5406 		struct iwm_rx_packet *pkt;
5407 		struct iwm_cmd_response *cresp;
5408 		int qid, idx, code;
5409 
5410 		bus_dmamap_sync(ring->data_dmat, data->map,
5411 		    BUS_DMASYNC_POSTREAD);
5412 		pkt = mtod(data->m, struct iwm_rx_packet *);
5413 
5414 		qid = pkt->hdr.qid & ~0x80;
5415 		idx = pkt->hdr.idx;
5416 
5417 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5418 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5419 		    "rx packet qid=%d idx=%d type=%x %d %d\n",
5420 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5421 
5422 		/*
5423 		 * randomly get these from the firmware, no idea why.
5424 		 * they at least seem harmless, so just ignore them for now
5425 		 */
5426 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5427 		    || pkt->len_n_flags == htole32(0x55550000))) {
5428 			ADVANCE_RXQ(sc);
5429 			continue;
5430 		}
5431 
5432 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5433 
5434 		switch (code) {
5435 		case IWM_REPLY_RX_PHY_CMD:
5436 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5437 			break;
5438 
5439 		case IWM_REPLY_RX_MPDU_CMD:
5440 			iwm_mvm_rx_rx_mpdu(sc, data->m);
5441 			break;
5442 
5443 		case IWM_TX_CMD:
5444 			iwm_mvm_rx_tx_cmd(sc, pkt);
5445 			break;
5446 
5447 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5448 			struct iwm_missed_beacons_notif *resp;
5449 			int missed;
5450 
5451 			/* XXX look at mac_id to determine interface ID */
5452 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5453 
5454 			resp = (void *)pkt->data;
5455 			missed = le32toh(resp->consec_missed_beacons);
5456 
5457 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5458 			    "%s: MISSED_BEACON: mac_id=%d, "
5459 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5460 			    "num_rx=%d\n",
5461 			    __func__,
5462 			    le32toh(resp->mac_id),
5463 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5464 			    le32toh(resp->consec_missed_beacons),
5465 			    le32toh(resp->num_expected_beacons),
5466 			    le32toh(resp->num_recvd_beacons));
5467 
5468 			/* Be paranoid */
5469 			if (vap == NULL)
5470 				break;
5471 
5472 			/* XXX no net80211 locking? */
5473 			if (vap->iv_state == IEEE80211_S_RUN &&
5474 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5475 				if (missed > vap->iv_bmissthreshold) {
5476 					/* XXX bad locking; turn into task */
5477 					IWM_UNLOCK(sc);
5478 					ieee80211_beacon_miss(ic);
5479 					IWM_LOCK(sc);
5480 				}
5481 			}
5482 
5483 			break; }
5484 
5485 		case IWM_MFUART_LOAD_NOTIFICATION:
5486 			break;
5487 
5488 		case IWM_MVM_ALIVE:
5489 			break;
5490 
5491 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5492 			break;
5493 
5494 		case IWM_STATISTICS_NOTIFICATION: {
5495 			struct iwm_notif_statistics *stats;
5496 			stats = (void *)pkt->data;
5497 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5498 			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5499 			break;
5500 		}
5501 
5502 		case IWM_NVM_ACCESS_CMD:
5503 		case IWM_MCC_UPDATE_CMD:
5504 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5505 				memcpy(sc->sc_cmd_resp,
5506 				    pkt, sizeof(sc->sc_cmd_resp));
5507 			}
5508 			break;
5509 
5510 		case IWM_MCC_CHUB_UPDATE_CMD: {
5511 			struct iwm_mcc_chub_notif *notif;
5512 			notif = (void *)pkt->data;
5513 
5514 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5515 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5516 			sc->sc_fw_mcc[2] = '\0';
5517 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5518 			    "fw source %d sent CC '%s'\n",
5519 			    notif->source_id, sc->sc_fw_mcc);
5520 			break;
5521 		}
5522 
5523 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5524 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5525 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5526 			struct iwm_dts_measurement_notif_v1 *notif;
5527 
5528 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5529 				device_printf(sc->sc_dev,
5530 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5531 				break;
5532 			}
5533 			notif = (void *)pkt->data;
5534 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5535 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5536 			    notif->temp);
5537 			break;
5538 		}
5539 
5540 		case IWM_PHY_CONFIGURATION_CMD:
5541 		case IWM_TX_ANT_CONFIGURATION_CMD:
5542 		case IWM_ADD_STA:
5543 		case IWM_MAC_CONTEXT_CMD:
5544 		case IWM_REPLY_SF_CFG_CMD:
5545 		case IWM_POWER_TABLE_CMD:
5546 		case IWM_PHY_CONTEXT_CMD:
5547 		case IWM_BINDING_CONTEXT_CMD:
5548 		case IWM_TIME_EVENT_CMD:
5549 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5550 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5551 		case IWM_SCAN_ABORT_UMAC:
5552 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5553 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5554 		case IWM_REPLY_BEACON_FILTERING_CMD:
5555 		case IWM_MAC_PM_POWER_TABLE:
5556 		case IWM_TIME_QUOTA_CMD:
5557 		case IWM_REMOVE_STA:
5558 		case IWM_TXPATH_FLUSH:
5559 		case IWM_LQ_CMD:
5560 		case IWM_FW_PAGING_BLOCK_CMD:
5561 		case IWM_BT_CONFIG:
5562 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5563 			cresp = (void *)pkt->data;
5564 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5565 				memcpy(sc->sc_cmd_resp,
5566 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5567 			}
5568 			break;
5569 
5570 		/* ignore */
5571 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5572 			break;
5573 
5574 		case IWM_INIT_COMPLETE_NOTIF:
5575 			break;
5576 
5577 		case IWM_SCAN_OFFLOAD_COMPLETE:
5578 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5579 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5580 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5581 				ieee80211_runtask(ic, &sc->sc_es_task);
5582 			}
5583 			break;
5584 
5585 		case IWM_SCAN_ITERATION_COMPLETE: {
5586 			struct iwm_lmac_scan_complete_notif *notif;
5587 			notif = (void *)pkt->data;
5588 			break;
5589 		}
5590 
5591 		case IWM_SCAN_COMPLETE_UMAC:
5592 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5593 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5594 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5595 				ieee80211_runtask(ic, &sc->sc_es_task);
5596 			}
5597 			break;
5598 
5599 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5600 			struct iwm_umac_scan_iter_complete_notif *notif;
5601 			notif = (void *)pkt->data;
5602 
5603 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5604 			    "complete, status=0x%x, %d channels scanned\n",
5605 			    notif->status, notif->scanned_channels);
5606 			break;
5607 		}
5608 
5609 		case IWM_REPLY_ERROR: {
5610 			struct iwm_error_resp *resp;
5611 			resp = (void *)pkt->data;
5612 
5613 			device_printf(sc->sc_dev,
5614 			    "firmware error 0x%x, cmd 0x%x\n",
5615 			    le32toh(resp->error_type),
5616 			    resp->cmd_id);
5617 			break;
5618 		}
5619 
5620 		case IWM_TIME_EVENT_NOTIFICATION: {
5621 			struct iwm_time_event_notif *notif;
5622 			notif = (void *)pkt->data;
5623 
5624 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5625 			    "TE notif status = 0x%x action = 0x%x\n",
5626 			    notif->status, notif->action);
5627 			break;
5628 		}
5629 
5630 		case IWM_MCAST_FILTER_CMD:
5631 			break;
5632 
5633 		case IWM_SCD_QUEUE_CFG: {
5634 			struct iwm_scd_txq_cfg_rsp *rsp;
5635 			rsp = (void *)pkt->data;
5636 
5637 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5638 			    "queue cfg token=0x%x sta_id=%d "
5639 			    "tid=%d scd_queue=%d\n",
5640 			    rsp->token, rsp->sta_id, rsp->tid,
5641 			    rsp->scd_queue);
5642 			break;
5643 		}
5644 
5645 		default:
5646 			device_printf(sc->sc_dev,
5647 			    "frame %d/%d %x UNHANDLED (this should "
5648 			    "not happen)\n", qid, idx,
5649 			    pkt->len_n_flags);
5650 			break;
5651 		}
5652 
5653 		/*
5654 		 * Why test bit 0x80?  The Linux driver:
5655 		 *
5656 		 * There is one exception:  uCode sets bit 15 when it
5657 		 * originates the response/notification, i.e. when the
5658 		 * response/notification is not a direct response to a
5659 		 * command sent by the driver.  For example, uCode issues
5660 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5661 		 * it is not a direct response to any driver command.
5662 		 *
5663 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5664 		 * uses a slightly different format for pkt->hdr, and "qid"
5665 		 * is actually the upper byte of a two-byte field.
5666 		 */
5667 		if (!(pkt->hdr.qid & (1 << 7))) {
5668 			iwm_cmd_done(sc, pkt);
5669 		}
5670 
5671 		ADVANCE_RXQ(sc);
5672 	}
5673 
5674 	/*
5675 	 * Tell the firmware what we have processed.
5676 	 * Seems like the hardware gets upset unless we align
5677 	 * the write by 8??
5678 	 */
5679 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5680 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5681 }
5682 
5683 static void
5684 iwm_intr(void *arg)
5685 {
5686 	struct iwm_softc *sc = arg;
5687 	int handled = 0;
5688 	int r1, r2, rv = 0;
5689 	int isperiodic = 0;
5690 
5691 #if defined(__DragonFly__)
5692 	if (sc->sc_mem == NULL) {
5693 		kprintf("iwm_intr: detached\n");
5694 		return;
5695 	}
5696 #endif
5697 	IWM_LOCK(sc);
5698 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5699 
5700 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5701 		uint32_t *ict = sc->ict_dma.vaddr;
5702 		int tmp;
5703 
5704 		tmp = htole32(ict[sc->ict_cur]);
5705 		if (!tmp)
5706 			goto out_ena;
5707 
5708 		/*
5709 		 * ok, there was something.  keep plowing until we have all.
5710 		 */
5711 		r1 = r2 = 0;
5712 		while (tmp) {
5713 			r1 |= tmp;
5714 			ict[sc->ict_cur] = 0;
5715 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5716 			tmp = htole32(ict[sc->ict_cur]);
5717 		}
5718 
5719 		/* this is where the fun begins.  don't ask */
5720 		if (r1 == 0xffffffff)
5721 			r1 = 0;
5722 
5723 		/* i am not expected to understand this */
5724 		if (r1 & 0xc0000)
5725 			r1 |= 0x8000;
5726 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5727 	} else {
5728 		r1 = IWM_READ(sc, IWM_CSR_INT);
5729 		/* "hardware gone" (where, fishing?) */
5730 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5731 			goto out;
5732 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5733 	}
5734 	if (r1 == 0 && r2 == 0) {
5735 		goto out_ena;
5736 	}
5737 
5738 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5739 
5740 	/* Safely ignore these bits for debug checks below */
5741 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5742 
5743 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5744 		int i;
5745 		struct ieee80211com *ic = &sc->sc_ic;
5746 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5747 
5748 #ifdef IWM_DEBUG
5749 		iwm_nic_error(sc);
5750 #endif
5751 		/* Dump driver status (TX and RX rings) while we're here. */
5752 		device_printf(sc->sc_dev, "driver status:\n");
5753 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5754 			struct iwm_tx_ring *ring = &sc->txq[i];
5755 			device_printf(sc->sc_dev,
5756 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5757 			    "queued=%-3d\n",
5758 			    i, ring->qid, ring->cur, ring->queued);
5759 		}
5760 		device_printf(sc->sc_dev,
5761 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5762 		device_printf(sc->sc_dev,
5763 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5764 
5765 		/* Don't stop the device; just do a VAP restart */
5766 		IWM_UNLOCK(sc);
5767 
5768 		if (vap == NULL) {
5769 			kprintf("%s: null vap\n", __func__);
5770 			return;
5771 		}
5772 
5773 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5774 		    "restarting\n", __func__, vap->iv_state);
5775 
5776 		ieee80211_restart_all(ic);
5777 		return;
5778 	}
5779 
5780 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5781 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5782 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5783 		iwm_stop(sc);
5784 		rv = 1;
5785 		goto out;
5786 	}
5787 
5788 	/* firmware chunk loaded */
5789 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5790 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5791 		handled |= IWM_CSR_INT_BIT_FH_TX;
5792 		sc->sc_fw_chunk_done = 1;
5793 		wakeup(&sc->sc_fw);
5794 	}
5795 
5796 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5797 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5798 		if (iwm_check_rfkill(sc)) {
5799 			device_printf(sc->sc_dev,
5800 			    "%s: rfkill switch, disabling interface\n",
5801 			    __func__);
5802 			iwm_stop(sc);
5803 		}
5804 	}
5805 
5806 	/*
5807 	 * The Linux driver uses periodic interrupts to avoid races.
5808 	 * We cargo-cult like it's going out of fashion.
5809 	 */
5810 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5811 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5812 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5813 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5814 			IWM_WRITE_1(sc,
5815 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5816 		isperiodic = 1;
5817 	}
5818 
5819 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5820 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5821 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5822 
5823 		iwm_notif_intr(sc);
5824 
5825 		/* enable periodic interrupt, see above */
5826 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5827 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5828 			    IWM_CSR_INT_PERIODIC_ENA);
5829 	}
5830 
5831 	if (__predict_false(r1 & ~handled))
5832 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5833 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5834 	rv = 1;
5835 
5836  out_ena:
5837 	iwm_restore_interrupts(sc);
5838  out:
5839 	IWM_UNLOCK(sc);
5840 	return;
5841 }
5842 
5843 /*
5844  * Autoconf glue-sniffing
5845  */
5846 #define	PCI_VENDOR_INTEL		0x8086
5847 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5848 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5849 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5850 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5851 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5852 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5853 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5854 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5855 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5856 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5857 
5858 static const struct iwm_devices {
5859 	uint16_t		device;
5860 	const struct iwm_cfg	*cfg;
5861 } iwm_devices[] = {
5862 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5863 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5864 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5865 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5866 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5867 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5868 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5869 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5870 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5871 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5872 };
5873 
5874 static int
5875 iwm_probe(device_t dev)
5876 {
5877 	int i;
5878 
5879 	for (i = 0; i < nitems(iwm_devices); i++) {
5880 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5881 		    pci_get_device(dev) == iwm_devices[i].device) {
5882 			device_set_desc(dev, iwm_devices[i].cfg->name);
5883 			return (BUS_PROBE_DEFAULT);
5884 		}
5885 	}
5886 
5887 	return (ENXIO);
5888 }
5889 
5890 static int
5891 iwm_dev_check(device_t dev)
5892 {
5893 	struct iwm_softc *sc;
5894 	uint16_t devid;
5895 	int i;
5896 
5897 	sc = device_get_softc(dev);
5898 
5899 	devid = pci_get_device(dev);
5900 	for (i = 0; i < NELEM(iwm_devices); i++) {
5901 		if (iwm_devices[i].device == devid) {
5902 			sc->cfg = iwm_devices[i].cfg;
5903 			return (0);
5904 		}
5905 	}
5906 	device_printf(dev, "unknown adapter type\n");
5907 	return ENXIO;
5908 }
5909 
5910 /* PCI registers */
5911 #define PCI_CFG_RETRY_TIMEOUT	0x041
5912 
5913 static int
5914 iwm_pci_attach(device_t dev)
5915 {
5916 	struct iwm_softc *sc;
5917 	int count, error, rid;
5918 	uint16_t reg;
5919 #if defined(__DragonFly__)
5920 	int irq_flags;
5921 #endif
5922 
5923 	sc = device_get_softc(dev);
5924 
5925 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5926 	 * PCI Tx retries from interfering with C3 CPU state */
5927 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5928 
5929 	/* Enable bus-mastering and hardware bug workaround. */
5930 	pci_enable_busmaster(dev);
5931 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5932 	/* if !MSI */
5933 	if (reg & PCIM_STATUS_INTxSTATE) {
5934 		reg &= ~PCIM_STATUS_INTxSTATE;
5935 	}
5936 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5937 
5938 	rid = PCIR_BAR(0);
5939 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5940 	    RF_ACTIVE);
5941 	if (sc->sc_mem == NULL) {
5942 		device_printf(sc->sc_dev, "can't map mem space\n");
5943 		return (ENXIO);
5944 	}
5945 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5946 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5947 
5948 	/* Install interrupt handler. */
5949 	count = 1;
5950 	rid = 0;
5951 #if defined(__DragonFly__)
5952 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5953 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5954 #else
5955 	if (pci_alloc_msi(dev, &count) == 0)
5956 		rid = 1;
5957 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5958 	    (rid != 0 ? 0 : RF_SHAREABLE));
5959 #endif
5960 	if (sc->sc_irq == NULL) {
5961 		device_printf(dev, "can't map interrupt\n");
5962 			return (ENXIO);
5963 	}
5964 #if defined(__DragonFly__)
5965 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5966 			       iwm_intr, sc, &sc->sc_ih,
5967 			       &wlan_global_serializer);
5968 #else
5969 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5970 	    NULL, iwm_intr, sc, &sc->sc_ih);
5971 #endif
5972 	if (sc->sc_ih == NULL) {
5973 		device_printf(dev, "can't establish interrupt");
5974 #if defined(__DragonFly__)
5975                 pci_release_msi(dev);
5976 #endif
5977 			return (ENXIO);
5978 	}
5979 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5980 
5981 	return (0);
5982 }
5983 
5984 static void
5985 iwm_pci_detach(device_t dev)
5986 {
5987 	struct iwm_softc *sc = device_get_softc(dev);
5988 
5989 	if (sc->sc_irq != NULL) {
5990 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5991 		bus_release_resource(dev, SYS_RES_IRQ,
5992 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5993 		pci_release_msi(dev);
5994 #if defined(__DragonFly__)
5995 		sc->sc_irq = NULL;
5996 #endif
5997         }
5998 	if (sc->sc_mem != NULL) {
5999 		bus_release_resource(dev, SYS_RES_MEMORY,
6000 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6001 #if defined(__DragonFly__)
6002 		sc->sc_mem = NULL;
6003 #endif
6004 	}
6005 }
6006 
6007 
6008 
6009 static int
6010 iwm_attach(device_t dev)
6011 {
6012 	struct iwm_softc *sc = device_get_softc(dev);
6013 	struct ieee80211com *ic = &sc->sc_ic;
6014 	int error;
6015 	int txq_i, i;
6016 
6017 	sc->sc_dev = dev;
6018 	sc->sc_attached = 1;
6019 	IWM_LOCK_INIT(sc);
6020 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6021 #if defined(__DragonFly__)
6022 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
6023 #else
6024 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6025 #endif
6026 	callout_init(&sc->sc_led_blink_to);
6027 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6028 
6029 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6030 	if (sc->sc_notif_wait == NULL) {
6031 		device_printf(dev, "failed to init notification wait struct\n");
6032 		goto fail;
6033 	}
6034 
6035 	/* Init phy db */
6036 	sc->sc_phy_db = iwm_phy_db_init(sc);
6037 	if (!sc->sc_phy_db) {
6038 		device_printf(dev, "Cannot init phy_db\n");
6039 		goto fail;
6040 	}
6041 
6042 	/* Set EBS as successful as long as not stated otherwise by the FW. */
6043 	sc->last_ebs_successful = TRUE;
6044 
6045 	/* PCI attach */
6046 	error = iwm_pci_attach(dev);
6047 	if (error != 0)
6048 		goto fail;
6049 
6050 	sc->sc_wantresp = -1;
6051 
6052 	/* Check device type */
6053 	error = iwm_dev_check(dev);
6054 	if (error != 0)
6055 		goto fail;
6056 
6057 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6058 	/*
6059 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6060 	 * changed, and now the revision step also includes bit 0-1 (no more
6061 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6062 	 * in the old format.
6063 	 */
6064 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6065 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6066 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6067 
6068 	if (iwm_prepare_card_hw(sc) != 0) {
6069 		device_printf(dev, "could not initialize hardware\n");
6070 		goto fail;
6071 	}
6072 
6073 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6074 		int ret;
6075 		uint32_t hw_step;
6076 
6077 		/*
6078 		 * In order to recognize C step the driver should read the
6079 		 * chip version id located at the AUX bus MISC address.
6080 		 */
6081 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6082 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6083 		DELAY(2);
6084 
6085 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6086 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6087 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6088 				   25000);
6089 		if (!ret) {
6090 			device_printf(sc->sc_dev,
6091 			    "Failed to wake up the nic\n");
6092 			goto fail;
6093 		}
6094 
6095 		if (iwm_nic_lock(sc)) {
6096 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6097 			hw_step |= IWM_ENABLE_WFPM;
6098 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6099 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6100 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6101 			if (hw_step == 0x3)
6102 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6103 						(IWM_SILICON_C_STEP << 2);
6104 			iwm_nic_unlock(sc);
6105 		} else {
6106 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6107 			goto fail;
6108 		}
6109 	}
6110 
6111 	/* special-case 7265D, it has the same PCI IDs. */
6112 	if (sc->cfg == &iwm7265_cfg &&
6113 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6114 		sc->cfg = &iwm7265d_cfg;
6115 	}
6116 
6117 	/* Allocate DMA memory for firmware transfers. */
6118 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6119 		device_printf(dev, "could not allocate memory for firmware\n");
6120 		goto fail;
6121 	}
6122 
6123 	/* Allocate "Keep Warm" page. */
6124 	if ((error = iwm_alloc_kw(sc)) != 0) {
6125 		device_printf(dev, "could not allocate keep warm page\n");
6126 		goto fail;
6127 	}
6128 
6129 	/* We use ICT interrupts */
6130 	if ((error = iwm_alloc_ict(sc)) != 0) {
6131 		device_printf(dev, "could not allocate ICT table\n");
6132 		goto fail;
6133 	}
6134 
6135 	/* Allocate TX scheduler "rings". */
6136 	if ((error = iwm_alloc_sched(sc)) != 0) {
6137 		device_printf(dev, "could not allocate TX scheduler rings\n");
6138 		goto fail;
6139 	}
6140 
6141 	/* Allocate TX rings */
6142 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6143 		if ((error = iwm_alloc_tx_ring(sc,
6144 		    &sc->txq[txq_i], txq_i)) != 0) {
6145 			device_printf(dev,
6146 			    "could not allocate TX ring %d\n",
6147 			    txq_i);
6148 			goto fail;
6149 		}
6150 	}
6151 
6152 	/* Allocate RX ring. */
6153 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6154 		device_printf(dev, "could not allocate RX ring\n");
6155 		goto fail;
6156 	}
6157 
6158 	/* Clear pending interrupts. */
6159 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6160 
6161 	ic->ic_softc = sc;
6162 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6163 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6164 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6165 
6166 	/* Set device capabilities. */
6167 	ic->ic_caps =
6168 	    IEEE80211_C_STA |
6169 	    IEEE80211_C_WPA |		/* WPA/RSN */
6170 	    IEEE80211_C_WME |
6171 	    IEEE80211_C_PMGT |
6172 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6173 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6174 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6175 	    ;
6176 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6177 		sc->sc_phyctxt[i].id = i;
6178 		sc->sc_phyctxt[i].color = 0;
6179 		sc->sc_phyctxt[i].ref = 0;
6180 		sc->sc_phyctxt[i].channel = NULL;
6181 	}
6182 
6183 	/* Default noise floor */
6184 	sc->sc_noise = -96;
6185 
6186 	/* Max RSSI */
6187 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6188 
6189 	sc->sc_preinit_hook.ich_func = iwm_preinit;
6190 	sc->sc_preinit_hook.ich_arg = sc;
6191 	sc->sc_preinit_hook.ich_desc = "iwm";
6192 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6193 		device_printf(dev, "config_intrhook_establish failed\n");
6194 		goto fail;
6195 	}
6196 
6197 #ifdef IWM_DEBUG
6198 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6199 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6200 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6201 #endif
6202 
6203 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6204 	    "<-%s\n", __func__);
6205 
6206 	return 0;
6207 
6208 	/* Free allocated memory if something failed during attachment. */
6209 fail:
6210 	iwm_detach_local(sc, 0);
6211 
6212 	return ENXIO;
6213 }
6214 
6215 static int
6216 iwm_is_valid_ether_addr(uint8_t *addr)
6217 {
6218 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6219 
6220 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6221 		return (FALSE);
6222 
6223 	return (TRUE);
6224 }
6225 
6226 static int
6227 iwm_update_edca(struct ieee80211com *ic)
6228 {
6229 	struct iwm_softc *sc = ic->ic_softc;
6230 
6231 	device_printf(sc->sc_dev, "%s: called\n", __func__);
6232 	return (0);
6233 }
6234 
6235 static void
6236 iwm_preinit(void *arg)
6237 {
6238 	struct iwm_softc *sc = arg;
6239 	device_t dev = sc->sc_dev;
6240 	struct ieee80211com *ic = &sc->sc_ic;
6241 	int error;
6242 
6243 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6244 	    "->%s\n", __func__);
6245 
6246 	IWM_LOCK(sc);
6247 	if ((error = iwm_start_hw(sc)) != 0) {
6248 		device_printf(dev, "could not initialize hardware\n");
6249 		IWM_UNLOCK(sc);
6250 		goto fail;
6251 	}
6252 
6253 	error = iwm_run_init_mvm_ucode(sc, 1);
6254 	iwm_stop_device(sc);
6255 	if (error) {
6256 		IWM_UNLOCK(sc);
6257 		goto fail;
6258 	}
6259 	device_printf(dev,
6260 	    "hw rev 0x%x, fw ver %s, address %s\n",
6261 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6262 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6263 
6264 	/* not all hardware can do 5GHz band */
6265 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6266 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6267 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6268 	IWM_UNLOCK(sc);
6269 
6270 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6271 	    ic->ic_channels);
6272 
6273 	/*
6274 	 * At this point we've committed - if we fail to do setup,
6275 	 * we now also have to tear down the net80211 state.
6276 	 */
6277 	ieee80211_ifattach(ic);
6278 	ic->ic_vap_create = iwm_vap_create;
6279 	ic->ic_vap_delete = iwm_vap_delete;
6280 	ic->ic_raw_xmit = iwm_raw_xmit;
6281 	ic->ic_node_alloc = iwm_node_alloc;
6282 	ic->ic_scan_start = iwm_scan_start;
6283 	ic->ic_scan_end = iwm_scan_end;
6284 	ic->ic_update_mcast = iwm_update_mcast;
6285 	ic->ic_getradiocaps = iwm_init_channel_map;
6286 	ic->ic_set_channel = iwm_set_channel;
6287 	ic->ic_scan_curchan = iwm_scan_curchan;
6288 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6289 	ic->ic_wme.wme_update = iwm_update_edca;
6290 	ic->ic_parent = iwm_parent;
6291 	ic->ic_transmit = iwm_transmit;
6292 	iwm_radiotap_attach(sc);
6293 	if (bootverbose)
6294 		ieee80211_announce(ic);
6295 
6296 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6297 	    "<-%s\n", __func__);
6298 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6299 
6300 	return;
6301 fail:
6302 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6303 	iwm_detach_local(sc, 0);
6304 }
6305 
6306 /*
6307  * Attach the interface to 802.11 radiotap.
6308  */
6309 static void
6310 iwm_radiotap_attach(struct iwm_softc *sc)
6311 {
6312         struct ieee80211com *ic = &sc->sc_ic;
6313 
6314 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6315 	    "->%s begin\n", __func__);
6316         ieee80211_radiotap_attach(ic,
6317             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6318                 IWM_TX_RADIOTAP_PRESENT,
6319             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6320                 IWM_RX_RADIOTAP_PRESENT);
6321 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6322 	    "->%s end\n", __func__);
6323 }
6324 
6325 static struct ieee80211vap *
6326 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6327     enum ieee80211_opmode opmode, int flags,
6328     const uint8_t bssid[IEEE80211_ADDR_LEN],
6329     const uint8_t mac[IEEE80211_ADDR_LEN])
6330 {
6331 	struct iwm_vap *ivp;
6332 	struct ieee80211vap *vap;
6333 
6334 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6335 		return NULL;
6336 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6337 	vap = &ivp->iv_vap;
6338 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6339 	vap->iv_bmissthreshold = 10;            /* override default */
6340 	/* Override with driver methods. */
6341 	ivp->iv_newstate = vap->iv_newstate;
6342 	vap->iv_newstate = iwm_newstate;
6343 
6344 	ivp->id = IWM_DEFAULT_MACID;
6345 	ivp->color = IWM_DEFAULT_COLOR;
6346 
6347 	ieee80211_ratectl_init(vap);
6348 	/* Complete setup. */
6349 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6350 	    mac);
6351 	ic->ic_opmode = opmode;
6352 
6353 	return vap;
6354 }
6355 
6356 static void
6357 iwm_vap_delete(struct ieee80211vap *vap)
6358 {
6359 	struct iwm_vap *ivp = IWM_VAP(vap);
6360 
6361 	ieee80211_ratectl_deinit(vap);
6362 	ieee80211_vap_detach(vap);
6363 	kfree(ivp, M_80211_VAP);
6364 }
6365 
6366 static void
6367 iwm_scan_start(struct ieee80211com *ic)
6368 {
6369 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6370 	struct iwm_softc *sc = ic->ic_softc;
6371 	int error;
6372 
6373 	IWM_LOCK(sc);
6374 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6375 		/* This should not be possible */
6376 		device_printf(sc->sc_dev,
6377 		    "%s: Previous scan not completed yet\n", __func__);
6378 	}
6379 	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6380 		error = iwm_mvm_umac_scan(sc);
6381 	else
6382 		error = iwm_mvm_lmac_scan(sc);
6383 	if (error != 0) {
6384 		device_printf(sc->sc_dev, "could not initiate scan\n");
6385 		IWM_UNLOCK(sc);
6386 		ieee80211_cancel_scan(vap);
6387 	} else {
6388 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6389 		iwm_led_blink_start(sc);
6390 		IWM_UNLOCK(sc);
6391 	}
6392 }
6393 
6394 static void
6395 iwm_scan_end(struct ieee80211com *ic)
6396 {
6397 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6398 	struct iwm_softc *sc = ic->ic_softc;
6399 
6400 	IWM_LOCK(sc);
6401 	iwm_led_blink_stop(sc);
6402 	if (vap->iv_state == IEEE80211_S_RUN)
6403 		iwm_mvm_led_enable(sc);
6404 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6405 		/*
6406 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6407 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6408 		 * taskqueue.
6409 		 */
6410 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6411 		iwm_mvm_scan_stop_wait(sc);
6412 	}
6413 	IWM_UNLOCK(sc);
6414 
6415 	/*
6416 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6417 	 * This is to make sure that it won't call ieee80211_scan_done
6418 	 * when we have already started the next scan.
6419 	 */
6420 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6421 }
6422 
6423 static void
6424 iwm_update_mcast(struct ieee80211com *ic)
6425 {
6426 }
6427 
6428 static void
6429 iwm_set_channel(struct ieee80211com *ic)
6430 {
6431 }
6432 
6433 static void
6434 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6435 {
6436 }
6437 
6438 static void
6439 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6440 {
6441 	return;
6442 }
6443 
6444 void
6445 iwm_init_task(void *arg1)
6446 {
6447 	struct iwm_softc *sc = arg1;
6448 
6449 	IWM_LOCK(sc);
6450 	while (sc->sc_flags & IWM_FLAG_BUSY) {
6451 #if defined(__DragonFly__)
6452 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6453 #else
6454 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6455 #endif
6456 }
6457 	sc->sc_flags |= IWM_FLAG_BUSY;
6458 	iwm_stop(sc);
6459 	if (sc->sc_ic.ic_nrunning > 0)
6460 		iwm_init(sc);
6461 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6462 	wakeup(&sc->sc_flags);
6463 	IWM_UNLOCK(sc);
6464 }
6465 
6466 static int
6467 iwm_resume(device_t dev)
6468 {
6469 	struct iwm_softc *sc = device_get_softc(dev);
6470 	int do_reinit = 0;
6471 
6472 	/*
6473 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6474 	 * PCI Tx retries from interfering with C3 CPU state.
6475 	 */
6476 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6477 	iwm_init_task(device_get_softc(dev));
6478 
6479 	IWM_LOCK(sc);
6480 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6481 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6482 		do_reinit = 1;
6483 	}
6484 	IWM_UNLOCK(sc);
6485 
6486 	if (do_reinit)
6487 		ieee80211_resume_all(&sc->sc_ic);
6488 
6489 	return 0;
6490 }
6491 
6492 static int
6493 iwm_suspend(device_t dev)
6494 {
6495 	int do_stop = 0;
6496 	struct iwm_softc *sc = device_get_softc(dev);
6497 
6498 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6499 
6500 	ieee80211_suspend_all(&sc->sc_ic);
6501 
6502 	if (do_stop) {
6503 		IWM_LOCK(sc);
6504 		iwm_stop(sc);
6505 		sc->sc_flags |= IWM_FLAG_SCANNING;
6506 		IWM_UNLOCK(sc);
6507 	}
6508 
6509 	return (0);
6510 }
6511 
6512 static int
6513 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6514 {
6515 	struct iwm_fw_info *fw = &sc->sc_fw;
6516 	device_t dev = sc->sc_dev;
6517 	int i;
6518 
6519 	if (!sc->sc_attached)
6520 		return 0;
6521 	sc->sc_attached = 0;
6522 	if (do_net80211) {
6523 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6524 	}
6525 	callout_drain(&sc->sc_led_blink_to);
6526 	callout_drain(&sc->sc_watchdog_to);
6527 	iwm_stop_device(sc);
6528 	if (do_net80211) {
6529 		ieee80211_ifdetach(&sc->sc_ic);
6530 	}
6531 
6532 	iwm_phy_db_free(sc->sc_phy_db);
6533 	sc->sc_phy_db = NULL;
6534 
6535 	iwm_free_nvm_data(sc->nvm_data);
6536 
6537 	/* Free descriptor rings */
6538 	iwm_free_rx_ring(sc, &sc->rxq);
6539 	for (i = 0; i < nitems(sc->txq); i++)
6540 		iwm_free_tx_ring(sc, &sc->txq[i]);
6541 
6542 	/* Free firmware */
6543 	if (fw->fw_fp != NULL)
6544 		iwm_fw_info_free(fw);
6545 
6546 	/* Free scheduler */
6547 	iwm_dma_contig_free(&sc->sched_dma);
6548 	iwm_dma_contig_free(&sc->ict_dma);
6549 	iwm_dma_contig_free(&sc->kw_dma);
6550 	iwm_dma_contig_free(&sc->fw_dma);
6551 
6552 	iwm_free_fw_paging(sc);
6553 
6554 	/* Finished with the hardware - detach things */
6555 	iwm_pci_detach(dev);
6556 
6557 	if (sc->sc_notif_wait != NULL) {
6558 		iwm_notification_wait_free(sc->sc_notif_wait);
6559 		sc->sc_notif_wait = NULL;
6560 	}
6561 
6562 	mbufq_drain(&sc->sc_snd);
6563 	IWM_LOCK_DESTROY(sc);
6564 
6565 	return (0);
6566 }
6567 
6568 static int
6569 iwm_detach(device_t dev)
6570 {
6571 	struct iwm_softc *sc = device_get_softc(dev);
6572 
6573 	return (iwm_detach_local(sc, 1));
6574 }
6575 
6576 static device_method_t iwm_pci_methods[] = {
6577         /* Device interface */
6578         DEVMETHOD(device_probe,         iwm_probe),
6579         DEVMETHOD(device_attach,        iwm_attach),
6580         DEVMETHOD(device_detach,        iwm_detach),
6581         DEVMETHOD(device_suspend,       iwm_suspend),
6582         DEVMETHOD(device_resume,        iwm_resume),
6583 
6584         DEVMETHOD_END
6585 };
6586 
6587 static driver_t iwm_pci_driver = {
6588         "iwm",
6589         iwm_pci_methods,
6590         sizeof (struct iwm_softc)
6591 };
6592 
6593 static devclass_t iwm_devclass;
6594 
6595 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6596 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6597 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6598 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6599