xref: /netbsd-src/sys/dev/pci/if_iwm.c (revision f89f6560d453f5e37386cc7938c072d2f528b9fa)
1 /*	$NetBSD: if_iwm.c,v 1.30 2015/04/15 05:40:48 nonaka Exp $	*/
2 /*	OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp	*/
3 
4 /*
5  * Copyright (c) 2014 genua mbh <info@genua.de>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*-
22  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
23  * which were used as the reference documentation for this implementation.
24  *
25  * Driver version we are currently based off of is
26  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27  *
28  ***********************************************************************
29  *
30  * This file is provided under a dual BSD/GPLv2 license.  When using or
31  * redistributing this file, you may do so under either license.
32  *
33  * GPL LICENSE SUMMARY
34  *
35  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * All rights reserved.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  *
68  *  * Redistributions of source code must retain the above copyright
69  *    notice, this list of conditions and the following disclaimer.
70  *  * Redistributions in binary form must reproduce the above copyright
71  *    notice, this list of conditions and the following disclaimer in
72  *    the documentation and/or other materials provided with the
73  *    distribution.
74  *  * Neither the name Intel Corporation nor the names of its
75  *    contributors may be used to endorse or promote products derived
76  *    from this software without specific prior written permission.
77  *
78  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
79  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
80  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
81  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
82  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
83  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
84  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
85  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
86  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
87  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
88  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
89  */
90 
91 /*-
92  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93  *
94  * Permission to use, copy, modify, and distribute this software for any
95  * purpose with or without fee is hereby granted, provided that the above
96  * copyright notice and this permission notice appear in all copies.
97  *
98  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
99  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
100  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
101  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
102  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
103  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
104  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105  */
106 
107 #include <sys/cdefs.h>
108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.30 2015/04/15 05:40:48 nonaka Exp $");
109 
110 #include <sys/param.h>
111 #include <sys/conf.h>
112 #include <sys/kernel.h>
113 #include <sys/kmem.h>
114 #include <sys/mbuf.h>
115 #include <sys/mutex.h>
116 #include <sys/proc.h>
117 #include <sys/socket.h>
118 #include <sys/sockio.h>
119 #include <sys/systm.h>
120 
121 #include <sys/cpu.h>
122 #include <sys/bus.h>
123 #include <sys/workqueue.h>
124 #include <machine/endian.h>
125 #include <machine/intr.h>
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 #include <dev/firmload.h>
131 
132 #include <net/bpf.h>
133 #include <net/if.h>
134 #include <net/if_arp.h>
135 #include <net/if_dl.h>
136 #include <net/if_media.h>
137 #include <net/if_types.h>
138 #include <net/if_ether.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/in_systm.h>
142 #include <netinet/ip.h>
143 
144 #include <net80211/ieee80211_var.h>
145 #include <net80211/ieee80211_amrr.h>
146 #include <net80211/ieee80211_radiotap.h>
147 
148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
150 
151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
153 
154 #ifdef IWM_DEBUG
155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
157 int iwm_debug = 1;
158 #else
159 #define DPRINTF(x)	do { ; } while (0)
160 #define DPRINTFN(n, x)	do { ; } while (0)
161 #endif
162 
163 #include <dev/pci/if_iwmreg.h>
164 #include <dev/pci/if_iwmvar.h>
165 
/*
 * Channel numbers the NVM may mark as supported; the index into this
 * table is the NVM channel index.  The first IWM_NUM_2GHZ_CHANNELS
 * entries are 2.4 GHz channels, the remainder are 5 GHz channels.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
#define IWM_NUM_2GHZ_CHANNELS	14
175 
/*
 * Rate table mapping net80211 rates (in 500 kb/s units, so 2 == 1
 * Mb/s) to hardware PLCP codes.  Indices below IWM_RIDX_OFDM are
 * the CCK (11b) rates, the rest are OFDM rates.
 */
static const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kb/s units */
	uint8_t plcp;	/* hardware PLCP rate code */
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0	/* index of first CCK rate */
#define IWM_RIDX_OFDM	4	/* index of first OFDM rate */
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
198 
/*
 * Argument bundle for a deferred net80211 state transition, queued
 * on a workqueue and processed by iwm_newstate_cb().
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target state */
	int ns_arg;			/* argument for the transition */
	int ns_generation;		/* NOTE(review): presumably used to
					 * drop stale requests -- confirm in
					 * iwm_newstate_cb() */
};
205 
206 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
207 static int	iwm_firmware_store_section(struct iwm_softc *,
208 		    enum iwm_ucode_type, uint8_t *, size_t);
209 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
210 static int	iwm_read_firmware(struct iwm_softc *);
211 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
212 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
213 #ifdef IWM_DEBUG
214 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
215 #endif
216 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
217 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
218 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
219 static int	iwm_nic_lock(struct iwm_softc *);
220 static void	iwm_nic_unlock(struct iwm_softc *);
221 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
222 		    uint32_t);
223 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
224 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
225 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
226 		    bus_size_t, bus_size_t);
227 static void	iwm_dma_contig_free(struct iwm_dma_info *);
228 static int	iwm_alloc_fwmem(struct iwm_softc *);
229 static void	iwm_free_fwmem(struct iwm_softc *);
230 static int	iwm_alloc_sched(struct iwm_softc *);
231 static void	iwm_free_sched(struct iwm_softc *);
232 static int	iwm_alloc_kw(struct iwm_softc *);
233 static void	iwm_free_kw(struct iwm_softc *);
234 static int	iwm_alloc_ict(struct iwm_softc *);
235 static void	iwm_free_ict(struct iwm_softc *);
236 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
237 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
238 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
239 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
240 		    int);
241 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
242 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
243 static void	iwm_enable_rfkill_int(struct iwm_softc *);
244 static int	iwm_check_rfkill(struct iwm_softc *);
245 static void	iwm_enable_interrupts(struct iwm_softc *);
246 static void	iwm_restore_interrupts(struct iwm_softc *);
247 static void	iwm_disable_interrupts(struct iwm_softc *);
248 static void	iwm_ict_reset(struct iwm_softc *);
249 static int	iwm_set_hw_ready(struct iwm_softc *);
250 static int	iwm_prepare_card_hw(struct iwm_softc *);
251 static void	iwm_apm_config(struct iwm_softc *);
252 static int	iwm_apm_init(struct iwm_softc *);
253 static void	iwm_apm_stop(struct iwm_softc *);
254 static int	iwm_allow_mcast(struct iwm_softc *);
255 static int	iwm_start_hw(struct iwm_softc *);
256 static void	iwm_stop_device(struct iwm_softc *);
257 static void	iwm_set_pwr(struct iwm_softc *);
258 static void	iwm_mvm_nic_config(struct iwm_softc *);
259 static int	iwm_nic_rx_init(struct iwm_softc *);
260 static int	iwm_nic_tx_init(struct iwm_softc *);
261 static int	iwm_nic_init(struct iwm_softc *);
262 static void	iwm_enable_txq(struct iwm_softc *, int, int);
263 static int	iwm_post_alive(struct iwm_softc *);
264 static int	iwm_is_valid_channel(uint16_t);
265 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
266 static uint16_t iwm_channel_id_to_papd(uint16_t);
267 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
268 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
269 		    uint8_t **, uint16_t *, uint16_t);
270 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
271 		    void *);
/* NB: this prototype was previously declared twice; duplicate removed. */
static int	iwm_send_phy_db_data(struct iwm_softc *);
274 static void	iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
275 		    struct iwm_time_event_cmd_v1 *);
276 static int	iwm_mvm_send_time_event_cmd(struct iwm_softc *,
277 		    const struct iwm_time_event_cmd_v2 *);
278 static int	iwm_mvm_time_event_send_add(struct iwm_softc *,
279 		    struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
280 static void	iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
281 		    uint32_t, uint32_t, uint32_t);
282 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
283 		    uint16_t, uint8_t *, uint16_t *);
284 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
285 		    uint16_t *);
286 static void	iwm_init_channel_map(struct iwm_softc *,
287 		    const uint16_t * const);
288 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
289 		    const uint16_t *, const uint16_t *, uint8_t, uint8_t);
290 static int	iwm_nvm_init(struct iwm_softc *);
291 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
292 		    const uint8_t *, uint32_t);
293 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
294 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
295 static int	iwm_fw_alive(struct iwm_softc *, uint32_t);
296 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
297 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
298 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
299 		    enum iwm_ucode_type);
300 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
301 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
302 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
303 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
304 		    struct iwm_rx_phy_info *);
305 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
306 		    struct iwm_rx_packet *, struct iwm_rx_data *);
307 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
308 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
309 		    struct iwm_rx_data *);
310 static void	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
311 		    struct iwm_rx_packet *, struct iwm_node *);
312 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
313 		    struct iwm_rx_data *);
314 static int	iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
315 		    uint32_t);
316 static int	iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
317 		    int);
318 static int	iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
319 static void	iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
320 		    struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
321 		    uint32_t, uint32_t);
322 static void	iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
323 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
324 		    uint8_t, uint8_t);
325 static int	iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
326 		    struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
327 		    uint32_t);
328 static int	iwm_mvm_phy_ctxt_add(struct iwm_softc *,
329 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
330 		    uint8_t, uint8_t);
331 static int	iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
332 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
333 		    uint8_t, uint8_t);
334 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
335 static int	iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
336 		    uint16_t, const void *);
337 static int	iwm_mvm_send_cmd_status(struct iwm_softc *,
338 		    struct iwm_host_cmd *, uint32_t *);
339 static int	iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
340 		    uint16_t, const void *, uint32_t *);
341 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
342 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
343 #if 0
344 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
345 		    uint16_t);
346 #endif
347 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
348 		    struct iwm_node *, struct ieee80211_frame *,
349 		    struct iwm_tx_cmd *);
350 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
351 		    struct ieee80211_node *, int);
352 static int	iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
353 		    struct iwm_beacon_filter_cmd *);
354 static void	iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
355 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
356 static int	iwm_mvm_update_beacon_abort(struct iwm_softc *,
357 		    struct iwm_node *, int);
358 static void	iwm_mvm_power_log(struct iwm_softc *,
359 		    struct iwm_mac_power_cmd *);
360 static void	iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
361 		    struct iwm_mac_power_cmd *);
362 static int	iwm_mvm_power_mac_update_mode(struct iwm_softc *,
363 		    struct iwm_node *);
364 static int	iwm_mvm_power_update_device(struct iwm_softc *);
365 static int	iwm_mvm_enable_beacon_filter(struct iwm_softc *,
366 		    struct iwm_node *);
367 static int	iwm_mvm_disable_beacon_filter(struct iwm_softc *,
368 		    struct iwm_node *);
369 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
370 		    struct iwm_mvm_add_sta_cmd_v5 *);
371 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
372 		    struct iwm_mvm_add_sta_cmd_v6 *, int *);
373 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
374 		    int);
375 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
376 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
377 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
378 		    struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
379 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
380 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
381 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
382 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
383 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
384 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
385 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
386 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
387 static int	iwm_mvm_scan_fill_channels(struct iwm_softc *,
388 		    struct iwm_scan_cmd *, int, int, int);
389 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
390 		    struct ieee80211_frame *, const uint8_t *, int,
391 		    const uint8_t *, int, const uint8_t *, int, int);
392 static int	iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
393 		    int);
394 static void	iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
395 		    int *);
396 static void	iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
397 		    struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
398 static int	iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
399 		    struct iwm_mac_ctx_cmd *);
400 static void	iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
401 		    struct iwm_node *, struct iwm_mac_data_sta *, int);
402 static int	iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
403 		    struct iwm_node *, uint32_t);
404 static int	iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
405 		    uint32_t);
406 static int	iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
407 static int	iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
408 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
409 static int	iwm_auth(struct iwm_softc *);
410 static int	iwm_assoc(struct iwm_softc *);
411 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
412 static void	iwm_calib_timeout(void *);
413 static void	iwm_setrates(struct iwm_node *);
414 static int	iwm_media_change(struct ifnet *);
415 static void	iwm_newstate_cb(struct work *, void *);
416 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
417 static void	iwm_endscan_cb(struct work *, void *);
418 static int	iwm_init_hw(struct iwm_softc *);
419 static int	iwm_init(struct ifnet *);
420 static void	iwm_start(struct ifnet *);
421 static void	iwm_stop(struct ifnet *, int);
422 static void	iwm_watchdog(struct ifnet *);
423 static int	iwm_ioctl(struct ifnet *, u_long, void *);
424 #ifdef IWM_DEBUG
425 static const char *iwm_desc_lookup(uint32_t);
426 static void	iwm_nic_error(struct iwm_softc *);
427 #endif
428 static void	iwm_notif_intr(struct iwm_softc *);
429 static int	iwm_intr(void *);
430 static int	iwm_preinit(struct iwm_softc *);
431 static void	iwm_attach_hook(device_t);
432 static void	iwm_attach(device_t, device_t, void *);
433 #if 0
434 static void	iwm_init_task(void *);
435 static int	iwm_activate(device_t, enum devact);
436 static void	iwm_wakeup(struct iwm_softc *);
437 #endif
438 static void	iwm_radiotap_attach(struct iwm_softc *);
439 
440 static int
441 iwm_firmload(struct iwm_softc *sc)
442 {
443 	struct iwm_fw_info *fw = &sc->sc_fw;
444 	firmware_handle_t fwh;
445 	int error;
446 
447 	/* Open firmware image. */
448 	if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
449 		aprint_error_dev(sc->sc_dev,
450 		    "could not get firmware handle %s\n", sc->sc_fwname);
451 		return error;
452 	}
453 
454 	fw->fw_rawsize = firmware_get_size(fwh);
455 	/*
456 	 * Well, this is how the Linux driver checks it ....
457 	 */
458 	if (fw->fw_rawsize < sizeof(uint32_t)) {
459 		aprint_error_dev(sc->sc_dev,
460 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
461 		error = EINVAL;
462 		goto out;
463 	}
464 
465 	/* some sanity */
466 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
467 		aprint_error_dev(sc->sc_dev,
468 		    "firmware size is ridiculous: %zd bytes\n",
469 		fw->fw_rawsize);
470 		error = EINVAL;
471 		goto out;
472 	}
473 
474 	/* Read the firmware. */
475 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
476 	if (fw->fw_rawdata == NULL) {
477 		aprint_error_dev(sc->sc_dev,
478 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
479 		error = ENOMEM;
480 		goto out;
481 	}
482 	error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
483 	if (error) {
484 		aprint_error_dev(sc->sc_dev,
485 		    "could not read firmware %s\n", sc->sc_fwname);
486 		goto out;
487 	}
488 
489  out:
490 	/* caller will release memory, if necessary */
491 
492 	firmware_close(fwh);
493 	return error;
494 }
495 
496 /*
497  * just maintaining status quo.
498  */
499 static void
500 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
501 {
502 	struct ieee80211_frame *wh;
503 	uint8_t subtype;
504 	uint8_t *frm, *efrm;
505 
506 	wh = mtod(m, struct ieee80211_frame *);
507 
508 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
509 		return;
510 
511 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
512 
513 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
514 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
515 		return;
516 
517 	frm = (uint8_t *)(wh + 1);
518 	efrm = mtod(m, uint8_t *) + m->m_len;
519 
520 	frm += 12;      /* skip tstamp, bintval and capinfo fields */
521 	while (frm < efrm) {
522 		if (*frm == IEEE80211_ELEMID_DSPARMS) {
523 #if IEEE80211_CHAN_MAX < 255
524 			if (frm[2] <= IEEE80211_CHAN_MAX)
525 #endif
526 				ic->ic_curchan = &ic->ic_channels[frm[2]];
527 		}
528 		frm += frm[1] + 2;
529 	}
530 }
531 
532 /*
533  * Firmware parser.
534  */
535 
536 static int
537 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
538 {
539 	struct iwm_fw_cscheme_list *l = (void *)data;
540 
541 	if (dlen < sizeof(*l) ||
542 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
543 		return EINVAL;
544 
545 	/* we don't actually store anything for now, always use s/w crypto */
546 
547 	return 0;
548 }
549 
550 static int
551 iwm_firmware_store_section(struct iwm_softc *sc,
552 	enum iwm_ucode_type type, uint8_t *data, size_t dlen)
553 {
554 	struct iwm_fw_sects *fws;
555 	struct iwm_fw_onesect *fwone;
556 
557 	if (type >= IWM_UCODE_TYPE_MAX)
558 		return EINVAL;
559 	if (dlen < sizeof(uint32_t))
560 		return EINVAL;
561 
562 	fws = &sc->sc_fw.fw_sects[type];
563 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
564 		return EINVAL;
565 
566 	fwone = &fws->fw_sect[fws->fw_count];
567 
568 	/* first 32bit are device load offset */
569 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
570 
571 	/* rest is data */
572 	fwone->fws_data = data + sizeof(uint32_t);
573 	fwone->fws_len = dlen - sizeof(uint32_t);
574 
575 	/* for freeing the buffer during driver unload */
576 	fwone->fws_alloc = data;
577 	fwone->fws_allocsize = dlen;
578 
579 	fws->fw_count++;
580 	fws->fw_totlen += fwone->fws_len;
581 
582 	return 0;
583 }
584 
/* iwlwifi: iwl-drv.c */
/*
 * Wire layout of an IWM_UCODE_TLV_DEF_CALIB TLV: the ucode image
 * type (little-endian, see iwm_set_default_calib()) followed by the
 * default calibration control settings.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
590 
591 static int
592 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
593 {
594 	const struct iwm_tlv_calib_data *def_calib = data;
595 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
596 
597 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
598 		DPRINTF(("%s: Wrong ucode_type %u for default "
599 		    "calibration.\n", DEVNAME(sc), ucode_type));
600 		return EINVAL;
601 	}
602 
603 	sc->sc_default_calib[ucode_type].flow_trigger =
604 	    def_calib->calib.flow_trigger;
605 	sc->sc_default_calib[ucode_type].event_trigger =
606 	    def_calib->calib.event_trigger;
607 
608 	return 0;
609 }
610 
611 static int
612 iwm_read_firmware(struct iwm_softc *sc)
613 {
614 	struct iwm_fw_info *fw = &sc->sc_fw;
615 	struct iwm_tlv_ucode_header *uhdr;
616 	struct iwm_ucode_tlv tlv;
617 	enum iwm_ucode_tlv_type tlv_type;
618 	uint8_t *data;
619 	int error, status;
620 	size_t len;
621 
622 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
623 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
624 	} else {
625 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
626 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
627 	}
628 	status = fw->fw_status;
629 
630 	if (status == IWM_FW_STATUS_DONE)
631 		return 0;
632 
633 	/*
634 	 * Load firmware into driver memory.
635 	 * fw_rawdata and fw_rawsize will be set.
636 	 */
637 	error = iwm_firmload(sc);
638 	if (error != 0) {
639 		aprint_error_dev(sc->sc_dev,
640 		    "could not read firmware %s (error %d)\n",
641 		    sc->sc_fwname, error);
642 		goto out;
643 	}
644 
645 	/*
646 	 * Parse firmware contents
647 	 */
648 
649 	uhdr = (void *)fw->fw_rawdata;
650 	if (*(uint32_t *)fw->fw_rawdata != 0
651 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
652 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
653 		    sc->sc_fwname);
654 		error = EINVAL;
655 		goto out;
656 	}
657 
658 	sc->sc_fwver = le32toh(uhdr->ver);
659 	data = uhdr->data;
660 	len = fw->fw_rawsize - sizeof(*uhdr);
661 
662 	while (len >= sizeof(tlv)) {
663 		size_t tlv_len;
664 		void *tlv_data;
665 
666 		memcpy(&tlv, data, sizeof(tlv));
667 		tlv_len = le32toh(tlv.length);
668 		tlv_type = le32toh(tlv.type);
669 
670 		len -= sizeof(tlv);
671 		data += sizeof(tlv);
672 		tlv_data = data;
673 
674 		if (len < tlv_len) {
675 			aprint_error_dev(sc->sc_dev,
676 			    "firmware too short: %zu bytes\n", len);
677 			error = EINVAL;
678 			goto parse_out;
679 		}
680 
681 		switch ((int)tlv_type) {
682 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
683 			if (tlv_len < sizeof(uint32_t)) {
684 				error = EINVAL;
685 				goto parse_out;
686 			}
687 			sc->sc_capa_max_probe_len
688 			    = le32toh(*(uint32_t *)tlv_data);
689 			/* limit it to something sensible */
690 			if (sc->sc_capa_max_probe_len > (1<<16)) {
691 				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
692 				    "ridiculous\n", DEVNAME(sc)));
693 				error = EINVAL;
694 				goto parse_out;
695 			}
696 			break;
697 		case IWM_UCODE_TLV_PAN:
698 			if (tlv_len) {
699 				error = EINVAL;
700 				goto parse_out;
701 			}
702 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
703 			break;
704 		case IWM_UCODE_TLV_FLAGS:
705 			if (tlv_len < sizeof(uint32_t)) {
706 				error = EINVAL;
707 				goto parse_out;
708 			}
709 			/*
710 			 * Apparently there can be many flags, but Linux driver
711 			 * parses only the first one, and so do we.
712 			 *
713 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
714 			 * Intentional or a bug?  Observations from
715 			 * current firmware file:
716 			 *  1) TLV_PAN is parsed first
717 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
718 			 * ==> this resets TLV_PAN to itself... hnnnk
719 			 */
720 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
721 			break;
722 		case IWM_UCODE_TLV_CSCHEME:
723 			if ((error = iwm_store_cscheme(sc,
724 			    tlv_data, tlv_len)) != 0)
725 				goto parse_out;
726 			break;
727 		case IWM_UCODE_TLV_NUM_OF_CPU:
728 			if (tlv_len != sizeof(uint32_t)) {
729 				error = EINVAL;
730 				goto parse_out;
731 			}
732 			if (le32toh(*(uint32_t*)tlv_data) != 1) {
733 				DPRINTF(("%s: driver supports "
734 				    "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
735 				error = EINVAL;
736 				goto parse_out;
737 			}
738 			break;
739 		case IWM_UCODE_TLV_SEC_RT:
740 			if ((error = iwm_firmware_store_section(sc,
741 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
742 				goto parse_out;
743 			break;
744 		case IWM_UCODE_TLV_SEC_INIT:
745 			if ((error = iwm_firmware_store_section(sc,
746 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
747 				goto parse_out;
748 			break;
749 		case IWM_UCODE_TLV_SEC_WOWLAN:
750 			if ((error = iwm_firmware_store_section(sc,
751 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
752 				goto parse_out;
753 			break;
754 		case IWM_UCODE_TLV_DEF_CALIB:
755 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
756 				error = EINVAL;
757 				goto parse_out;
758 			}
759 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
760 				goto parse_out;
761 			break;
762 		case IWM_UCODE_TLV_PHY_SKU:
763 			if (tlv_len != sizeof(uint32_t)) {
764 				error = EINVAL;
765 				goto parse_out;
766 			}
767 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
768 			break;
769 
770 		case IWM_UCODE_TLV_API_CHANGES_SET:
771 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
772 			/* ignore, not used by current driver */
773 			break;
774 
775 		default:
776 			DPRINTF(("%s: unknown firmware section %d, abort\n",
777 			    DEVNAME(sc), tlv_type));
778 			error = EINVAL;
779 			goto parse_out;
780 		}
781 
782 		len -= roundup(tlv_len, 4);
783 		data += roundup(tlv_len, 4);
784 	}
785 
786 	KASSERT(error == 0);
787 
788  parse_out:
789 	if (error) {
790 		aprint_error_dev(sc->sc_dev,
791 		    "firmware parse error, section type %d\n", tlv_type);
792 	}
793 
794 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
795 		aprint_error_dev(sc->sc_dev,
796 		    "device uses unsupported power ops\n");
797 		error = ENOTSUP;
798 	}
799 
800  out:
801 	if (error)
802 		fw->fw_status = IWM_FW_STATUS_NONE;
803 	else
804 		fw->fw_status = IWM_FW_STATUS_DONE;
805 	wakeup(&sc->sc_fw);
806 
807 	if (error && fw->fw_rawdata != NULL) {
808 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
809 		fw->fw_rawdata = NULL;
810 	}
811 	return error;
812 }
813 
814 /*
815  * basic device access
816  */
817 
818 static uint32_t
819 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
820 {
821 	IWM_WRITE(sc,
822 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
823 	IWM_BARRIER_READ_WRITE(sc);
824 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
825 }
826 
827 static void
828 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
829 {
830 	IWM_WRITE(sc,
831 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
832 	IWM_BARRIER_WRITE(sc);
833 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
834 }
835 
#ifdef IWM_DEBUG
/* iwlwifi: pcie/trans.c */
/*
 * Read "dwords" 32-bit words of device memory starting at "addr"
 * into "buf".  Returns EBUSY if the NIC could not be locked.
 */
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	uint32_t *vals = buf;
	int offs;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
	for (offs = 0; offs < dwords; offs++)
		vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
	iwm_nic_unlock(sc);

	return 0;
}
#endif
855 
/* iwlwifi: pcie/trans.c */
/*
 * Write `dwords` 32-bit words from `buf` to device SRAM at `addr`.
 * A NULL `buf` zero-fills the range instead (used to clear scheduler
 * state in iwm_post_alive()).  Returns 0, or EBUSY if the NIC cannot
 * be locked.
 */
static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
		return EBUSY;
	}
	return 0;
}
877 
878 static int
879 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
880 {
881 	return iwm_write_mem(sc, addr, &val, 1);
882 }
883 
884 static int
885 iwm_poll_bit(struct iwm_softc *sc, int reg,
886 	uint32_t bits, uint32_t mask, int timo)
887 {
888 	for (;;) {
889 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
890 			return 1;
891 		}
892 		if (timo < 10) {
893 			return 0;
894 		}
895 		timo -= 10;
896 		DELAY(10);
897 	}
898 }
899 
/*
 * Request MAC access (keeps the device awake) and wait up to 15ms for
 * the clocks to come up.  Returns 1 on success, 0 on timeout.  A
 * successful call is balanced by iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
	    	rv = 1;
	} else {
		/* jolt: kick the device with a forced NMI */
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
920 
/* Release the MAC access request taken by iwm_nic_lock(). */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
927 
/*
 * Read-modify-write a PRPH register: keep only the bits selected by
 * `mask`, then OR in `bits`.  Silently does nothing if the NIC cannot
 * be locked.
 */
static void
iwm_set_bits_mask_prph(struct iwm_softc *sc,
	uint32_t reg, uint32_t bits, uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}
942 
/* Set `bits` in a PRPH register, preserving all other bits. */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
948 
/* Clear `bits` in a PRPH register, preserving all other bits. */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
954 
955 /*
956  * DMA resource routines
957  */
958 
/*
 * Allocate a single-segment, `alignment`-aligned DMA buffer of `size`
 * bytes, map it into kernel virtual memory, and load it into a DMA
 * map.  On success the buffer is zeroed and dma->vaddr/dma->paddr are
 * valid.  On failure, partially acquired resources are released via
 * iwm_dma_contig_free() and the bus_dma error is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;
	void *va;

	dma->tag = tag;
	dma->size = size;

	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	dma->vaddr = va;

	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	/* Hand the device a zeroed buffer. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return error;
}
999 
/*
 * Tear down a buffer set up by iwm_dma_contig_alloc().  Safe to call
 * on a partially initialized iwm_dma_info (error-path cleanup).
 *
 * NOTE(review): if bus_dmamem_alloc() succeeded but bus_dmamem_map()
 * failed, dma->vaddr is NULL here and bus_dmamem_free() is never
 * called for the segment -- possible leak on that error path; verify
 * against bus_dma(9).
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
1016 
/* fwmem is the DMA staging buffer used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
1025 
/* Release the firmware-load DMA staging buffer. */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
1031 
1032 /* tx scheduler rings.  not used? */
1033 static int
1034 iwm_alloc_sched(struct iwm_softc *sc)
1035 {
1036 	int rv;
1037 
1038 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1039 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1040 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1041 	return rv;
1042 }
1043 
/* Release the TX scheduler DMA memory. */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
1049 
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB page, 4KB-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
1056 
/* Release the keep-warm page. */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
1062 
/* interrupt cause table (ICT), written by the device via DMA */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment must match the paddr shift programmed in iwm_ict_reset(). */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
1070 
/* Release the interrupt cause table. */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
1076 
/*
 * Allocate the RX ring: a 256-byte-aligned descriptor array (one
 * 32-bit word per slot), a 16-byte-aligned status area, and one DMA
 * map + receive buffer per slot (buffers come from iwm_rx_addbuf()).
 * On failure everything is released through iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1130 
/*
 * Stop RX DMA and wait (up to ~10ms) for channel 0 to go idle, then
 * rewind the ring pointer.  Buffers remain allocated.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;
}
1148 
1149 static void
1150 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1151 {
1152 	int i;
1153 
1154 	iwm_dma_contig_free(&ring->desc_dma);
1155 	iwm_dma_contig_free(&ring->stat_dma);
1156 
1157 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1158 		struct iwm_rx_data *data = &ring->data[i];
1159 
1160 		if (data->m != NULL) {
1161 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1162 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1163 			bus_dmamap_unload(sc->sc_dmat, data->map);
1164 			m_freem(data->m);
1165 		}
1166 		if (data->map != NULL)
1167 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1168 	}
1169 }
1170 
/*
 * Allocate TX ring `qid`: a 256-byte-aligned TFD descriptor array
 * for every ring, plus (for rings up to and including the command
 * queue) a command buffer array and per-slot DMA maps.  On failure
 * everything is released through iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * Only rings up to and including the command queue carry
	 * commands, so there is no need to allocate command space for
	 * the other rings.  (The upstream comment mentions "rings 0
	 * through 9 (4 EDCA + cmd)"; the bound actually used here is
	 * IWM_MVM_CMD_QUEUE.)
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute per-slot command/scratch bus addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
		    IWM_NUM_OF_TBS, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1232 
/*
 * Drop every queued frame on a TX ring, clear its descriptors, and
 * rewind the ring state.  DMA maps and command buffers stay allocated.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1257 
1258 static void
1259 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1260 {
1261 	int i;
1262 
1263 	iwm_dma_contig_free(&ring->desc_dma);
1264 	iwm_dma_contig_free(&ring->cmd_dma);
1265 
1266 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1267 		struct iwm_tx_data *data = &ring->data[i];
1268 
1269 		if (data->m != NULL) {
1270 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1271 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1272 			bus_dmamap_unload(sc->sc_dmat, data->map);
1273 			m_freem(data->m);
1274 		}
1275 		if (data->map != NULL)
1276 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1277 	}
1278 }
1279 
1280 /*
1281  * High-level hardware frobbing routines
1282  */
1283 
/* Narrow the interrupt mask to RF-kill only (used while stopped). */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1290 
1291 static int
1292 iwm_check_rfkill(struct iwm_softc *sc)
1293 {
1294 	uint32_t v;
1295 	int s;
1296 	int rv;
1297 
1298 	s = splnet();
1299 
1300 	/*
1301 	 * "documentation" is not really helpful here:
1302 	 *  27:	HW_RF_KILL_SW
1303 	 *	Indicates state of (platform's) hardware RF-Kill switch
1304 	 *
1305 	 * But apparently when it's off, it's on ...
1306 	 */
1307 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1308 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1309 	if (rv) {
1310 		sc->sc_flags |= IWM_FLAG_RFKILL;
1311 	} else {
1312 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1313 	}
1314 
1315 	splx(s);
1316 	return rv;
1317 }
1318 
/* Enable the full default interrupt set and remember it in sc_intmask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1325 
/* Re-program the previously saved interrupt mask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1331 
/*
 * Mask all interrupts and acknowledge any pending causes, at splnet
 * to avoid racing the interrupt handler.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1346 
/*
 * Reset and re-arm the interrupt cause table: interrupts are briefly
 * disabled, the table is zeroed, its physical address is programmed,
 * and interrupts are re-enabled in ICT mode.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1369 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Tell the device the driver is ready and poll (up to 50us) for the
 * NIC-ready handshake bit.  Returns nonzero when the device is ready.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
}
#undef IWM_HW_READY_TIMEOUT
1383 
1384 static int
1385 iwm_prepare_card_hw(struct iwm_softc *sc)
1386 {
1387 	int rv = 0;
1388 	int t = 0;
1389 
1390 	if (iwm_set_hw_ready(sc))
1391 		goto out;
1392 
1393 	/* If HW is not ready, prepare the conditions to check again */
1394 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1395 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1396 
1397 	do {
1398 		if (iwm_set_hw_ready(sc))
1399 			goto out;
1400 		DELAY(200);
1401 		t += 200;
1402 	} while (t < 150000);
1403 
1404 	rv = ETIMEDOUT;
1405 
1406  out:
1407 	return rv;
1408 }
1409 
/*
 * Mirror the PCIe ASPM L1 capability into the device's L0S setting.
 * The sense looks inverted; the comments below quote the Linux
 * driver's own messages.
 */
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1427 
1428 /*
1429  * Start up NIC's basic functionality after it has been reset
1430  * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
1431  * NOTE:  This does not load uCode nor start the embedded processor
1432  */
1433 static int
1434 iwm_apm_init(struct iwm_softc *sc)
1435 {
1436 	int error = 0;
1437 
1438 	DPRINTF(("iwm apm start\n"));
1439 
1440 	/* Disable L0S exit timer (platform NMI Work/Around) */
1441 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1442 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1443 
1444 	/*
1445 	 * Disable L0s without affecting L1;
1446 	 *  don't wait for ICH L0s (ICH bug W/A)
1447 	 */
1448 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1449 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1450 
1451 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1452 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1453 
1454 	/*
1455 	 * Enable HAP INTA (interrupt from management bus) to
1456 	 * wake device's PCI Express link L1a -> L0s
1457 	 */
1458 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1459 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1460 
1461 	iwm_apm_config(sc);
1462 
1463 #if 0 /* not for 7k */
1464 	/* Configure analog phase-lock-loop before activating to D0A */
1465 	if (trans->cfg->base_params->pll_cfg_val)
1466 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1467 		    trans->cfg->base_params->pll_cfg_val);
1468 #endif
1469 
1470 	/*
1471 	 * Set "initialization complete" bit to move adapter from
1472 	 * D0U* --> D0A* (powered-up active) state.
1473 	 */
1474 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1475 
1476 	/*
1477 	 * Wait for clock stabilization; once stabilized, access to
1478 	 * device-internal resources is supported, e.g. iwm_write_prph()
1479 	 * and accesses to uCode SRAM.
1480 	 */
1481 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1482 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1483 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1484 		aprint_error_dev(sc->sc_dev,
1485 		    "timeout waiting for clock stabilization\n");
1486 		goto out;
1487 	}
1488 
1489 	if (sc->host_interrupt_operation_mode) {
1490 		/*
1491 		 * This is a bit of an abuse - This is needed for 7260 / 3160
1492 		 * only check host_interrupt_operation_mode even if this is
1493 		 * not related to host_interrupt_operation_mode.
1494 		 *
1495 		 * Enable the oscillator to count wake up time for L1 exit. This
1496 		 * consumes slightly more power (100uA) - but allows to be sure
1497 		 * that we wake up from L1 on time.
1498 		 *
1499 		 * This looks weird: read twice the same register, discard the
1500 		 * value, set a bit, and yet again, read that same register
1501 		 * just to discard the value. But that's the way the hardware
1502 		 * seems to like it.
1503 		 */
1504 		iwm_read_prph(sc, IWM_OSC_CLK);
1505 		iwm_read_prph(sc, IWM_OSC_CLK);
1506 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1507 		iwm_read_prph(sc, IWM_OSC_CLK);
1508 		iwm_read_prph(sc, IWM_OSC_CLK);
1509 	}
1510 
1511 	/*
1512 	 * Enable DMA clock and wait for it to stabilize.
1513 	 *
1514 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1515 	 * do not disable clocks.  This preserves any hardware bits already
1516 	 * set by default in "CLK_CTRL_REG" after reset.
1517 	 */
1518 	iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1519 	//kpause("iwmapm", 0, mstohz(20), NULL);
1520 	DELAY(20);
1521 
1522 	/* Disable L1-Active */
1523 	iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1524 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1525 
1526 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
1527 	iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1528 	    IWM_APMG_RTC_INT_STT_RFKILL);
1529 
1530  out:
1531 	if (error)
1532 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
1533 	return error;
1534 }
1535 
/* iwlwifi/pcie/trans.c */
/*
 * Stop the device's busmaster DMA and wait (up to 100us) for the
 * master-disabled acknowledgement.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1549 
/* iwlwifi pcie/trans.c */
/*
 * Bring the device out of reset: prepare the hardware, issue a full
 * software reset, run APM init, and arm the RF-kill interrupt.
 * Returns 0 on success, or the error from the prepare/APM steps.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int error;

	if ((error = iwm_prepare_card_hw(sc)) != 0)
		return error;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
	DELAY(10);

	if ((error = iwm_apm_init(sc)) != 0)
		return error;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1573 
1574 /* iwlwifi pcie/trans.c */
1575 
/*
 * Quiesce the device: mask interrupts, stop the TX scheduler and all
 * FH DMA channels, reset the RX and TX rings, power down the
 * busmaster DMA clocks, stop the APM and reset the on-board
 * processor.  RF-kill interrupts remain armed so switch changes are
 * still delivered while the device is down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait (up to 4ms) for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1644 
/* iwlwifi pcie/trans.c (always main power) */
/* Select VMAIN as the device power source. */
static void
iwm_set_pwr(struct iwm_softc *sc)
{
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
1652 
/* iwlwifi: mvm/ops.c */
/*
 * Program the hardware interface configuration register from the
 * firmware's PHY SKU (radio type/step/dash, parsed from the
 * IWM_UCODE_TLV_PHY_SKU section) and the hardware revision.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1692 
/*
 * Program the RX DMA engine: stop channel 0, point it at the ring
 * descriptor and status areas, enable it with 4KB receive buffers,
 * and set the interrupt coalescing timer.  Returns 0 on success or
 * EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1751 
/*
 * Program the TX side: deactivate the scheduler, install the
 * keep-warm page address, and point each hardware queue at its ring's
 * descriptor array.  Returns 0 on success or EBUSY if the NIC cannot
 * be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1780 
1781 static int
1782 iwm_nic_init(struct iwm_softc *sc)
1783 {
1784 	int error;
1785 
1786 	iwm_apm_init(sc);
1787 	iwm_set_pwr(sc);
1788 
1789 	iwm_mvm_nic_config(sc);
1790 
1791 	if ((error = iwm_nic_rx_init(sc)) != 0)
1792 		return error;
1793 
1794 	/*
1795 	 * Ditto for TX, from iwn
1796 	 */
1797 	if ((error = iwm_nic_tx_init(sc)) != 0)
1798 		return error;
1799 
1800 	DPRINTF(("shadow registers enabled\n"));
1801 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1802 
1803 	return 0;
1804 }
1805 
#if 0
/*
 * Kept for reference from iwlwifi: TX FIFO numbering and the
 * access-category -> FIFO mapping.  Not compiled in.
 */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,
};

static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
#endif
1822 
/*
 * Activate TX queue `qid` and bind it to hardware FIFO `fifo`:
 * deactivate it, configure chaining/aggregation, reset the read and
 * write pointers, program window size and frame limit in scheduler
 * SRAM, then activate.  Silently gives up if the NIC cannot be
 * locked (XXX should return EBUSY).
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Activate the queue, bound to its FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
1865 
/*
 * Finish bring-up after the firmware's "alive" notification: verify
 * the scheduler SRAM base address, reset the ICT, clear scheduler
 * context state in SRAM, program the scheduler ring base, enable the
 * command queue and all FH DMA channels, and re-enable L1-Active.
 *
 * NOTE(review): iwm_enable_txq() takes and drops the NIC lock itself
 * while this function also holds it; the nested unlock appears
 * benign in practice but verify against iwm_nic_lock() semantics.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel; fifo 7 is the command FIFO per
	 * iwlwifi -- TODO confirm against current firmware API */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return error;
}
1921 
1922 /*
1923  * PHY db
1924  * iwlwifi/iwl-phy-db.c
1925  */
1926 
1927 /*
1928  * BEGIN iwl-phy-db.c
1929  */
1930 
/*
 * PHY db section identifiers exchanged with the firmware.
 * Values are part of the firmware API (CFG starts at 1).
 */
enum iwm_phy_db_section_type {
	IWM_PHY_DB_CFG = 1,		/* PHY configuration */
	IWM_PHY_DB_CALIB_NCH,		/* non-channel-specific calibration */
	IWM_PHY_DB_UNUSED,
	IWM_PHY_DB_CALIB_CHG_PAPD,	/* per-channel-group PAPD calibration */
	IWM_PHY_DB_CALIB_CHG_TXP,	/* per-channel-group TX power */
	IWM_PHY_DB_MAX
};
1939 
1940 #define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
1941 
1942 /*
1943  * phy db - configure operational ucode
1944  */
struct iwm_phy_db_cmd {
	uint16_t type;		/* enum iwm_phy_db_section_type, little-endian */
	uint16_t length;	/* payload length in bytes, little-endian */
	uint8_t data[];		/* section payload */
} __packed;
1950 
1951 /* for parsing of tx power channel group data that comes from the firmware*/
struct iwm_phy_db_chg_txp {
	uint32_t space;			/* not read by this driver */
	uint16_t max_channel_idx;	/* highest channel index the group covers (LE) */
} __packed;
1956 
1957 /*
1958  * phy db - Receive phy db chunk after calibrations
1959  */
struct iwm_calib_res_notif_phy_db {
	uint16_t type;		/* enum iwm_phy_db_section_type, little-endian */
	uint16_t length;	/* payload length in bytes, little-endian */
	uint8_t data[];		/* section payload */
} __packed;
1965 
1966 /*
1967  * get phy db section: returns a pointer to a phy db section specified by
1968  * type and channel group id.
1969  */
1970 static struct iwm_phy_db_entry *
1971 iwm_phy_db_get_section(struct iwm_softc *sc,
1972 	enum iwm_phy_db_section_type type, uint16_t chg_id)
1973 {
1974 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1975 
1976 	if (type >= IWM_PHY_DB_MAX)
1977 		return NULL;
1978 
1979 	switch (type) {
1980 	case IWM_PHY_DB_CFG:
1981 		return &phy_db->cfg;
1982 	case IWM_PHY_DB_CALIB_NCH:
1983 		return &phy_db->calib_nch;
1984 	case IWM_PHY_DB_CALIB_CHG_PAPD:
1985 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1986 			return NULL;
1987 		return &phy_db->calib_ch_group_papd[chg_id];
1988 	case IWM_PHY_DB_CALIB_CHG_TXP:
1989 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1990 			return NULL;
1991 		return &phy_db->calib_ch_group_txp[chg_id];
1992 	default:
1993 		return NULL;
1994 	}
1995 	return NULL;
1996 }
1997 
/*
 * Cache one PHY db chunk received from the firmware in the softc.
 * For channel-group sections, the group id is the first 16-bit word
 * of the payload.  Uses kmem_intr_alloc(KM_NOSLEEP), so it is safe
 * to call from interrupt context.
 * Returns EINVAL for an unknown section, ENOMEM on allocation failure.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/*
	 * NOTE(review): the cast assumes phy_db_notif->data is 2-byte
	 * aligned; the struct is __packed, so verify the callers.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previously cached copy of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
2029 
2030 static int
2031 iwm_is_valid_channel(uint16_t ch_id)
2032 {
2033 	if (ch_id <= 14 ||
2034 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2035 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2036 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2037 		return 1;
2038 	return 0;
2039 }
2040 
2041 static uint8_t
2042 iwm_ch_id_to_ch_index(uint16_t ch_id)
2043 {
2044 	if (!iwm_is_valid_channel(ch_id))
2045 		return 0xff;
2046 
2047 	if (ch_id <= 14)
2048 		return ch_id - 1;
2049 	if (ch_id <= 64)
2050 		return (ch_id + 20) / 4;
2051 	if (ch_id <= 140)
2052 		return (ch_id - 12) / 4;
2053 	return (ch_id - 13) / 4;
2054 }
2055 
2056 
2057 static uint16_t
2058 iwm_channel_id_to_papd(uint16_t ch_id)
2059 {
2060 	if (!iwm_is_valid_channel(ch_id))
2061 		return 0xff;
2062 
2063 	if (1 <= ch_id && ch_id <= 14)
2064 		return 0;
2065 	if (36 <= ch_id && ch_id <= 64)
2066 		return 1;
2067 	if (100 <= ch_id && ch_id <= 140)
2068 		return 2;
2069 	return 3;
2070 }
2071 
2072 static uint16_t
2073 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2074 {
2075 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2076 	struct iwm_phy_db_chg_txp *txp_chg;
2077 	int i;
2078 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2079 
2080 	if (ch_index == 0xff)
2081 		return 0xff;
2082 
2083 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2084 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2085 		if (!txp_chg)
2086 			return 0xff;
2087 		/*
2088 		 * Looking for the first channel group that its max channel is
2089 		 * higher then wanted channel.
2090 		 */
2091 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2092 			return i;
2093 	}
2094 	return 0xff;
2095 }
2096 
2097 static int
2098 iwm_phy_db_get_section_data(struct iwm_softc *sc,
2099 	uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
2100 {
2101 	struct iwm_phy_db_entry *entry;
2102 	uint16_t ch_group_id = 0;
2103 
2104 	/* find wanted channel group */
2105 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2106 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2107 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2108 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2109 
2110 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2111 	if (!entry)
2112 		return EINVAL;
2113 
2114 	*data = entry->data;
2115 	*size = entry->size;
2116 
2117 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2118 		       __func__, __LINE__, type, *size));
2119 
2120 	return 0;
2121 }
2122 
2123 static int
2124 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
2125 	uint16_t length, void *data)
2126 {
2127 	struct iwm_phy_db_cmd phy_db_cmd;
2128 	struct iwm_host_cmd cmd = {
2129 		.id = IWM_PHY_DB_CMD,
2130 		.flags = IWM_CMD_SYNC,
2131 	};
2132 
2133 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2134 	    type, length));
2135 
2136 	/* Set phy db cmd variables */
2137 	phy_db_cmd.type = le16toh(type);
2138 	phy_db_cmd.length = le16toh(length);
2139 
2140 	/* Set hcmd variables */
2141 	cmd.data[0] = &phy_db_cmd;
2142 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2143 	cmd.data[1] = data;
2144 	cmd.len[1] = length;
2145 	cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
2146 
2147 	return iwm_send_cmd(sc, &cmd);
2148 }
2149 
2150 static int
2151 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2152 	enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2153 {
2154 	uint16_t i;
2155 	int err;
2156 	struct iwm_phy_db_entry *entry;
2157 
2158 	/* Send all the channel-specific groups to operational fw */
2159 	for (i = 0; i < max_ch_groups; i++) {
2160 		entry = iwm_phy_db_get_section(sc, type, i);
2161 		if (!entry)
2162 			return EINVAL;
2163 
2164 		if (!entry->size)
2165 			continue;
2166 
2167 		/* Send the requested PHY DB section */
2168 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2169 		if (err) {
2170 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2171 			    "err %d\n", DEVNAME(sc), type, i, err));
2172 			return err;
2173 		}
2174 
2175 		DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
2176 	}
2177 
2178 	return 0;
2179 }
2180 
/*
 * Upload the complete cached PHY db to the runtime firmware image:
 * the CFG section, the non-channel calibration section, then the
 * PAPD and TX-power channel groups.  Returns the first error.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	DPRINTF(("Sending phy db data and configuration to runtime image\n"));

	/* Send PHY DB CFG section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send the non-channel-specific calibration section. */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
		    "sect, %d\n", DEVNAME(sc), err));
		return err;
	}

	/* Send all the PAPD channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific TX power groups, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	DPRINTF(("Finished sending phy db non channel data\n"));
	return 0;
}
2241 
2242 /*
2243  * END iwl-phy-db.c
2244  */
2245 
2246 /*
2247  * BEGIN iwlwifi/mvm/time-event.c
2248  */
2249 
2250 /*
2251  * For the high priority TE use a time event type that has similar priority to
2252  * the FW's action scan priority.
2253  */
2254 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2255 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2256 
2257 /* used to convert from time event API v2 to v1 */
2258 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2259 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the notification bits from a little-endian v2 policy word. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
2265 
/*
 * Extract the dependency-policy bits from a little-endian v2 policy
 * word.  NOTE(review): shifts by IWM_TE_V2_PLACEMENT_POS rather than
 * a dedicated dep-policy position — confirm against the firmware API
 * headers.
 */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
2272 
/* Extract the absence bit from a little-endian v2 policy word. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
2278 
/*
 * Convert a time-event command from the v2 layout to the v1 layout
 * for firmware that lacks IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.
 * Common fields are copied as-is (both layouts are little-endian);
 * the packed v2 policy word is unpacked into the separate v1
 * dep_policy/is_present/notify fields.
 */
static void
iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
	struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* The "endless repeat" sentinel differs between the two APIs. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2302 
2303 static int
2304 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
2305 	const struct iwm_time_event_cmd_v2 *cmd)
2306 {
2307 	struct iwm_time_event_cmd_v1 cmd_v1;
2308 
2309 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2310 		return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
2311 		    IWM_CMD_SYNC, sizeof(*cmd), cmd);
2312 
2313 	iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
2314 	return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
2315 	    sizeof(cmd_v1), &cmd_v1);
2316 }
2317 
2318 static int
2319 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
2320 	void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
2321 {
2322 	int ret;
2323 
2324 	DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
2325 
2326 	ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
2327 	if (ret) {
2328 		DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
2329 		    DEVNAME(sc), ret));
2330 	}
2331 
2332 	return ret;
2333 }
2334 
/*
 * Schedule a session-protection time event around association so
 * the firmware stays on-channel.  min_duration is currently unused.
 */
static void
iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Anchor the event to the device's current system time. */
	time_cmd.apply_time = htole32(iwm_read_prph(sc,
	    IWM_DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	/* Ask for host notifications at both start and end. */
	time_cmd.policy
	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END);

	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
2363 
2364 /*
2365  * END iwlwifi/mvm/time-event.c
2366  */
2367 
2368 /*
2369  * NVM read access and content parsing.  We do not support
2370  * external NVM or writing NVM.
2371  * iwlwifi/mvm/nvm.c
2372  */
2373 
2374 /* list of NVM sections we are allowed/need to read */
static const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,	/* read, but not used by the parser */
};
2381 
2382 /* Default NVM size to read */
2383 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2384 #define IWM_MAX_NVM_SECTION_SIZE 7000
2385 
2386 #define IWM_NVM_WRITE_OPCODE 1
2387 #define IWM_NVM_READ_OPCODE 0
2388 
2389 static int
2390 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
2391 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
2392 {
2393 	offset = 0;
2394 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2395 		.offset = htole16(offset),
2396 		.length = htole16(length),
2397 		.type = htole16(section),
2398 		.op_code = IWM_NVM_READ_OPCODE,
2399 	};
2400 	struct iwm_nvm_access_resp *nvm_resp;
2401 	struct iwm_rx_packet *pkt;
2402 	struct iwm_host_cmd cmd = {
2403 		.id = IWM_NVM_ACCESS_CMD,
2404 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
2405 		    IWM_CMD_SEND_IN_RFKILL,
2406 		.data = { &nvm_access_cmd, },
2407 	};
2408 	int ret, bytes_read, offset_read;
2409 	uint8_t *resp_data;
2410 
2411 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2412 
2413 	ret = iwm_send_cmd(sc, &cmd);
2414 	if (ret)
2415 		return ret;
2416 
2417 	pkt = cmd.resp_pkt;
2418 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2419 		DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
2420 		    DEVNAME(sc), pkt->hdr.flags));
2421 		ret = EIO;
2422 		goto exit;
2423 	}
2424 
2425 	/* Extract NVM response */
2426 	nvm_resp = (void *)pkt->data;
2427 
2428 	ret = le16toh(nvm_resp->status);
2429 	bytes_read = le16toh(nvm_resp->length);
2430 	offset_read = le16toh(nvm_resp->offset);
2431 	resp_data = nvm_resp->data;
2432 	if (ret) {
2433 		DPRINTF(("%s: NVM access command failed with status %d\n",
2434 		    DEVNAME(sc), ret));
2435 		ret = EINVAL;
2436 		goto exit;
2437 	}
2438 
2439 	if (offset_read != offset) {
2440 		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
2441 		    DEVNAME(sc), offset_read));
2442 		ret = EINVAL;
2443 		goto exit;
2444 	}
2445 
2446 	memcpy(data + offset, resp_data, bytes_read);
2447 	*len = bytes_read;
2448 
2449  exit:
2450 	iwm_free_resp(sc, &cmd);
2451 	return ret;
2452 }
2453 
2454 /*
2455  * Reads an NVM section completely.
2456  * NICs prior to 7000 family doesn't have a real NVM, but just read
2457  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2458  * by uCode, we need to manually check in this case that we don't
2459  * overflow and try to read more than the EEPROM size.
2460  * For 7000 family NICs, we supply the maximal size we can read, and
2461  * the uCode fills the response with as much data as we can,
2462  * without overflowing, so no check is needed.
2463  */
/*
 * Read a complete NVM section in IWM_NVM_DEFAULT_CHUNK_SIZE chunks
 * into 'data', setting *len to the total number of bytes read.
 * The loop stops once the firmware returns a short chunk.
 * NOTE(review): if the firmware always returns full chunks, this
 * loop has no other termination bound — caller-supplied buffers
 * must be large enough (IWM_OTP_LOW_IMAGE_SIZE at the call site).
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read the NVM until exhausted (reading less than requested) */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "Cannot read NVM from section %d offset %d, "
			    "length %d\n", section, *len, length);
			return error;
		}
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
2491 
2492 /*
2493  * BEGIN IWM_NVM_PARSE
2494  */
2495 
2496 /* iwlwifi/iwl-nvm-parse.c */
2497 
2498 /* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address, stored byte-swapped in pairs */

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	/* channel flags table starts at absolute word 0x1E0 */
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
2515 
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),	/* 11n forced off in iwm_parse_nvm_data() */
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* presumably dBm; unused in this part of the driver — TODO confirm */
#define DEFAULT_MAX_TX_POWER 16
2533 
2534 /**
2535  * enum iwm_nvm_channel_flags - channel flags in NVM
2536  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2537  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2538  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2539  * @IWM_NVM_CHANNEL_RADAR: radar detection required
2540  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2541  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2542  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2543  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2544  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2545  */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	/* bit 2 unused here */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	/* bits 5-6 unused here */
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
2557 
/*
 * Populate ic->ic_channels from the NVM channel-flags table.  The
 * index into iwm_nvm_channels selects the band: entries below
 * IWM_NUM_2GHZ_CHANNELS are 2.4GHz, the rest 5GHz.  Channels not
 * marked valid (or whose band the SKU disables) are skipped, and
 * channels without the ACTIVE flag are marked passive-scan only.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Drop 5GHz channels when the SKU disables the band. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		hw_value = iwm_nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Active scanning not allowed here: passive-scan only. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
	}
}
2607 
/*
 * Parse the HW, SW and calibration NVM sections into sc->sc_nvm:
 * NVM version, radio configuration, SKU capabilities, antenna
 * masks, crystal calibration, MAC address and the channel map.
 * Returns EINVAL when the radio config reports no TX or RX antennas.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Unpack the radio configuration word (see RF_CFG masks). */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n support is deliberately disabled regardless of the SKU. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
		    data->valid_tx_ant, data->valid_rx_ant));
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): copied without le16 conversion, unlike every
	 * other field read from the NVM — verify this is intended.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2661 
2662 /*
2663  * END NVM PARSE
2664  */
2665 
/* One NVM section as read from the firmware (driver-private copy). */
struct iwm_nvm_section {
	uint16_t length;	/* bytes in data */
	const uint8_t *data;	/* kmem_alloc()ed copy of the section */
};
2670 
2671 #define IWM_FW_VALID_TX_ANT(sc) \
2672     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
2673     >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
2674 #define IWM_FW_VALID_RX_ANT(sc) \
2675     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
2676     >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
2677 
/*
 * Validate that the required NVM sections are present and hand them
 * to iwm_parse_nvm_data().
 * NOTE(review): only the SW and HW sections are NULL-checked; the
 * CALIBRATION section is dereferenced by the parser unchecked.  The
 * current caller always reads it first, but verify if callers change.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib;

	/* Checking for required sections */
	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
		return ENOENT;
	}

	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	return iwm_parse_nvm_data(sc, hw, sw, calib,
	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
}
2696 
2697 static int
2698 iwm_nvm_init(struct iwm_softc *sc)
2699 {
2700 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2701 	int i, section, error;
2702 	uint16_t len;
2703 	uint8_t *nvm_buffer, *temp;
2704 
2705 	/* Read From FW NVM */
2706 	DPRINTF(("Read NVM\n"));
2707 
2708 	/* TODO: find correct NVM max size for a section */
2709 	nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
2710 	for (i = 0; i < __arraycount(nvm_to_read); i++) {
2711 		section = nvm_to_read[i];
2712 		KASSERT(section <= __arraycount(nvm_sections));
2713 
2714 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
2715 		if (error)
2716 			break;
2717 
2718 		temp = kmem_alloc(len, KM_SLEEP);
2719 		memcpy(temp, nvm_buffer, len);
2720 		nvm_sections[section].data = temp;
2721 		nvm_sections[section].length = len;
2722 	}
2723 	kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
2724 	if (error)
2725 		return error;
2726 
2727 	return iwm_parse_nvm_sections(sc, nvm_sections);
2728 }
2729 
2730 /*
2731  * Firmware loading gunk.  This is kind of a weird hybrid between the
2732  * iwn driver and the Linux iwlwifi driver.
2733  */
2734 
/*
 * DMA one firmware section into device SRAM at dst_addr using the
 * service DMA channel, then sleep until the transfer-complete
 * interrupt sets sc_fw_chunk_done (1s timeout per wait).
 * Returns EBUSY if the NIC lock cannot be taken, or the tsleep error.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat,
	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel, program source/destination, then enable. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	while (!sc->sc_fw_chunk_done)
		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
			break;

	return error;
}
2779 
2780 static int
2781 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2782 {
2783 	struct iwm_fw_sects *fws;
2784 	int error, i, w;
2785 	void *data;
2786 	uint32_t dlen;
2787 	uint32_t offset;
2788 
2789 	sc->sc_uc.uc_intr = 0;
2790 
2791 	fws = &sc->sc_fw.fw_sects[ucode_type];
2792 	for (i = 0; i < fws->fw_count; i++) {
2793 		data = fws->fw_sect[i].fws_data;
2794 		dlen = fws->fw_sect[i].fws_len;
2795 		offset = fws->fw_sect[i].fws_devoff;
2796 		DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
2797 		    ucode_type, offset, dlen));
2798 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2799 		if (error) {
2800 			DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
2801 			    "returned error %02d\n", i, fws->fw_count, error));
2802 			return error;
2803 		}
2804 	}
2805 
2806 	/* wait for the firmware to load */
2807 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2808 
2809 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2810 		error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
2811 	}
2812 
2813 	return error;
2814 }
2815 
2816 /* iwlwifi: pcie/trans.c */
2817 static int
2818 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2819 {
2820 	int error;
2821 
2822 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
2823 
2824 	if ((error = iwm_nic_init(sc)) != 0) {
2825 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
2826 		return error;
2827 	}
2828 
2829 	/* make sure rfkill handshake bits are cleared */
2830 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2831 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2832 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2833 
2834 	/* clear (again), then enable host interrupts */
2835 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
2836 	iwm_enable_interrupts(sc);
2837 
2838 	/* really make sure rfkill handshake bits are cleared */
2839 	/* maybe we should write a few times more?  just to make sure */
2840 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2841 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2842 
2843 	/* Load the given image to the HW */
2844 	error = iwm_load_firmware(sc, ucode_type);
2845 	if (error) {
2846 		aprint_error_dev(sc->sc_dev, "failed to load firmware: %d\n",
2847 		    error);
2848 	}
2849 	return error;
2850 }
2851 
/*
 * Post-"alive" hook; sched_base is currently unused here because
 * iwm_post_alive() reads it from the softc itself.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2857 
/* Tell the firmware which TX antennas (bitmask) are usable. */
static int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = htole32(valid_tx_ant),
	};

	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}
2868 
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the default calibration triggers
 * for the currently selected ucode image.
 * NOTE(review): phy_cfg_cmd is not zeroed before the field
 * assignments — fine if the struct has no other members or padding
 * the firmware inspects; verify against the command layout.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2887 
2888 static int
2889 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2890 	enum iwm_ucode_type ucode_type)
2891 {
2892 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2893 	int error;
2894 
2895 	if ((error = iwm_read_firmware(sc)) != 0)
2896 		return error;
2897 
2898 	sc->sc_uc_current = ucode_type;
2899 	error = iwm_start_fw(sc, ucode_type);
2900 	if (error) {
2901 		sc->sc_uc_current = old_type;
2902 		return error;
2903 	}
2904 
2905 	return iwm_fw_alive(sc, sc->sched_base);
2906 }
2907 
2908 /*
2909  * mvm misc bits
2910  */
2911 
2912 /*
2913  * follows iwlwifi/fw.c
2914  */
/*
 * Boot the INIT ucode image.  With justnvm set, only read and parse
 * the NVM (deriving the MAC address and scan command size) and
 * return.  Otherwise configure antennas and PHY, then wait for the
 * firmware's init-complete notification (2s per sleep).
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		/* The MAC address comes from the NVM HW section. */
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/* Size the scan command for the maximum probe + channels. */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	* Send phy configurations command to init uCode
	* to start the 16.0 uCode init image internal calibrations.
	*/
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2974 
2975 /*
2976  * receive side
2977  */
2978 
2979 /* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int error;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/*
	 * Attach external storage: an ordinary cluster when the
	 * requested size fits, otherwise a malloc'ed buffer of the
	 * full RX buffer size.
	 */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If this slot already holds a mapped buffer we are replacing
	 * it; failing the new load afterwards would leave the ring
	 * entry empty, which we cannot recover from (hence 'fatal').
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return error;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/* The hardware takes the buffer's physical address shifted by 8. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3027 
3028 /* iwlwifi: mvm/rx.c */
3029 #define IWM_RSSI_OFFSET 50
3030 static int
3031 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3032 {
3033 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3034 	uint32_t agc_a, agc_b;
3035 	uint32_t val;
3036 
3037 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3038 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3039 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3040 
3041 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3042 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3043 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3044 
3045 	/*
3046 	 * dBm = rssi dB - agc dB - constant.
3047 	 * Higher AGC (higher radio gain) means lower signal.
3048 	 */
3049 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3050 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3051 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3052 
3053 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3054 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3055 
3056 	return max_rssi_dbm;
3057 }
3058 
3059 /* iwlwifi: mvm/rx.c */
3060 /*
3061  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3062  * values are reported by the fw as positive values - need to negate
3063  * to obtain their dBM.  Account for missing antennas by replacing 0
3064  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3065  */
3066 static int
3067 iwm_mvm_get_signal_strength(struct iwm_softc *sc,
3068     struct iwm_rx_phy_info *phy_info)
3069 {
3070 	int energy_a, energy_b, energy_c, max_energy;
3071 	uint32_t val;
3072 
3073 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3074 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3075 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3076 	energy_a = energy_a ? -energy_a : -256;
3077 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3078 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3079 	energy_b = energy_b ? -energy_b : -256;
3080 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3081 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3082 	energy_c = energy_c ? -energy_c : -256;
3083 	max_energy = MAX(energy_a, energy_b);
3084 	max_energy = MAX(max_energy, energy_c);
3085 
3086 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3087 	    energy_a, energy_b, energy_c, max_energy));
3088 
3089 	return max_energy;
3090 }
3091 
/*
 * Handle an RX PHY-info notification: cache the measurements so the
 * MPDU frame that follows (see iwm_mvm_rx_rx_mpdu) can use them.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the DMA'd payload visible to the CPU before copying it. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3104 
3105 /*
3106  * Retrieve the average noise (in dBm) among receivers.
3107  */
3108 static int
3109 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
3110 {
3111 	int i, total, nbant, noise;
3112 
3113 	total = nbant = noise = 0;
3114 	for (i = 0; i < 3; i++) {
3115 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3116 		if (noise) {
3117 			total += noise;
3118 			nbant++;
3119 		}
3120 	}
3121 
3122 	/* There should be at least one antenna but check anyway. */
3123 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3124 }
3125 
3126 /*
3127  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3128  *
3129  * Handles the actual data of the Rx packet from the fw
3130  */
static void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY data was cached by the preceding iwm_mvm_rx_rx_phy_cmd. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The firmware appends a status word after the frame body. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	/* Point the ring mbuf at the 802.11 frame inside the packet. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports energy; older reports RSSI/AGC pairs. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(ic, m);

	/* replenish ring for the buffer we're going to feed to the sharks */
	/* (must succeed before we hand 'm' up; otherwise drop silently) */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m->m_pkthdr.rcvif = IC2IFP(ic);

	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
			c = &ic->ic_channels[le32toh(phy_info->channel)];
	}

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Feed a radiotap copy to any attached BPF listener. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Map the firmware PLCP rate code to 500kb/s units. */
		switch (phy_info->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);
}
3236 
3237 static void
3238 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3239 	struct iwm_node *in)
3240 {
3241 	struct ieee80211com *ic = &sc->sc_ic;
3242 	struct ifnet *ifp = IC2IFP(ic);
3243 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3244 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3245 	int failack = tx_resp->failure_frame;
3246 
3247 	KASSERT(tx_resp->frame_count == 1);
3248 
3249 	/* Update rate control statistics. */
3250 	in->in_amn.amn_txcnt++;
3251 	if (failack > 0) {
3252 		in->in_amn.amn_retrycnt++;
3253 	}
3254 
3255 	if (status != IWM_TX_STATUS_SUCCESS &&
3256 	    status != IWM_TX_STATUS_DIRECT_DONE)
3257 		ifp->if_oerrors++;
3258 	else
3259 		ifp->if_opackets++;
3260 }
3261 
/*
 * Handle a TX-command response: update statistics, release the
 * transmitted mbuf and node reference, and restart output if the
 * queue drained below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* Frame got out; cancel the watchdog countdown. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	/* Drop the node reference taken when the frame was queued. */
	txd->m = NULL;
	txd->in = NULL;
	ieee80211_free_node(&in->in_ni);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3316 
3317 /*
3318  * BEGIN iwlwifi/mvm/binding.c
3319  */
3320 
3321 static int
3322 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3323 {
3324 	struct iwm_binding_cmd cmd;
3325 	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
3326 	int i, ret;
3327 	uint32_t status;
3328 
3329 	memset(&cmd, 0, sizeof(cmd));
3330 
3331 	cmd.id_and_color
3332 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3333 	cmd.action = htole32(action);
3334 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3335 
3336 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3337 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3338 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3339 
3340 	status = 0;
3341 	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3342 	    sizeof(cmd), &cmd, &status);
3343 	if (ret) {
3344 		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
3345 		    DEVNAME(sc), action, ret));
3346 		return ret;
3347 	}
3348 
3349 	if (status) {
3350 		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
3351 		    status));
3352 		ret = EIO;
3353 	}
3354 
3355 	return ret;
3356 }
3357 
/*
 * Forward a binding action to the firmware.  The previous code ignored
 * the 'add' argument and unconditionally sent IWM_FW_CTXT_ACTION_ADD,
 * so any non-ADD request would silently re-add the binding.  The only
 * caller in this file passes IWM_FW_CTXT_ACTION_ADD, so passing the
 * argument through is behavior-compatible for existing callers.
 */
static int
iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
{
	return iwm_mvm_binding_cmd(sc, in, add);
}
3363 
3364 static int
3365 iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
3366 {
3367 	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
3368 }
3369 
3370 /*
3371  * END iwlwifi/mvm/binding.c
3372  */
3373 
3374 /*
3375  * BEGIN iwlwifi/mvm/phy-ctxt.c
3376  */
3377 
3378 /*
3379  * Construct the generic fields of the PHY context command
3380  */
3381 static void
3382 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3383 	struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3384 {
3385 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3386 
3387 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3388 	    ctxt->color));
3389 	cmd->action = htole32(action);
3390 	cmd->apply_time = htole32(apply_time);
3391 }
3392 
3393 /*
3394  * Add the phy configuration to the PHY context command
3395  */
3396 static void
3397 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
3398 	struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
3399 	uint8_t chains_static, uint8_t chains_dynamic)
3400 {
3401 	struct ieee80211com *ic = &sc->sc_ic;
3402 	uint8_t active_cnt, idle_cnt;
3403 
3404 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3405 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3406 
3407 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3408 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3409 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3410 
3411 	/* Set rx the chains */
3412 	idle_cnt = chains_static;
3413 	active_cnt = chains_dynamic;
3414 
3415 	cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
3416 					IWM_PHY_RX_CHAIN_VALID_POS);
3417 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3418 	cmd->rxchain_info |= htole32(active_cnt <<
3419 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3420 
3421 	cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
3422 }
3423 
3424 /*
3425  * Send a command
3426  * only if something in the configuration changed: in case that this is the
3427  * first time that the phy configuration is applied or in case that the phy
3428  * configuration changed from the previous apply.
3429  */
3430 static int
3431 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
3432 	struct iwm_mvm_phy_ctxt *ctxt,
3433 	uint8_t chains_static, uint8_t chains_dynamic,
3434 	uint32_t action, uint32_t apply_time)
3435 {
3436 	struct iwm_phy_context_cmd cmd;
3437 	int ret;
3438 
3439 	/* Set the command header fields */
3440 	iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3441 
3442 	/* Set the command data */
3443 	iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3444 	    chains_static, chains_dynamic);
3445 
3446 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
3447 	    sizeof(struct iwm_phy_context_cmd), &cmd);
3448 	if (ret) {
3449 		DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
3450 	}
3451 	return ret;
3452 }
3453 
3454 /*
3455  * Send a command to add a PHY context based on the current HW configuration.
3456  */
3457 static int
3458 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3459 	struct ieee80211_channel *chan,
3460 	uint8_t chains_static, uint8_t chains_dynamic)
3461 {
3462 	ctxt->channel = chan;
3463 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3464 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
3465 }
3466 
3467 /*
3468  * Send a command to modify the PHY context based on the current HW
3469  * configuration. Note that the function does not check that the configuration
3470  * changed.
3471  */
3472 static int
3473 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
3474 	struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
3475 	uint8_t chains_static, uint8_t chains_dynamic)
3476 {
3477 	ctxt->channel = chan;
3478 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3479 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
3480 }
3481 
3482 /*
3483  * END iwlwifi/mvm/phy-ctxt.c
3484  */
3485 
3486 /*
3487  * transmit side
3488  */
3489 
3490 /*
3491  * Send a command to the firmware.  We try to implement the Linux
3492  * driver interface for the routine.
3493  * mostly from if_iwn (iwn_cmd()).
3494  *
3495  * For now, we always copy the first part and map the second one (if it exists).
3496  */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *data;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int error = 0, i, paylen, off, s;
	int code;
	int async, wantresp;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all fragments of the host command. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	/* (only one response-carrying command may be in flight at a time) */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != -1)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		error = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	if (paylen > sizeof(cmd->data)) {
		/* Command is too large */
		/* for the ring slot; DMA-map it from a malloc'ed mbuf. */
		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
			error = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			error = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
		    IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			m_freem(m);
			goto out;
		}
		data->m = m;
		paddr = data->map->dm_segs[0].ds_addr;
	} else {
		/* Small command: use the preallocated slot in the ring. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->hdr.code = code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	/* Gather all payload fragments into the command buffer. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((sizeof(cmd->hdr) + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));

	/* Flush the command body (mbuf map or ring slot) and the TFD. */
	if (paylen > sizeof(cmd->data)) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/* Wake the NIC's MAC clock and wait until it is actually up. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
		error = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* m..m-mmyy-mmyyyy-mym-ym m-my generation */
		/* (iwm_cmd_done wakes us up on the descriptor address) */
		int generation = sc->sc_generation;
		error = tsleep(desc, PCATCH, "iwmcmd", hz);
		if (error == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				error = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response buffer we reserved above. */
	if (wantresp && error != 0) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return error;
}
3648 
3649 /* iwlwifi: mvm/utils.c */
3650 static int
3651 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
3652 	uint32_t flags, uint16_t len, const void *data)
3653 {
3654 	struct iwm_host_cmd cmd = {
3655 		.id = id,
3656 		.len = { len, },
3657 		.data = { data, },
3658 		.flags = flags,
3659 	};
3660 
3661 	return iwm_send_cmd(sc, &cmd);
3662 }
3663 
3664 /* iwlwifi: mvm/utils.c */
/*
 * Send a command synchronously and extract the 32-bit status word the
 * firmware returns in its response packet.  Returns 0 with *status
 * set on success; the response buffer is always released before return.
 */
static int
iwm_mvm_send_cmd_status(struct iwm_softc *sc,
	struct iwm_host_cmd *cmd, uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int error, resp_len;

	//lockdep_assert_held(&mvm->mutex);

	/* Force a synchronous send that keeps the response packet. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;

	if ((error = iwm_send_cmd(sc, cmd)) != 0)
		return error;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		error = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		error = EIO;
		goto out_free_resp;
	}

	/* The response must be exactly one iwm_cmd_response. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		error = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	/* Release sc_cmd_resp so other commands may proceed. */
	iwm_free_resp(sc, cmd);
	return error;
}
3705 
3706 /* iwlwifi/mvm/utils.c */
3707 static int
3708 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
3709 	uint16_t len, const void *data, uint32_t *status)
3710 {
3711 	struct iwm_host_cmd cmd = {
3712 		.id = id,
3713 		.len = { len, },
3714 		.data = { data, },
3715 	};
3716 
3717 	return iwm_mvm_send_cmd_status(sc, &cmd, status);
3718 }
3719 
3720 static void
3721 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3722 {
3723 	KASSERT(sc->sc_wantresp != -1);
3724 	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
3725 	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
3726 	sc->sc_wantresp = -1;
3727 	wakeup(&sc->sc_wantresp);
3728 }
3729 
3730 /*
3731  * Process a "command done" firmware notification.  This is where we wakeup
3732  * processes waiting for a synchronous command completion.
3733  * from if_iwn
3734  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* iwm_send_cmd sleeps on the descriptor address; wake it. */
	wakeup(&ring->desc[pkt->hdr.idx]);
}
3757 
#if 0
/*
 * Update the TX scheduler byte-count table for one queue entry;
 * necessary only for block ack mode.
 *
 * Fixed while disabled: the original bus_dmamap_sync() calls computed
 * their offsets from an undeclared variable 'w' and the table entry
 * itself was written without going through it, so this block would not
 * compile if the #if 0 were ever removed.  'w' is now a pointer to the
 * entry being updated, used both for the store and the sync offsets.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		*(w + IWM_TFD_QUEUE_SIZE_MAX) = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
3793 
3794 /*
3795  * Fill in various bit for management frames, and leave them
3796  * unfilled for data frames (firmware takes care of that).
3797  * Return the selected TX rate.
3798  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;
	int nrates = ni->ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	} else if (ic->ic_fixed_rate != -1) {
		ridx = sc->sc_fixed_ridx;
	} else {
		/* for data frames, use RS table */
		/*
		 * STA_RATE lets the firmware pick from its rate table
		 * starting at this index; rate_n_flags is deliberately
		 * left unset here — we return before filling it below.
		 */
		tx->initial_rate_index = (nrates - 1) - ni->ni_txrate;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
		ridx = in->in_ridx[ni->ni_txrate];
		return &iwm_rates[ridx];
	}

	/* Fixed rate path: encode antenna, CCK flag, and PLCP value. */
	rinfo = &iwm_rates[ridx];
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3836 
3837 #define TB0_SIZE 16
3838 static int
3839 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3840 {
3841 	struct ieee80211com *ic = &sc->sc_ic;
3842 	struct iwm_node *in = (void *)ni;
3843 	struct iwm_tx_ring *ring;
3844 	struct iwm_tx_data *data;
3845 	struct iwm_tfd *desc;
3846 	struct iwm_device_cmd *cmd;
3847 	struct iwm_tx_cmd *tx;
3848 	struct ieee80211_frame *wh;
3849 	struct ieee80211_key *k = NULL;
3850 	struct mbuf *m1;
3851 	const struct iwm_rate *rinfo;
3852 	uint32_t flags;
3853 	u_int hdrlen;
3854 	bus_dma_segment_t *seg;
3855 	uint8_t tid, type;
3856 	int i, totlen, error, pad;
3857 	int hdrlen2;
3858 
3859 	wh = mtod(m, struct ieee80211_frame *);
3860 	hdrlen = ieee80211_anyhdrsize(wh);
3861 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3862 
3863 	hdrlen2 = (ieee80211_has_qos(wh)) ?
3864 	    sizeof (struct ieee80211_qosframe) :
3865 	    sizeof (struct ieee80211_frame);
3866 
3867 	if (hdrlen != hdrlen2)
3868 		DPRINTF(("%s: hdrlen error (%d != %d)\n",
3869 		    DEVNAME(sc), hdrlen, hdrlen2));
3870 
3871 	tid = 0;
3872 
3873 	ring = &sc->txq[ac];
3874 	desc = &ring->desc[ring->cur];
3875 	memset(desc, 0, sizeof(*desc));
3876 	data = &ring->data[ring->cur];
3877 
3878 	/* Fill out iwm_tx_cmd to send to the firmware */
3879 	cmd = &ring->cmd[ring->cur];
3880 	cmd->hdr.code = IWM_TX_CMD;
3881 	cmd->hdr.flags = 0;
3882 	cmd->hdr.qid = ring->qid;
3883 	cmd->hdr.idx = ring->cur;
3884 
3885 	tx = (void *)cmd->data;
3886 	memset(tx, 0, sizeof(*tx));
3887 
3888 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3889 
3890 	if (sc->sc_drvbpf != NULL) {
3891 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3892 
3893 		tap->wt_flags = 0;
3894 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3895 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3896 		tap->wt_rate = rinfo->rate;
3897 		tap->wt_hwqueue = ac;
3898 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
3899 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3900 
3901 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
3902 	}
3903 
3904 	/* Encrypt the frame if need be. */
3905 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3906 		k = ieee80211_crypto_encap(ic, ni, m);
3907 		if (k == NULL) {
3908 			m_freem(m);
3909 			return ENOBUFS;
3910 		}
3911 		/* Packet header may have moved, reset our local pointer. */
3912 		wh = mtod(m, struct ieee80211_frame *);
3913 	}
3914 	totlen = m->m_pkthdr.len;
3915 
3916 	flags = 0;
3917 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3918 		flags |= IWM_TX_CMD_FLG_ACK;
3919 	}
3920 
3921 	if (type != IEEE80211_FC0_TYPE_DATA
3922 	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
3923 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3924 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3925 	}
3926 
3927 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3928 	    type != IEEE80211_FC0_TYPE_DATA)
3929 		tx->sta_id = sc->sc_aux_sta.sta_id;
3930 	else
3931 		tx->sta_id = IWM_STATION_ID;
3932 
3933 	if (type == IEEE80211_FC0_TYPE_MGT) {
3934 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3935 
3936 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3937 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3938 			tx->pm_frame_timeout = htole16(3);
3939 		else
3940 			tx->pm_frame_timeout = htole16(2);
3941 	} else {
3942 		tx->pm_frame_timeout = htole16(0);
3943 	}
3944 
3945 	if (hdrlen & 3) {
3946 		/* First segment length must be a multiple of 4. */
3947 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3948 		pad = 4 - (hdrlen & 3);
3949 	} else
3950 		pad = 0;
3951 
3952 	tx->driver_txop = 0;
3953 	tx->next_frame_len = 0;
3954 
3955 	tx->len = htole16(totlen);
3956 	tx->tid_tspec = tid;
3957 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3958 
3959 	/* Set physical address of "scratch area". */
3960 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3961 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3962 
3963 	/* Copy 802.11 header in TX command. */
3964 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3965 
3966 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3967 
3968 	tx->sec_ctl = 0;
3969 	tx->tx_flags |= htole32(flags);
3970 
3971 	/* Trim 802.11 header. */
3972 	m_adj(m, hdrlen);
3973 
3974 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3975 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3976 	if (error != 0) {
3977 		if (error != EFBIG) {
3978 			aprint_error_dev(sc->sc_dev,
3979 			    "can't map mbuf (error %d)\n", error);
3980 			m_freem(m);
3981 			return error;
3982 		}
3983 		/* Too many DMA segments, linearize mbuf. */
3984 		MGETHDR(m1, M_DONTWAIT, MT_DATA);
3985 		if (m1 == NULL) {
3986 			m_freem(m);
3987 			return ENOBUFS;
3988 		}
3989 		if (m->m_pkthdr.len > MHLEN) {
3990 			MCLGET(m1, M_DONTWAIT);
3991 			if (!(m1->m_flags & M_EXT)) {
3992 				m_freem(m);
3993 				m_freem(m1);
3994 				return ENOBUFS;
3995 			}
3996 		}
3997 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
3998 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
3999 		m_freem(m);
4000 		m = m1;
4001 
4002 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4003 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4004 		if (error != 0) {
4005 			aprint_error_dev(sc->sc_dev,
4006 			    "can't map mbuf (error %d)\n", error);
4007 			m_freem(m);
4008 			return error;
4009 		}
4010 	}
4011 	data->m = m;
4012 	data->in = in;
4013 	data->done = 0;
4014 
4015 	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4016 	KASSERT(data->in != NULL);
4017 
4018 	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
4019 	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));
4020 
4021 	/* Fill TX descriptor. */
4022 	desc->num_tbs = 2 + data->map->dm_nsegs;
4023 
4024 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4025 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4026 	    (TB0_SIZE << 4);
4027 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4028 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4029 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4030 	      + hdrlen + pad - TB0_SIZE) << 4);
4031 
4032 	/* Other DMA segments are for data payload. */
4033 	seg = data->map->dm_segs;
4034 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4035 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
4036 		desc->tbs[i+2].hi_n_len = \
4037 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4038 		    | ((seg->ds_len) << 4);
4039 	}
4040 
4041 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4042 	    BUS_DMASYNC_PREWRITE);
4043 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4044 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4045 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
4046 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4047 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4048 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
4049 
4050 #if 0
4051 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
4052 #endif
4053 
4054 	/* Kick TX ring. */
4055 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4056 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4057 
4058 	/* Mark TX ring as full if we reach a certain threshold. */
4059 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4060 		sc->qfullmsk |= 1 << ring->qid;
4061 	}
4062 
4063 	return 0;
4064 }
4065 
#if 0
/* not necessary? */
/*
 * Disabled: ask the firmware to flush the TX FIFOs selected by tfd_msk
 * via IWM_TXPATH_FLUSH.  With sync != 0 the command waits for
 * completion, otherwise it is fired asynchronously.
 * Returns 0 on success or the error from iwm_mvm_send_cmd_pdu().
 */
static int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    ret);
	return ret;
}
#endif
4086 
4087 
4088 /*
4089  * BEGIN mvm/power.c
4090  */
4091 
4092 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4093 
/*
 * Send a beacon filter configuration to the firmware
 * (IWM_REPLY_BEACON_FILTERING_CMD, synchronous).  On success, dump the
 * parameters that were sent when debugging is enabled.
 * Returns 0 on success or the error from iwm_mvm_send_cmd_pdu().
 */
static int
iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
	struct iwm_beacon_filter_cmd *cmd)
{
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);

	if (!ret) {
		/* Debug dump of every field that was just sent. */
		DPRINTF(("ba_enable_beacon_abort is: %d\n",
		    le32toh(cmd->ba_enable_beacon_abort)));
		DPRINTF(("ba_escape_timer is: %d\n",
		    le32toh(cmd->ba_escape_timer)));
		DPRINTF(("bf_debug_flag is: %d\n",
		    le32toh(cmd->bf_debug_flag)));
		DPRINTF(("bf_enable_beacon_filter is: %d\n",
		    le32toh(cmd->bf_enable_beacon_filter)));
		DPRINTF(("bf_energy_delta is: %d\n",
		    le32toh(cmd->bf_energy_delta)));
		DPRINTF(("bf_escape_timer is: %d\n",
		    le32toh(cmd->bf_escape_timer)));
		DPRINTF(("bf_roaming_energy_delta is: %d\n",
		    le32toh(cmd->bf_roaming_energy_delta)));
		DPRINTF(("bf_roaming_state is: %d\n",
		    le32toh(cmd->bf_roaming_state)));
		DPRINTF(("bf_temp_threshold is: %d\n",
		    le32toh(cmd->bf_temp_threshold)));
		DPRINTF(("bf_temp_fast_filter is: %d\n",
		    le32toh(cmd->bf_temp_fast_filter)));
		DPRINTF(("bf_temp_slow_filter is: %d\n",
		    le32toh(cmd->bf_temp_slow_filter)));
	}
	return ret;
}
4129 
/*
 * Fill the connection-quality-monitor related part of a beacon filter
 * command.  Currently this only propagates the cached beacon-abort
 * enable flag from the softc; "in" is presently unused.
 */
static void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4136 
4137 static int
4138 iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
4139 	int enable)
4140 {
4141 	struct iwm_beacon_filter_cmd cmd = {
4142 		IWM_BF_CMD_CONFIG_DEFAULTS,
4143 		.bf_enable_beacon_filter = htole32(1),
4144 		.ba_enable_beacon_abort = htole32(enable),
4145 	};
4146 
4147 	if (!sc->sc_bf.bf_enabled)
4148 		return 0;
4149 
4150 	sc->sc_bf.ba_enabled = enable;
4151 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4152 	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4153 }
4154 
/*
 * Debug-dump a MAC power table command before it is sent.
 * This driver always runs with power management disabled (CAM), so the
 * function is expected to return at the early check below; reaching the
 * KASSERT(0) would indicate the command was built with power management
 * enabled, which is currently unsupported.
 */
static void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/* Power management enabled: not expected to happen in this driver. */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
			le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
			le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
				cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
				cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
4192 
4193 static void
4194 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4195 	struct iwm_mac_power_cmd *cmd)
4196 {
4197 	struct ieee80211com *ic = &sc->sc_ic;
4198 	struct ieee80211_node *ni = &in->in_ni;
4199 	int dtimper, dtimper_msec;
4200 	int keep_alive;
4201 
4202 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4203 	    in->in_color));
4204 	dtimper = ic->ic_dtim_period ?: 1;
4205 
4206 	/*
4207 	 * Regardless of power management state the driver must set
4208 	 * keep alive period. FW will use it for sending keep alive NDPs
4209 	 * immediately after association. Check that keep alive period
4210 	 * is at least 3 * DTIM
4211 	 */
4212 	dtimper_msec = dtimper * ni->ni_intval;
4213 	keep_alive
4214 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4215 	keep_alive = roundup(keep_alive, 1000) / 1000;
4216 	cmd->keep_alive_seconds = htole16(keep_alive);
4217 }
4218 
4219 static int
4220 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4221 {
4222 	int ret;
4223 	int ba_enable;
4224 	struct iwm_mac_power_cmd cmd;
4225 
4226 	memset(&cmd, 0, sizeof(cmd));
4227 
4228 	iwm_mvm_power_build_cmd(sc, in, &cmd);
4229 	iwm_mvm_power_log(sc, &cmd);
4230 
4231 	if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
4232 	    IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
4233 		return ret;
4234 
4235 	ba_enable = !!(cmd.flags &
4236 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4237 	return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
4238 }
4239 
4240 static int
4241 iwm_mvm_power_update_device(struct iwm_softc *sc)
4242 {
4243 	struct iwm_device_power_cmd cmd = {
4244 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4245 	};
4246 
4247 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4248 		return 0;
4249 
4250 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4251 	DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
4252 
4253 	return iwm_mvm_send_cmd_pdu(sc,
4254 	    IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
4255 }
4256 
4257 static int
4258 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4259 {
4260 	struct iwm_beacon_filter_cmd cmd = {
4261 		IWM_BF_CMD_CONFIG_DEFAULTS,
4262 		.bf_enable_beacon_filter = htole32(1),
4263 	};
4264 	int ret;
4265 
4266 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4267 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4268 
4269 	if (ret == 0)
4270 		sc->sc_bf.bf_enabled = 1;
4271 
4272 	return ret;
4273 }
4274 
4275 static int
4276 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4277 {
4278 	struct iwm_beacon_filter_cmd cmd;
4279 	int ret;
4280 
4281 	memset(&cmd, 0, sizeof(cmd));
4282 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4283 		return 0;
4284 
4285 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4286 	if (ret == 0)
4287 		sc->sc_bf.bf_enabled = 0;
4288 
4289 	return ret;
4290 }
4291 
#if 0
/*
 * Disabled: would re-send the current beacon filter configuration,
 * but only while beacon filtering is already enabled.
 */
static int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (!sc->sc_bf.bf_enabled)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
4302 
4303 /*
4304  * END mvm/power.c
4305  */
4306 
4307 /*
4308  * BEGIN mvm/sta.c
4309  */
4310 
/*
 * Convert a v6 ADD_STA command into the older v5 layout, for firmware
 * that predates the STA_KEY_CMD capability.  Every field present in
 * both versions is copied verbatim; v5 starts zeroed so fields that
 * exist only in v6 are simply dropped.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
4334 
4335 static int
4336 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
4337 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
4338 {
4339 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
4340 
4341 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
4342 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
4343 		    sizeof(*cmd), cmd, status);
4344 	}
4345 
4346 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
4347 
4348 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
4349 	    &cmd_v5, status);
4350 }
4351 
/* send station add/update command to firmware */
/*
 * Add (update == 0) or modify (update != 0) the firmware entry for the
 * station we are associated with (IWM_STATION_ID).  On an initial add,
 * the BSSID and the TX queue mask are also programmed.
 * Returns 0 on success, a command error, or EIO if the firmware
 * reported a status other than IWM_ADD_STA_SUCCESS.
 */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
	int ret;
	/*
	 * NOTE(review): status is uint32_t here but
	 * iwm_mvm_send_add_sta_cmd_status() takes int * -- confirm this
	 * is intentional / warning-free on the target toolchain.
	 */
	uint32_t status;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	if (!update) {
		/* First add: program queues and the peer's address. */
		add_sta_cmd.tfd_queue_msk = htole32(0xf);
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);

	/* Preset in case the response does not overwrite it. */
	status = IWM_ADD_STA_SUCCESS;
	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		ret = EIO;
		DPRINTF(("IWM_ADD_STA failed\n"));
		break;
	}

	return ret;
}
4389 
/*
 * Create the firmware station entry for our AP.  Thin wrapper around
 * iwm_mvm_sta_send_to_fw() with update == 0 (initial add).
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
4401 
/*
 * Modify the existing firmware station entry (update == 1) to match
 * the current association state.
 */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
4407 
4408 static int
4409 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4410 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
4411 {
4412 	struct iwm_mvm_add_sta_cmd_v6 cmd;
4413 	int ret;
4414 	uint32_t status;
4415 
4416 	memset(&cmd, 0, sizeof(cmd));
4417 	cmd.sta_id = sta->sta_id;
4418 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4419 
4420 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4421 
4422 	if (addr)
4423 		memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
4424 
4425 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4426 	if (ret)
4427 		return ret;
4428 
4429 	switch (status) {
4430 	case IWM_ADD_STA_SUCCESS:
4431 		DPRINTF(("Internal station added.\n"));
4432 		return 0;
4433 	default:
4434 		DPRINTF(("%s: Add internal station failed, status=0x%x\n",
4435 		    DEVNAME(sc), status));
4436 		ret = EIO;
4437 		break;
4438 	}
4439 	return ret;
4440 }
4441 
4442 static int
4443 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4444 {
4445 	int ret;
4446 
4447 	sc->sc_aux_sta.sta_id = 3;
4448 	sc->sc_aux_sta.tfd_queue_msk = 0;
4449 
4450 	ret = iwm_mvm_add_int_sta_common(sc,
4451 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4452 
4453 	if (ret)
4454 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4455 	return ret;
4456 }
4457 
4458 /*
4459  * END mvm/sta.c
4460  */
4461 
4462 /*
4463  * BEGIN mvm/scan.c
4464  */
4465 
4466 #define IWM_PLCP_QUIET_THRESH 1
4467 #define IWM_ACTIVE_QUIET_TIME 10
4468 #define LONG_OUT_TIME_PERIOD 600
4469 #define SHORT_OUT_TIME_PERIOD 200
4470 #define SUSPEND_TIME_PERIOD 100
4471 
4472 static uint16_t
4473 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
4474 {
4475 	uint16_t rx_chain;
4476 	uint8_t rx_ant;
4477 
4478 	rx_ant = IWM_FW_VALID_RX_ANT(sc);
4479 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4480 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4481 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4482 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4483 	return htole16(rx_chain);
4484 }
4485 
4486 #define ieee80211_tu_to_usec(a) (1024*(a))
4487 
4488 static uint32_t
4489 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
4490 {
4491 	if (!is_assoc)
4492 		return 0;
4493 	if (flags & 0x1)
4494 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
4495 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
4496 }
4497 
4498 static uint32_t
4499 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
4500 {
4501 	if (!is_assoc)
4502 		return 0;
4503 	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
4504 }
4505 
4506 static uint32_t
4507 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
4508 {
4509 	if (flags & IEEE80211_CHAN_2GHZ)
4510 		return htole32(IWM_PHY_BAND_24);
4511 	else
4512 		return htole32(IWM_PHY_BAND_5);
4513 }
4514 
/*
 * Pick the TX rate and antenna for scan probe requests.  Rotates
 * through the valid TX antennas round-robin, remembering the last one
 * used in sc_scan_last_antenna.  CCK 1M is used on 2 GHz unless CCK is
 * disallowed; OFDM 6M otherwise.  Returns the rate_n_flags value in
 * little-endian byte order.
 */
static uint32_t
iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next valid antenna, wrapping around. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
				   tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4537 
4538 /*
4539  * If req->n_ssids > 0, it means we should do an active scan.
4540  * In case of active scan w/o directed scan, we receive a zero-length SSID
4541  * just to notify that this scan is active and not passive.
4542  * In order to notify the FW of the number of SSIDs we wish to scan (including
4543  * the zero-length one), we need to set the corresponding bits in chan->type,
4544  * one for each SSID, and set the active bit (first). If the first SSID is
4545  * already included in the probe template, so we need to set only
4546  * req->n_ssids - 1 bits in addition to the first bit.
4547  */
4548 static uint16_t
4549 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4550 {
4551 	if (flags & IEEE80211_CHAN_2GHZ)
4552 		return 30  + 3 * (n_ssids + 1);
4553 	return 20  + 2 * (n_ssids + 1);
4554 }
4555 
4556 static uint16_t
4557 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
4558 {
4559 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4560 }
4561 
/*
 * Append one iwm_scan_channel entry, directly after the probe request
 * template in cmd->data, for every net80211 channel matching "flags".
 * "type" carries one bit per SSID to scan for (see the comment above
 * iwm_mvm_get_active_dwell()); the active bit is cleared again on
 * channels marked passive-only.  Returns the number of channels added.
 */
static int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
	int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel entries start right after the probe request template. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
		(cmd->data + le16toh(cmd->tx_cmd.len));
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	/* Extra bit for the broadcast SSID when it isn't in the template. */
	if (!basic_ssid)
		type |= (1 << n_ssids);

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		/* Skip channels not matching the requested band flags. */
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
4598 
4599 /*
4600  * Fill in probe request with the following parameters:
4601  * TA is our vif HW address, which mac80211 ensures we have.
4602  * Packet is broadcasted, so this is both SA and DA.
4603  * The probe request IE is made out of two: first comes the most prioritized
4604  * SSID if a directed scan is requested. Second comes whatever extra
4605  * information was given to us as the scan request IE.
4606  */
/*
 * Build a broadcast probe request frame into "frame": the 802.11
 * header, the SSID element and any extra IEs, never writing more than
 * "left" bytes.  Returns the number of bytes written, or 0 when the
 * buffer is too small for the mandatory parts.
 */
static uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
	const uint8_t *ie, int ie_len, int left)
{
	int len = 0;
	uint8_t *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= sizeof(*frame);
	if (left < 0)
		return 0;

	/* Broadcast probe request: DA and BSSID are broadcast, SA is "ta". */
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);

	len += sizeof(*frame);
	CTASSERT(sizeof(*frame) == 24);

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (uint16_t)len;

	/* points to the payload of the request */
	pos = (uint8_t *)frame + sizeof(*frame);

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = IEEE80211_ELEMID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* No room for the extra IEs: return what we have so far. */
	if (left < ie_len)
		return len;

	if (ie && ie_len) {
		memcpy(pos, ie, ie_len);
		len += ie_len;
	}

	return (uint16_t)len;
}
4661 
/*
 * Assemble and send a full IWM_SCAN_REQUEST_CMD for the given band
 * flags: scan timing, probe request template (via
 * iwm_mvm_fill_probe_req()) and the per-channel list (via
 * iwm_mvm_scan_fill_channels()).  The pre-allocated sc_scan_cmd buffer
 * is used and handed to the firmware without copying
 * (IWM_HCMD_DFL_NOCOPY).  Returns 0 on success, EIO if the firmware
 * rejected the scan.
 */
static int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
	int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	/* Remember which band(s) this scan covers. */
	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* TX parameters for the embedded probe request. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
			    (struct ieee80211_frame *)cmd->data,
			    ic->ic_myaddr, n_ssids, ssid, ssid_len,
			    NULL, 0, sc->sc_capa_max_probe_len));

	/* Channel entries follow the probe request in cmd->data. */
	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	/* Total command length: header + probe request + channel list. */
	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
		le16toh(cmd->tx_cmd.len) +
		(cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
4751 
4752 /*
4753  * END mvm/scan.c
4754  */
4755 
4756 /*
4757  * BEGIN mvm/mac-ctxt.c
4758  */
4759 
/*
 * Compute the CCK and OFDM basic-rate bitmaps to advertise to the
 * firmware for the given node, including all mandatory rates below the
 * lowest basic rate as required by 802.11-2007 9.6 (see the long
 * comment below).
 */
static void
iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
	int *cck_rates, int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* Start with every rate marked basic; 2 GHz also gets CCK. */
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		int adj = i - IWM_FIRST_OFDM_RATE;
		ofdm |= (1 << adj);
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
4840 
/*
 * Fill the fields of a MAC context command that are common to all MAC
 * types: id/color, action, MAC type (BSS station), addresses, ACK
 * rates, preamble/slot flags, per-AC EDCA parameters and filter flags.
 */
static void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* BSSID is only meaningful once associated. */
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Identical default EDCA parameters for every access category. */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
4889 
4890 static int
4891 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
4892 {
4893 	int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
4894 				       sizeof(*cmd), cmd);
4895 	if (ret)
4896 		DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
4897 		    DEVNAME(sc), le32toh(cmd->action), ret));
4898 	return ret;
4899 }
4900 
4901 /*
4902  * Fill the specific data for mac context of type station or p2p client
4903  */
4904 static void
4905 iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
4906 	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
4907 {
4908 	struct ieee80211_node *ni = &in->in_ni;
4909 	unsigned dtim_period, dtim_count;
4910 
4911 	dtim_period = ni->ni_dtim_period;
4912 	dtim_count = ni->ni_dtim_count;
4913 
4914 	/* We need the dtim_period to set the MAC as associated */
4915 	if (in->in_assoc && dtim_period && !force_assoc_off) {
4916 		uint64_t tsf;
4917 		uint32_t dtim_offs;
4918 
4919 		/*
4920 		 * The DTIM count counts down, so when it is N that means N
4921 		 * more beacon intervals happen until the DTIM TBTT. Therefore
4922 		 * add this to the current time. If that ends up being in the
4923 		 * future, the firmware will handle it.
4924 		 *
4925 		 * Also note that the system_timestamp (which we get here as
4926 		 * "sync_device_ts") and TSF timestamp aren't at exactly the
4927 		 * same offset in the frame -- the TSF is at the first symbol
4928 		 * of the TSF, the system timestamp is at signal acquisition
4929 		 * time. This means there's an offset between them of at most
4930 		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
4931 		 * 384us in the longest case), this is currently not relevant
4932 		 * as the firmware wakes up around 2ms before the TBTT.
4933 		 */
4934 		dtim_offs = dtim_count * ni->ni_intval;
4935 		/* convert TU to usecs */
4936 		dtim_offs *= 1024;
4937 
4938 		tsf = ni->ni_tstamp.tsf;
4939 
4940 		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
4941 		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);
4942 
4943 		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
4944 		    (long long)le64toh(ctxt_sta->dtim_tsf),
4945 		    le32toh(ctxt_sta->dtim_time), dtim_offs));
4946 
4947 		ctxt_sta->is_assoc = htole32(1);
4948 	} else {
4949 		ctxt_sta->is_assoc = htole32(0);
4950 	}
4951 
4952 	ctxt_sta->bi = htole32(ni->ni_intval);
4953 	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
4954 	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
4955 	ctxt_sta->dtim_reciprocal =
4956 	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
4957 
4958 	/* 10 = CONN_MAX_LISTEN_INTERVAL */
4959 	ctxt_sta->listen_interval = htole32(10);
4960 	ctxt_sta->assoc_id = htole32(ni->ni_associd);
4961 }
4962 
4963 static int
4964 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
4965 	uint32_t action)
4966 {
4967 	struct iwm_mac_ctx_cmd cmd;
4968 
4969 	memset(&cmd, 0, sizeof(cmd));
4970 
4971 	/* Fill the common data for all mac context types */
4972 	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
4973 
4974 	/* Allow beacons to pass through as long as we are not associated,or we
4975 	 * do not have dtim period information */
4976 	if (!in->in_assoc || !sc->sc_ic.ic_dtim_period)
4977 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
4978 	else
4979 		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
4980 
4981 	/* Fill the data specific for station mode */
4982 	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
4983 	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
4984 
4985 	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
4986 }
4987 
4988 static int
4989 iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4990 {
4991 	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
4992 }
4993 
4994 static int
4995 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
4996 {
4997 	int ret;
4998 
4999 	ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
5000 	if (ret)
5001 		return ret;
5002 
5003 	return 0;
5004 }
5005 
5006 static int
5007 iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
5008 {
5009 	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
5010 }
5011 
#if 0
/*
 * Tear down a previously uploaded MAC context.  Currently compiled out;
 * see the comment in iwm_release() for why removal is done via a full
 * device reset instead.
 */
static int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		/* Was "print(...)", which is not a kernel function. */
		printf("%s: attempt to remove !uploaded node %p", DEVNAME(sc), in);
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to remove MAC context: %d\n", ret);
		return ret;
	}
	in->in_uploaded = 0;

	return 0;
}
#endif
5042 
5043 #define IWM_MVM_MISSED_BEACONS_THRESHOLD 8
5044 
5045 static void
5046 iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
5047 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5048 {
5049 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5050 
5051 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5052 	    le32toh(mb->mac_id),
5053 	    le32toh(mb->consec_missed_beacons),
5054 	    le32toh(mb->consec_missed_beacons_since_last_rx),
5055 	    le32toh(mb->num_recvd_beacons),
5056 	    le32toh(mb->num_expected_beacons)));
5057 
5058 	/*
5059 	 * TODO: the threshold should be adjusted based on latency conditions,
5060 	 * and/or in case of a CS flow on one of the other AP vifs.
5061 	 */
5062 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5063 	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
5064 		ieee80211_beacon_miss(&sc->sc_ic);
5065 }
5066 
5067 /*
5068  * END mvm/mac-ctxt.c
5069  */
5070 
5071 /*
5072  * BEGIN mvm/quota.c
5073  */
5074 
5075 static int
5076 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5077 {
5078 	struct iwm_time_quota_cmd cmd;
5079 	int i, idx, ret, num_active_macs, quota, quota_rem;
5080 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5081 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
5082 	uint16_t id;
5083 
5084 	memset(&cmd, 0, sizeof(cmd));
5085 
5086 	/* currently, PHY ID == binding ID */
5087 	if (in) {
5088 		id = in->in_phyctxt->id;
5089 		KASSERT(id < IWM_MAX_BINDINGS);
5090 		colors[id] = in->in_phyctxt->color;
5091 
5092 		if (1)
5093 			n_ifs[id] = 1;
5094 	}
5095 
5096 	/*
5097 	 * The FW's scheduling session consists of
5098 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
5099 	 * equally between all the bindings that require quota
5100 	 */
5101 	num_active_macs = 0;
5102 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5103 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5104 		num_active_macs += n_ifs[i];
5105 	}
5106 
5107 	quota = 0;
5108 	quota_rem = 0;
5109 	if (num_active_macs) {
5110 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
5111 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
5112 	}
5113 
5114 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5115 		if (colors[i] < 0)
5116 			continue;
5117 
5118 		cmd.quotas[idx].id_and_color =
5119 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5120 
5121 		if (n_ifs[i] <= 0) {
5122 			cmd.quotas[idx].quota = htole32(0);
5123 			cmd.quotas[idx].max_duration = htole32(0);
5124 		} else {
5125 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5126 			cmd.quotas[idx].max_duration = htole32(0);
5127 		}
5128 		idx++;
5129 	}
5130 
5131 	/* Give the remainder of the session to the first binding */
5132 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5133 
5134 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
5135 	    sizeof(cmd), &cmd);
5136 	if (ret)
5137 		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
5138 	return ret;
5139 }
5140 
5141 /*
5142  * END mvm/quota.c
5143  */
5144 
5145 /*
5146  * aieee80211 routines
5147  */
5148 
5149 /*
5150  * Change to AUTH state in 80211 state machine.  Roughly matches what
5151  * Linux does in bss_info_changed().
5152  */
5153 static int
5154 iwm_auth(struct iwm_softc *sc)
5155 {
5156 	struct ieee80211com *ic = &sc->sc_ic;
5157 	struct iwm_node *in = (void *)ic->ic_bss;
5158 	uint32_t duration;
5159 	uint32_t min_duration;
5160 	int error;
5161 
5162 	in->in_assoc = 0;
5163 
5164 	if ((error = iwm_allow_mcast(sc)) != 0)
5165 		return error;
5166 
5167 	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
5168 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5169 		return error;
5170 	}
5171 
5172 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
5173 	    in->in_ni.ni_chan, 1, 1)) != 0) {
5174 		DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
5175 		return error;
5176 	}
5177 	in->in_phyctxt = &sc->sc_phyctxt[0];
5178 
5179 	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
5180 		DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
5181 		return error;
5182 	}
5183 
5184 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
5185 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5186 		return error;
5187 	}
5188 
5189 	/* a bit superfluous? */
5190 	while (sc->sc_auth_prot)
5191 		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
5192 	sc->sc_auth_prot = 1;
5193 
5194 	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
5195 	    200 + in->in_ni.ni_intval);
5196 	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
5197 	    100 + in->in_ni.ni_intval);
5198 	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
5199 
5200 	while (sc->sc_auth_prot != 2) {
5201 		/*
5202 		 * well, meh, but if the kernel is sleeping for half a
5203 		 * second, we have bigger problems
5204 		 */
5205 		if (sc->sc_auth_prot == 0) {
5206 			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
5207 			return ETIMEDOUT;
5208 		} else if (sc->sc_auth_prot == -1) {
5209 			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
5210 			sc->sc_auth_prot = 0;
5211 			return EAUTH;
5212 		}
5213 		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
5214 	}
5215 
5216 	return 0;
5217 }
5218 
5219 static int
5220 iwm_assoc(struct iwm_softc *sc)
5221 {
5222 	struct ieee80211com *ic = &sc->sc_ic;
5223 	struct iwm_node *in = (void *)ic->ic_bss;
5224 	int error;
5225 
5226 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
5227 		DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
5228 		return error;
5229 	}
5230 
5231 	in->in_assoc = 1;
5232 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
5233 		DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
5234 		return error;
5235 	}
5236 
5237 	return 0;
5238 }
5239 
/*
 * Release firmware state for a node by resetting the whole device.
 * See the comment below for why the orderly teardown path is not used.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* The orderly teardown, kept for reference (see comment above). */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
		    error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
		    error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
5299 
5300 
5301 static struct ieee80211_node *
5302 iwm_node_alloc(struct ieee80211_node_table *nt)
5303 {
5304 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5305 }
5306 
5307 static void
5308 iwm_calib_timeout(void *arg)
5309 {
5310 	struct iwm_softc *sc = arg;
5311 	struct ieee80211com *ic = &sc->sc_ic;
5312 	int s;
5313 
5314 	s = splnet();
5315 	if (ic->ic_fixed_rate == -1
5316 	    && ic->ic_opmode == IEEE80211_M_STA
5317 	    && ic->ic_bss) {
5318 		struct iwm_node *in = (void *)ic->ic_bss;
5319 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5320 	}
5321 	splx(s);
5322 
5323 	callout_schedule(&sc->sc_calib_to, hz/2);
5324 }
5325 
/*
 * Build the firmware link-quality (rate selection) command for a node
 * from its negotiated rate set, and initialize AMRR state.  The rate
 * table is filled highest-rate-first (opposite of ni_rates).
 */
static void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail out rather than overflow the lq rate table. */
	if (nrates > __arraycount(lq->rs_table) ||
	    nrates > IEEE80211_RATE_MAXSIZE) {
		DPRINTF(("%s: node supports %d rates, driver handles only "
		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid tx antennas, one per entry. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	/* Start at lowest available bit-rate, AMRR will raise. */
	ni->ni_txrate = 0;
}
5406 
/*
 * ifmedia change callback: resolve any fixed rate into a HW rate index
 * and restart the interface if it is currently up and running.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		/*
		 * NOTE(review): if no entry matches, ridx falls through as
		 * IWM_RIDX_MAX+1 and is stored anyway -- presumably the rate
		 * list guarantees a match; verify against iwm_rates[].
		 */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Apply new media settings by bouncing the interface. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		error = iwm_init(ifp);
	}
	return error;
}
5436 
/*
 * Workqueue half of the 802.11 state machine: performs the (possibly
 * sleeping) firmware operations for a state transition queued by
 * iwm_newstate(), then chains to the stacked net80211 newstate handler.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (void *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int error;

	/* The work item was allocated by iwm_newstate(); we own it now. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	/* A generation bump (iwm_stop/iwm_init) invalidates this work. */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* Non-zero sc_scanband means a scan is already in flight. */
		if (sc->sc_scanband)
			break;

		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		/* Scan completion (iwm_endscan_cb) drives the FSM onward. */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		/* Push power, beacon-filter, quota and rate state to the FW. */
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = (struct iwm_node *)ic->ic_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* Start the periodic AMRR calibration. */
		callout_schedule(&sc->sc_calib_to, hz/2);

		break; }

	default:
		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5556 
5557 static int
5558 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5559 {
5560 	struct iwm_newstate_state *iwmns;
5561 	struct ifnet *ifp = IC2IFP(ic);
5562 	struct iwm_softc *sc = ifp->if_softc;
5563 
5564 	callout_stop(&sc->sc_calib_to);
5565 
5566 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5567 	if (!iwmns) {
5568 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5569 		return ENOMEM;
5570 	}
5571 
5572 	iwmns->ns_nstate = nstate;
5573 	iwmns->ns_arg = arg;
5574 	iwmns->ns_generation = sc->sc_generation;
5575 
5576 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5577 
5578 	return 0;
5579 }
5580 
5581 static void
5582 iwm_endscan_cb(struct work *work __unused, void *arg)
5583 {
5584 	struct iwm_softc *sc = arg;
5585 	struct ieee80211com *ic = &sc->sc_ic;
5586 	int done;
5587 
5588 	DPRINTF(("scan ended\n"));
5589 
5590 	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
5591 	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
5592 		int error;
5593 		done = 0;
5594 		if ((error = iwm_mvm_scan_request(sc,
5595 		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
5596 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
5597 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
5598 			done = 1;
5599 		}
5600 	} else {
5601 		done = 1;
5602 	}
5603 
5604 	if (done) {
5605 		if (!sc->sc_scanband) {
5606 			ieee80211_cancel_scan(ic);
5607 		} else {
5608 			ieee80211_end_scan(ic);
5609 		}
5610 		sc->sc_scanband = 0;
5611 	}
5612 }
5613 
/*
 * Full hardware bring-up: run the INIT firmware image, restart the
 * hardware, load the regular image, and upload the base configuration
 * (antennas, PHY db, aux station, PHY contexts, power, tx queues).
 * The ordering of these steps is mandated by the firmware.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	/* Run the INIT image first for NVM/calibration data. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	/* Any failure after firmware load requires a full device stop. */
	iwm_stop_device(sc);
	return error;
}
5687 
5688 /* Allow multicast from our BSSID. */
5689 static int
5690 iwm_allow_mcast(struct iwm_softc *sc)
5691 {
5692 	struct ieee80211com *ic = &sc->sc_ic;
5693 	struct ieee80211_node *ni = ic->ic_bss;
5694 	struct iwm_mcast_filter_cmd *cmd;
5695 	size_t size;
5696 	int error;
5697 
5698 	size = roundup(sizeof(*cmd), 4);
5699 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
5700 	if (cmd == NULL)
5701 		return ENOMEM;
5702 	cmd->filter_own = 1;
5703 	cmd->port_id = 0;
5704 	cmd->count = 0;
5705 	cmd->pass_all = 1;
5706 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5707 
5708 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5709 	    IWM_CMD_SYNC, size, cmd);
5710 	kmem_intr_free(cmd, size);
5711 	return error;
5712 }
5713 
5714 /*
5715  * ifnet interfaces
5716  */
5717 
5718 static int
5719 iwm_init(struct ifnet *ifp)
5720 {
5721 	struct iwm_softc *sc = ifp->if_softc;
5722 	int error;
5723 
5724 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5725 		return 0;
5726 	}
5727 	sc->sc_generation++;
5728 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5729 
5730 	if ((error = iwm_init_hw(sc)) != 0) {
5731 		iwm_stop(ifp, 1);
5732 		return error;
5733 	}
5734 
5735 	/*
5736  	 * Ok, firmware loaded and we are jogging
5737 	 */
5738 
5739 	ifp->if_flags &= ~IFF_OACTIVE;
5740 	ifp->if_flags |= IFF_RUNNING;
5741 
5742 	ieee80211_begin_scan(&sc->sc_ic, 0);
5743 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5744 
5745 	return 0;
5746 }
5747 
5748 /*
5749  * Dequeue packets from sendq and call send.
5750  * mostly from iwn
5751  */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* net80211 stashes the destination node in rcvif. */
			ni = (void *)m->m_pkthdr.rcvif;
			ac = 0;
			goto sendit;
		}
		/* Data frames only flow once we are in RUN state. */
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Need a contiguous Ethernet header to classify below. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* Wrap the Ethernet frame in an 802.11 header. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the tx watchdog for the frame we just queued. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
5836 
/*
 * Bring the interface down and stop the device.  Bumping sc_generation
 * first invalidates any state-change work still queued on the
 * workqueue (see iwm_newstate_cb()).
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	sc->sc_scanband = 0;
	sc->sc_auth_prot = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Drop the 802.11 layer to INIT before touching the hardware. */
	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
5857 
5858 static void
5859 iwm_watchdog(struct ifnet *ifp)
5860 {
5861 	struct iwm_softc *sc = ifp->if_softc;
5862 
5863 	ifp->if_timer = 0;
5864 	if (sc->sc_tx_timer > 0) {
5865 		if (--sc->sc_tx_timer == 0) {
5866 			aprint_error_dev(sc->sc_dev, "device timeout\n");
5867 #ifdef IWM_DEBUG
5868 			iwm_nic_error(sc);
5869 #endif
5870 			ifp->if_flags &= ~IFF_UP;
5871 			iwm_stop(ifp, 1);
5872 			ifp->if_oerrors++;
5873 			return;
5874 		}
5875 		ifp->if_timer = 1;
5876 	}
5877 
5878 	ieee80211_watchdog(&sc->sc_ic);
5879 }
5880 
/*
 * ifnet ioctl handler: interface up/down, multicast list changes, and
 * pass-through to net80211 for everything else.  An ENETRESET result
 * bounces the interface to apply the new configuration.
 */
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				if ((error = iwm_init(ifp)) != 0)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * NOTE(review): SIOCADDMULTI is passed here even for
		 * SIOCDELMULTI -- presumably ifreq_getaddr() extracts the
		 * address identically for both; confirm against ifreq_getaddr.
		 */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);

		/* Hardware passes all multicast; no reprogramming needed. */
		if (error == ENETRESET)
			error = 0;
		break;

	default:
		error = ieee80211_ioctl(ic, cmd, data);
	}

	if (error == ENETRESET) {
		error = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			error = iwm_init(ifp);
		}
	}

	splx(s);
	return error;
}
5936 
5937 /*
5938  * The interrupt side of things
5939  */
5940 
5941 /*
5942  * error dumping routines are from iwlwifi/mvm/utils.c
5943  */
5944 
5945 /*
5946  * Note: This structure is read from the device with IO accesses,
5947  * and the reading already does the endian conversion. As it is
5948  * read with uint32_t-sized accesses, any members with a different size
5949  * need to be ordered correctly though!
5950  */
/*
 * Firmware error log layout -- this is device ABI, do not reorder or
 * resize fields.  Read from the device as uint32_t words (the read
 * path handles endian conversion), so differently-sized members would
 * need careful ordering.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
5997 
/*
 * Error log layout constants, in bytes; used by iwm_nic_error() in its
 * sanity check on the table's "valid" (count) field.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6000 
6001 #ifdef IWM_DEBUG
/*
 * Map of firmware error codes to symbolic names, for the error-log dump.
 * The catch-all "ADVANCED_SYSASSERT" entry must remain last:
 * iwm_desc_lookup() falls back to it when no other entry matches.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6023 
6024 static const char *
6025 iwm_desc_lookup(uint32_t num)
6026 {
6027 	int i;
6028 
6029 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6030 		if (advanced_lookup[i].num == num)
6031 			return advanced_lookup[i].name;
6032 
6033 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6034 	return advanced_lookup[i].name;
6035 }
6036 
6037 /*
6038  * Support for dumping the error log seemed like a good idea ...
6039  * but it's mostly hex junk and the only sensible thing is the
6040  * hw/ucode revision (which we know anyway).  Since it's here,
6041  * I'll just leave it in, just in case e.g. the Intel guys want to
6042  * help us decipher some "ADVANCED_SYSASSERT" later.
6043  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	/*
	 * The table address was reported by the firmware in its ALIVE
	 * response (see iwm_notif_intr()).  Sanity-check that it points
	 * into the expected device memory window before reading it.
	 */
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000 || base >= 0x80C000) {
		aprint_error_dev(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/*
	 * Count is passed in uint32_t words, not bytes — consistent with
	 * the struct comment that the table is read 32 bits at a time.
	 */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* "valid" is nonzero when the log contains anything. */
	if (!table.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/* "valid" is also printed as the error count below. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/* Dump the raw fields; see struct iwm_error_event_table above. */
	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
	    table.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    table.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
	    table.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
	    table.flow_handler);
}
6113 #endif
6114 
/*
 * Sync the response payload that directly follows the RX packet header,
 * then point _var_ at it.  The sync starts past the iwm_rx_packet header
 * and covers sizeof(*_var_) bytes.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Like SYNC_RESP_STRUCT, but for a variable-length payload of _len_
 * bytes.  Bug fix: this previously passed sizeof(len) — the size of a
 * variable named "len" captured from the caller's scope — so only 4 (or
 * 8) bytes were synced instead of the payload length.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Advance the RX ring index, wrapping at IWM_RX_RING_COUNT.  (The old
 * definition carried a stray trailing semicolon and did not parenthesize
 * the argument.)
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
6130 
6131 /*
6132  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
6133  * Basic structure from if_iwn
6134  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Pull in the status page the firmware DMAs its write pointer to. */
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* closed_rb_num is the firmware's RX ring write index (12 bits). */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt, tmppkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		/* Sync just the packet header before inspecting it. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid marks firmware-originated packets; see below. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw));

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			/*
			 * Keep a copy of the header: the mbuf backing pkt
			 * is handed to (and freed by) the net80211 input
			 * path, so pkt would be left dangling for the
			 * qid test after the switch.
			 */
			tmppkt = *pkt; // XXX m is freed by ieee80211_input()
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			pkt = &tmppkt;
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MVM_ALIVE: {
			/*
			 * Firmware has booted: record the device-memory
			 * pointers it reported (used e.g. by
			 * iwm_nic_error()) and wake the waiter on sc_uc.
			 */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			/* Also sync the variable-length data past the header. */
			uint16_t size = le16toh(phy_db_notif->length);
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/*
			 * If a command issuer is waiting on this qid/idx
			 * pair, copy the response where it can find it.
			 */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		/*
		 * Generic command completions: hand back just the packet
		 * header plus the common response structure.
		 */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* Defer end-of-scan processing to the workqueue. */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			/*
			 * Track the auth-protection time event state and
			 * wake whoever sleeps on sc_auth_prot:
			 *  2 = started, 0 = ended, -1 = failure.
			 */
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			wakeup(&sc->sc_auth_prot);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "code %02x frame %d/%d %x UNHANDLED "
			    "(this should not happen)\n",
			    pkt->hdr.code, qid, idx, pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
6341 
/*
 * PCI interrupt handler.  Reads the interrupt cause either from the ICT
 * table in host memory or directly from IWM_CSR_INT, then dispatches:
 * firmware/hardware errors stop the device, FH_TX wakes the firmware
 * loader, RF kill downs the interface, and RX causes run iwm_notif_intr().
 * Returns nonzero if the interrupt was ours.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask further interrupts while we process this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32 is used here as a 32-bit swap on
		 * the little-endian ICT entry; it is equivalent to
		 * le32toh for this purpose.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the consumed entry and advance (wraps). */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Spread the ICT's compressed bits back into CSR_INT layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		/* Fatal firmware error: take the interface down. */
		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Wake the thread waiting in the firmware load path. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic ints unless a real RX cause is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Process everything the firmware queued for us. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

 out_ena:
	/* Re-enable the interrupts we masked at entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
6485 
6486 /*
6487  * Autoconf glue-sniffing
6488  */
6489 
/* PCI product IDs of the supported Intel adapters (matched in iwm_match). */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
};
6498 
6499 static int
6500 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
6501 {
6502 	struct pci_attach_args *pa = aux;
6503 
6504 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
6505 		return 0;
6506 
6507 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
6508 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
6509 			return 1;
6510 
6511 	return 0;
6512 }
6513 
6514 static int
6515 iwm_preinit(struct iwm_softc *sc)
6516 {
6517 	int error;
6518 
6519 	if ((error = iwm_prepare_card_hw(sc)) != 0) {
6520 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6521 		return error;
6522 	}
6523 
6524 	if (sc->sc_flags & IWM_FLAG_ATTACHED)
6525 		return 0;
6526 
6527 	if ((error = iwm_start_hw(sc)) != 0) {
6528 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6529 		return error;
6530 	}
6531 
6532 	error = iwm_run_init_mvm_ucode(sc, 1);
6533 	iwm_stop_device(sc);
6534 	return error;
6535 }
6536 
/*
 * Deferred part of attachment, scheduled via config_mountroot() from
 * iwm_attach(): needs the root file system so the firmware can be
 * loaded (the MAC address comes from it).  Sets up net80211 state and
 * registers the network interface.
 */
static void
iwm_attach_hook(device_t dev)
{
	struct iwm_softc *sc = device_private(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	/* We run from the mountroot hook, never during cold boot. */
	KASSERT(!cold);

	/* Bring the hardware up far enough to learn MAC address etc. */
	if (iwm_preinit(sc) != 0)
		return;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev,
	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver),
	    ether_sprintf(sc->sc_nvm.hw_addr));

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* Give each PHY context its index as an identifier. */
	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	/* AMRR rate-adaptation tuning. */
	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	/* Max RSSI */
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	/* Wire up ifnet methods and name, then attach the interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_initialize(ifp);
	ieee80211_ifattach(ic);
	if_register(ifp);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	/* Radiotap (monitor mode) and periodic calibration callout. */
	iwm_radiotap_attach(sc);
	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);

	//task_set(&sc->init_task, iwm_init_task, sc);

	if (pmf_device_register(dev, NULL, NULL))
		pmf_class_network_register(dev, ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");
}
6625 
6626 static void
6627 iwm_attach(device_t parent, device_t self, void *aux)
6628 {
6629 	struct iwm_softc *sc = device_private(self);
6630 	struct pci_attach_args *pa = aux;
6631 	pci_intr_handle_t ih;
6632 	pcireg_t reg, memtype;
6633 	const char *intrstr;
6634 	int error;
6635 	int txq_i;
6636 
6637 	sc->sc_dev = self;
6638 	sc->sc_pct = pa->pa_pc;
6639 	sc->sc_pcitag = pa->pa_tag;
6640 	sc->sc_dmat = pa->pa_dmat;
6641 	sc->sc_pciid = pa->pa_id;
6642 
6643 	pci_aprint_devinfo(pa, NULL);
6644 
6645 	/*
6646 	 * Get the offset of the PCI Express Capability Structure in PCI
6647 	 * Configuration Space.
6648 	 */
6649 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
6650 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
6651 	if (error == 0) {
6652 		aprint_error_dev(self,
6653 		    "PCIe capability structure not found!\n");
6654 		return;
6655 	}
6656 
6657 	/* Clear device-specific "PCI retry timeout" register (41h). */
6658 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
6659 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
6660 
6661 	/* Enable bus-mastering and hardware bug workaround. */
6662 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
6663 	reg |= PCI_COMMAND_MASTER_ENABLE;
6664 	/* if !MSI */
6665 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
6666 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
6667 	}
6668 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
6669 
6670 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
6671 	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
6672 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
6673 	if (error != 0) {
6674 		aprint_error_dev(self, "can't map mem space\n");
6675 		return;
6676 	}
6677 
6678 	/* Install interrupt handler. */
6679 	if (pci_intr_map(pa, &ih)) {
6680 		aprint_error_dev(self, "can't map interrupt\n");
6681 		return;
6682 	}
6683 
6684 	char intrbuf[PCI_INTRSTR_LEN];
6685 	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
6686 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
6687 	if (sc->sc_ih == NULL) {
6688 		aprint_error_dev(self, "can't establish interrupt");
6689 		if (intrstr != NULL)
6690 			aprint_error(" at %s", intrstr);
6691 		aprint_error("\n");
6692 		return;
6693 	}
6694 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
6695 
6696 	sc->sc_wantresp = -1;
6697 
6698 	switch (PCI_PRODUCT(sc->sc_pciid)) {
6699 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
6700 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
6701 		sc->sc_fwname = "iwlwifi-7260-9.ucode";
6702 		sc->host_interrupt_operation_mode = 1;
6703 		break;
6704 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
6705 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
6706 		sc->sc_fwname = "iwlwifi-3160-9.ucode";
6707 		sc->host_interrupt_operation_mode = 1;
6708 		break;
6709 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
6710 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
6711 		sc->sc_fwname = "iwlwifi-7265-9.ucode";
6712 		sc->host_interrupt_operation_mode = 0;
6713 		break;
6714 	default:
6715 		aprint_error_dev(self, "unknown product %#x",
6716 		    PCI_PRODUCT(sc->sc_pciid));
6717 		return;
6718 	}
6719 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
6720 	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
6721 
6722 	/*
6723 	 * We now start fiddling with the hardware
6724 	 */
6725 
6726 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6727 	if (iwm_prepare_card_hw(sc) != 0) {
6728 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6729 		return;
6730 	}
6731 
6732 	/* Allocate DMA memory for firmware transfers. */
6733 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6734 		aprint_error_dev(sc->sc_dev,
6735 		    "could not allocate memory for firmware\n");
6736 		return;
6737 	}
6738 
6739 	/* Allocate "Keep Warm" page. */
6740 	if ((error = iwm_alloc_kw(sc)) != 0) {
6741 		aprint_error_dev(sc->sc_dev,
6742 		    "could not allocate keep warm page\n");
6743 		goto fail1;
6744 	}
6745 
6746 	/* We use ICT interrupts */
6747 	if ((error = iwm_alloc_ict(sc)) != 0) {
6748 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
6749 		goto fail2;
6750 	}
6751 
6752 	/* Allocate TX scheduler "rings". */
6753 	if ((error = iwm_alloc_sched(sc)) != 0) {
6754 		aprint_error_dev(sc->sc_dev,
6755 		    "could not allocate TX scheduler rings\n");
6756 		goto fail3;
6757 	}
6758 
6759 	/* Allocate TX rings */
6760 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
6761 		if ((error = iwm_alloc_tx_ring(sc,
6762 		    &sc->txq[txq_i], txq_i)) != 0) {
6763 			aprint_error_dev(sc->sc_dev,
6764 			    "could not allocate TX ring %d\n", txq_i);
6765 			goto fail4;
6766 		}
6767 	}
6768 
6769 	/* Allocate RX ring. */
6770 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6771 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
6772 		goto fail4;
6773 	}
6774 
6775 	workqueue_create(&sc->sc_eswq, "iwmes",
6776 	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
6777 	workqueue_create(&sc->sc_nswq, "iwmns",
6778 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);
6779 
6780 	/* Clear pending interrupts. */
6781 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6782 
6783 	/*
6784 	 * We can't do normal attach before the file system is mounted
6785 	 * because we cannot read the MAC address without loading the
6786 	 * firmware from disk.  So we postpone until mountroot is done.
6787 	 * Notably, this will require a full driver unload/load cycle
6788 	 * (or reboot) in case the firmware is not present when the
6789 	 * hook runs.
6790 	 */
6791 	config_mountroot(self, iwm_attach_hook);
6792 
6793 	return;
6794 
6795 	/* Free allocated memory if something failed during attachment. */
6796 fail4:	while (--txq_i >= 0)
6797 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
6798 	iwm_free_sched(sc);
6799 fail3:	if (sc->ict_dma.vaddr != NULL)
6800 		iwm_free_ict(sc);
6801 fail2:	iwm_free_kw(sc);
6802 fail1:	iwm_free_fwmem(sc);
6803 }
6804 
6805 /*
6806  * Attach the interface to 802.11 radiotap.
6807  */
6808 void
6809 iwm_radiotap_attach(struct iwm_softc *sc)
6810 {
6811 	struct ifnet *ifp = sc->sc_ic.ic_ifp;
6812 
6813 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
6814 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
6815 	    &sc->sc_drvbpf);
6816 
6817 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
6818 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
6819 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
6820 
6821 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
6822 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
6823 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
6824 }
6825 
6826 #if 0
/*
 * Restart helper (currently compiled out): stop the interface and, if
 * it was administratively up, bring it back up.  Serializes against
 * other users of the device via the IWM_FLAG_BUSY flag.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	/* Wait for and then take the busy flag. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	iwm_stop(ifp, 0);
	/* Only re-init if the interface was up but not running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}
6847 
/*
 * Resume-from-suspend helper (currently compiled out): redo the PCI
 * retry-timeout workaround and restart the interface.
 */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}
6859 
/*
 * Autoconf activation hook (currently compiled out): on deactivation,
 * stop the interface if it is running.
 */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
6875 #endif
6876 
/* Autoconfiguration glue; no detach or activate handlers are provided. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
6879