xref: /onnv-gate/usr/src/uts/common/io/ath/ath_main.c (revision 1000:dd54117d55b1)
1*1000Sxc151355 /*
2*1000Sxc151355  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
3*1000Sxc151355  * Use is subject to license terms.
4*1000Sxc151355  */
5*1000Sxc151355 
6*1000Sxc151355 /*
7*1000Sxc151355  * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
8*1000Sxc151355  * All rights reserved.
9*1000Sxc151355  *
10*1000Sxc151355  * Redistribution and use in source and binary forms, with or without
11*1000Sxc151355  * modification, are permitted provided that the following conditions
12*1000Sxc151355  * are met:
13*1000Sxc151355  * 1. Redistributions of source code must retain the above copyright
14*1000Sxc151355  * notice, this list of conditions and the following disclaimer,
15*1000Sxc151355  * without modification.
16*1000Sxc151355  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17*1000Sxc151355  * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
18*1000Sxc151355  * redistribution must be conditioned upon including a substantially
19*1000Sxc151355  * similar Disclaimer requirement for further binary redistribution.
20*1000Sxc151355  * 3. Neither the names of the above-listed copyright holders nor the names
21*1000Sxc151355  * of any contributors may be used to endorse or promote products derived
22*1000Sxc151355  * from this software without specific prior written permission.
23*1000Sxc151355  *
24*1000Sxc151355  * NO WARRANTY
25*1000Sxc151355  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26*1000Sxc151355  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27*1000Sxc151355  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
28*1000Sxc151355  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
29*1000Sxc151355  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
30*1000Sxc151355  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31*1000Sxc151355  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32*1000Sxc151355  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
33*1000Sxc151355  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34*1000Sxc151355  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
35*1000Sxc151355  * THE POSSIBILITY OF SUCH DAMAGES.
36*1000Sxc151355  *
37*1000Sxc151355  */
38*1000Sxc151355 
39*1000Sxc151355 #pragma ident	"%Z%%M%	%I%	%E% SMI"
40*1000Sxc151355 
41*1000Sxc151355 /*
42*1000Sxc151355  * Driver for the Atheros Wireless LAN controller.
43*1000Sxc151355  *
44*1000Sxc151355  * The Atheros driver can be divided into 2 parts: H/W related (called LLD:
45*1000Sxc151355  * Low Level Driver) and IEEE80211 protocol related (called IEEE80211),
46*1000Sxc151355  * and each part has several sub modules.
47*1000Sxc151355  * The following is the high level structure of ath driver.
48*1000Sxc151355  * (The arrows between modules indicate function call direction.)
49*1000Sxc151355  *
50*1000Sxc151355  *
51*1000Sxc151355  *                     ^                                |
52*1000Sxc151355  *                     |                                | GLD thread
53*1000Sxc151355  *                     |                                V
54*1000Sxc151355  *             ==================  =========================================
55*1000Sxc151355  *             |[1]             |  |[2]                                    |
56*1000Sxc151355  *             |                |  |    GLD Callback functions registered  |
57*1000Sxc151355  *             |   IEEE80211    |  =========================       by      |
58*1000Sxc151355  *             |                |          |               |   IEEE80211   |
59*1000Sxc151355  *             |   Internal     |          V               |               |
60*1000Sxc151355  * =========   |                |========================  |               |
61*1000Sxc151355  * |[3]    |   |   Functions                            |  |               |
62*1000Sxc151355  * |       |   |                                        |  |               |
63*1000Sxc151355  * |Multi- |   ==========================================  =================
64*1000Sxc151355  * |       |       ^           |                |                  |
65*1000Sxc151355  * |Func   |       |           V                V                  V
66*1000Sxc151355  * |       |   ======================   ------------------------------------
67*1000Sxc151355  * |Thread |   |[4]                 |   |[5]                               |
68*1000Sxc151355  * |       |-->| Functions exported |   |   IEEE80211 Callback functions   |
69*1000Sxc151355  * |       |   |    by IEEE80211    |   |      registered by LLD           |
70*1000Sxc151355  * =========   ======================   ------------------------------------
71*1000Sxc151355  *                       ^                                |
72*1000Sxc151355  *                       |                                V
73*1000Sxc151355  *             -------------------------------------------------------------
74*1000Sxc151355  *             |[6]                                                        |
75*1000Sxc151355  *             |                LLD Internal functions                     |
76*1000Sxc151355  *             |                                                           |
77*1000Sxc151355  *             -------------------------------------------------------------
78*1000Sxc151355  *                                        ^
79*1000Sxc151355  *                                        | Software interrupt thread
80*1000Sxc151355  *                                        |
81*1000Sxc151355  *
82*1000Sxc151355  * Modules 1/2/3/4 constitute part IEEE80211, and modules 5/6 constitute LLD.
83*1000Sxc151355  * The short description of each module is as below:
84*1000Sxc151355  *      Module 1: IEEE80211 Internal functions, including ieee80211 state
85*1000Sxc151355  *                machine, convert functions between 802.3 frame and
86*1000Sxc151355  *                802.11 frame, and node maintain function, etc.
87*1000Sxc151355  *      Module 2: GLD callback functions, which are intercepting the calls from
88*1000Sxc151355  *                GLD to LLD, and adding IEEE80211's mutex protection.
89*1000Sxc151355  *      Module 3: Multi-func thread, which is responsible for scan timing,
90*1000Sxc151355  *                rate control timing and calibrate timing.
91*1000Sxc151355  *      Module 4: Functions exported by IEEE80211, which can be called from
92*1000Sxc151355  *                other modules.
93*1000Sxc151355  *      Module 5: IEEE80211 callback functions registered by LLD, which include
94*1000Sxc151355  *                GLD related callbacks and some other functions needed by
95*1000Sxc151355  *                IEEE80211.
96*1000Sxc151355  *      Module 6: LLD Internal functions, which are responsible for allocating
97*1000Sxc151355  *                descriptor/buffer, handling interrupt and other H/W
98*1000Sxc151355  *                operations.
99*1000Sxc151355  *
100*1000Sxc151355  * All functions are running in 3 types of thread:
101*1000Sxc151355  * 1. GLD callbacks threads, such as ioctl, intr, etc.
102*1000Sxc151355  * 2. Multi-Func thread in IEEE80211 which is responsible for scan,
103*1000Sxc151355  *    rate control and calibrate.
104*1000Sxc151355  * 3. Software Interrupt thread originated in LLD.
105*1000Sxc151355  *
106*1000Sxc151355  * The lock strategy is as below:
107*1000Sxc151355  * There are 4 queues for tx, each queue has one asc_txqlock[i] to
108*1000Sxc151355  *      prevent conflicting access to queue resources from different threads.
109*1000Sxc151355  *
110*1000Sxc151355  * All the transmit buffers are contained in asc_txbuf which are
111*1000Sxc151355  *      protected by asc_txbuflock.
112*1000Sxc151355  *
113*1000Sxc151355  * All the receive buffers are contained in asc_rxbuf which are protected
114*1000Sxc151355  *      by asc_rxbuflock.
115*1000Sxc151355  *
116*1000Sxc151355  * In ath struct, asc_genlock is a general lock, protecting most other
117*1000Sxc151355  *      operational data in ath_softc struct and HAL accesses.
118*1000Sxc151355  *      It is acquired by the interrupt handler and most "mode-ctrl" routines.
119*1000Sxc151355  *
120*1000Sxc151355  * In ieee80211com struct, isc_genlock is a general lock to protect
121*1000Sxc151355  *      necessary data and functions in ieee80211_com struct. Some data in
122*1000Sxc151355  *      ieee80211_com don't need protection. For example, isc_dev is written
123*1000Sxc151355  *      only in ath_attach(), but read in many other functions, so protection
124*1000Sxc151355  *      is not necessary.
125*1000Sxc151355  *
126*1000Sxc151355  * Any of the locks can be acquired singly, but where multiple
127*1000Sxc151355  * locks are acquired, they *must* be in the order:
128*1000Sxc151355  *
129*1000Sxc151355  *    isc_genlock >> asc_genlock >> asc_txqlock[i] >>
130*1000Sxc151355  *        asc_txbuflock >> asc_rxbuflock
131*1000Sxc151355  *
132*1000Sxc151355  * Note:
133*1000Sxc151355  * 1. All the IEEE80211 callback functions(except isc_gld_intr)
134*1000Sxc151355  *    registered by LLD in module [5] are protected by isc_genlock before
135*1000Sxc151355  *    calling from IEEE80211.
136*1000Sxc151355  * 2. Module [4] have 3 important functions ieee80211_input(),
137*1000Sxc151355  *    ieee80211_new_state() and _ieee80211_new_state().
138*1000Sxc151355  *    The functions in module [6] should avoid holding mutex or other locks
139*1000Sxc151355  *    during the call to ieee80211_input().
140*1000Sxc151355  *    In particular, the soft interrupt thread that calls ieee80211_input()
141*1000Sxc151355  *    may in some cases carry out processing that includes sending an outgoing
142*1000Sxc151355  *    packet, resulting in a call to the driver's ath_mgmt_send() routine.
143*1000Sxc151355  *    If the ath_mgmt_send() routine were to try to acquire a mutex being held
144*1000Sxc151355  *    by soft interrupt thread at the time it calls ieee80211_input(),
145*1000Sxc151355  *    this could result in a panic due to recursive mutex entry.
146*1000Sxc151355  *    ieee80211_new_state() and _ieee80211_new_state() are almost the same
147*1000Sxc151355  *    except that the latter function asserts isc_genlock is owned in its entry.
148*1000Sxc151355  *    so ieee80211_new_state() is only called by ath_bmiss_handler()
149*1000Sxc151355  *    from soft interrupt handler thread.
150*1000Sxc151355  *    For the same reason as with ieee80211_input, we can't hold any other mutex.
151*1000Sxc151355  * 3. *None* of these locks may be held across calls out to the
152*1000Sxc151355  *    GLD routines gld_recv() in ieee80211_input().
153*1000Sxc151355  *
154*1000Sxc151355  */
155*1000Sxc151355 
156*1000Sxc151355 #include <sys/param.h>
157*1000Sxc151355 #include <sys/types.h>
158*1000Sxc151355 #include <sys/signal.h>
159*1000Sxc151355 #include <sys/stream.h>
160*1000Sxc151355 #include <sys/termio.h>
161*1000Sxc151355 #include <sys/errno.h>
162*1000Sxc151355 #include <sys/file.h>
163*1000Sxc151355 #include <sys/cmn_err.h>
164*1000Sxc151355 #include <sys/stropts.h>
165*1000Sxc151355 #include <sys/strsubr.h>
166*1000Sxc151355 #include <sys/strtty.h>
167*1000Sxc151355 #include <sys/kbio.h>
168*1000Sxc151355 #include <sys/cred.h>
169*1000Sxc151355 #include <sys/stat.h>
170*1000Sxc151355 #include <sys/consdev.h>
171*1000Sxc151355 #include <sys/kmem.h>
172*1000Sxc151355 #include <sys/modctl.h>
173*1000Sxc151355 #include <sys/ddi.h>
174*1000Sxc151355 #include <sys/sunddi.h>
175*1000Sxc151355 #include <sys/pci.h>
176*1000Sxc151355 #include <sys/errno.h>
177*1000Sxc151355 #include <sys/gld.h>
178*1000Sxc151355 #include <sys/dlpi.h>
179*1000Sxc151355 #include <sys/ethernet.h>
180*1000Sxc151355 #include <sys/list.h>
181*1000Sxc151355 #include <sys/byteorder.h>
182*1000Sxc151355 #include <sys/strsun.h>
183*1000Sxc151355 #include <sys/policy.h>
184*1000Sxc151355 #include <inet/common.h>
185*1000Sxc151355 #include <inet/nd.h>
186*1000Sxc151355 #include <inet/mi.h>
187*1000Sxc151355 #include <inet/wifi_ioctl.h>
188*1000Sxc151355 #include "ath_hal.h"
189*1000Sxc151355 #include "ath_impl.h"
190*1000Sxc151355 #include "ath_aux.h"
191*1000Sxc151355 #include "ath_rate.h"
192*1000Sxc151355 
193*1000Sxc151355 extern void ath_halfix_init(void);
194*1000Sxc151355 extern void ath_halfix_finit(void);
195*1000Sxc151355 extern int32_t ath_getset(ath_t *asc, mblk_t *mp, uint32_t cmd);
196*1000Sxc151355 
/*
 * PIO access attributes for registers: little-endian structure access,
 * strictly ordered (no reordering/merging of register accesses).
 */
static ddi_device_acc_attr_t ath_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
205*1000Sxc151355 
/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 * The hardware reads descriptors in little-endian layout; strict
 * ordering keeps descriptor field updates visible in program order.
 */
static ddi_device_acc_attr_t ath_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
214*1000Sxc151355 
/*
 * Describes the chip's DMA engine.
 * Single cookie (sgllen = 1), byte alignment/granularity, and a
 * 64 KB per-transfer cap (dma_attr_maxxfer) — each packet buffer
 * and the descriptor area must therefore fit in one 64 KB window.
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version */
	0x0000000000000000ull,		/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,		/* dma_attr_count_max */
	0x0000000000000001ull,		/* dma_attr_align */
	0x00000FFF,			/* dma_attr_burstsizes */
	0x00000001,			/* dma_attr_minxfer */
	0x000000000000FFFFull,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	0x00000001,			/* dma_attr_granular */
	0				/* dma_attr_flags */
};
232*1000Sxc151355 
/* IEEE 802.11 broadcast MAC address (ff:ff:ff:ff:ff:ff). */
static uint8_t ath_broadcast_addr[] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/* Serializes console output from ath_problem()/ath_log()/ath_dbg(). */
static kmutex_t ath_loglock;

/*
 * NOTE(review): presumably the ddi_soft_state anchor for per-instance
 * ath_t structures — initialized/used elsewhere in the file; confirm.
 */
static void *ath_soft_state_p = NULL;

/*
 * Available debug flags:
 * ATH_DBG_INIT, ATH_DBG_GLD, ATH_DBG_HAL, ATH_DBG_INT, ATH_DBG_ATTACH,
 * ATH_DBG_DETACH, ATH_DBG_AUX, ATH_DBG_WIFICFG, ATH_DBG_OSDEP
 */
uint32_t ath_dbg_flags = 0;	/* 0 == all debug output disabled */
246*1000Sxc151355 
/*
 * Exception/warning cases not leading to panic.
 * printf-style; emitted at CE_WARN severity on the console/log.
 * ath_loglock serializes output from concurrent threads.
 */
void
ath_problem(const int8_t *fmt, ...)
{
	va_list args;

	mutex_enter(&ath_loglock);

	va_start(args, fmt);
	vcmn_err(CE_WARN, fmt, args);
	va_end(args);

	mutex_exit(&ath_loglock);
}
263*1000Sxc151355 
264*1000Sxc151355 /*
265*1000Sxc151355  * Normal log information independent of debug.
266*1000Sxc151355  */
267*1000Sxc151355 void
268*1000Sxc151355 ath_log(const int8_t *fmt, ...)
269*1000Sxc151355 {
270*1000Sxc151355 	va_list args;
271*1000Sxc151355 
272*1000Sxc151355 	mutex_enter(&ath_loglock);
273*1000Sxc151355 
274*1000Sxc151355 	va_start(args, fmt);
275*1000Sxc151355 	vcmn_err(CE_CONT, fmt, args);
276*1000Sxc151355 	va_end(args);
277*1000Sxc151355 
278*1000Sxc151355 	mutex_exit(&ath_loglock);
279*1000Sxc151355 }
280*1000Sxc151355 
281*1000Sxc151355 void
282*1000Sxc151355 ath_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
283*1000Sxc151355 {
284*1000Sxc151355 	va_list args;
285*1000Sxc151355 
286*1000Sxc151355 	if (dbg_flags & ath_dbg_flags) {
287*1000Sxc151355 		mutex_enter(&ath_loglock);
288*1000Sxc151355 		va_start(args, fmt);
289*1000Sxc151355 		vcmn_err(CE_CONT, fmt, args);
290*1000Sxc151355 		va_end(args);
291*1000Sxc151355 		mutex_exit(&ath_loglock);
292*1000Sxc151355 	}
293*1000Sxc151355 }
294*1000Sxc151355 
/*
 * (Re)initialize the RX descriptor of bf and append it to the tail of
 * the hardware RX descriptor chain.  The descriptor is made self-linked
 * (ds_link points at its own physical address) so it terminates the
 * chain; ath_rx_handler() recognizes this sentinel and never processes
 * the self-linked entry.
 */
void
ath_setup_desc(ath_t *asc, struct ath_buf *bf)
{
	struct ath_desc *ds;

	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* self-linked: marks end of chain */
	ds->ds_data = bf->bf_dma.cookie.dmac_address;	/* packet DMA buffer */
	ATH_HAL_SETUPRXDESC(asc->asc_ah, ds,
	    bf->bf_dma.alength,		/* buffer size */
	    0);

	/* Patch the previous tail (if any) to point at this descriptor. */
	if (asc->asc_rxlink != NULL)
		*asc->asc_rxlink = bf->bf_daddr;
	asc->asc_rxlink = &ds->ds_link;
}
311*1000Sxc151355 
312*1000Sxc151355 
313*1000Sxc151355 /*
314*1000Sxc151355  * Allocate an area of memory and a DMA handle for accessing it
315*1000Sxc151355  */
316*1000Sxc151355 static int
317*1000Sxc151355 ath_alloc_dma_mem(dev_info_t *devinfo, size_t memsize,
318*1000Sxc151355 	ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
319*1000Sxc151355 	uint_t bind_flags, dma_area_t *dma_p)
320*1000Sxc151355 {
321*1000Sxc151355 	int err;
322*1000Sxc151355 
323*1000Sxc151355 	/*
324*1000Sxc151355 	 * Allocate handle
325*1000Sxc151355 	 */
326*1000Sxc151355 	err = ddi_dma_alloc_handle(devinfo, &dma_attr,
327*1000Sxc151355 		DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
328*1000Sxc151355 	if (err != DDI_SUCCESS)
329*1000Sxc151355 		return (DDI_FAILURE);
330*1000Sxc151355 
331*1000Sxc151355 	/*
332*1000Sxc151355 	 * Allocate memory
333*1000Sxc151355 	 */
334*1000Sxc151355 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
335*1000Sxc151355 	    alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
336*1000Sxc151355 	    &dma_p->alength, &dma_p->acc_hdl);
337*1000Sxc151355 	if (err != DDI_SUCCESS)
338*1000Sxc151355 		return (DDI_FAILURE);
339*1000Sxc151355 
340*1000Sxc151355 	/*
341*1000Sxc151355 	 * Bind the two together
342*1000Sxc151355 	 */
343*1000Sxc151355 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
344*1000Sxc151355 		dma_p->mem_va, dma_p->alength, bind_flags,
345*1000Sxc151355 		DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
346*1000Sxc151355 	if (err != DDI_DMA_MAPPED)
347*1000Sxc151355 		return (DDI_FAILURE);
348*1000Sxc151355 
349*1000Sxc151355 	dma_p->nslots = ~0U;
350*1000Sxc151355 	dma_p->size = ~0U;
351*1000Sxc151355 	dma_p->token = ~0U;
352*1000Sxc151355 	dma_p->offset = 0;
353*1000Sxc151355 	return (DDI_SUCCESS);
354*1000Sxc151355 }
355*1000Sxc151355 
356*1000Sxc151355 /*
357*1000Sxc151355  * Free one allocated area of DMAable memory
358*1000Sxc151355  */
359*1000Sxc151355 static void
360*1000Sxc151355 ath_free_dma_mem(dma_area_t *dma_p)
361*1000Sxc151355 {
362*1000Sxc151355 	if (dma_p->dma_hdl != NULL) {
363*1000Sxc151355 		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
364*1000Sxc151355 		if (dma_p->acc_hdl != NULL) {
365*1000Sxc151355 			ddi_dma_mem_free(&dma_p->acc_hdl);
366*1000Sxc151355 			dma_p->acc_hdl = NULL;
367*1000Sxc151355 		}
368*1000Sxc151355 		ddi_dma_free_handle(&dma_p->dma_hdl);
369*1000Sxc151355 		dma_p->ncookies = 0;
370*1000Sxc151355 		dma_p->dma_hdl = NULL;
371*1000Sxc151355 	}
372*1000Sxc151355 }
373*1000Sxc151355 
374*1000Sxc151355 
375*1000Sxc151355 static int
376*1000Sxc151355 ath_desc_alloc(dev_info_t *devinfo, ath_t *asc)
377*1000Sxc151355 {
378*1000Sxc151355 	int i, err;
379*1000Sxc151355 	size_t size;
380*1000Sxc151355 	struct ath_desc *ds;
381*1000Sxc151355 	struct ath_buf *bf;
382*1000Sxc151355 
383*1000Sxc151355 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
384*1000Sxc151355 
385*1000Sxc151355 	err = ath_alloc_dma_mem(devinfo, size, &ath_desc_accattr,
386*1000Sxc151355 	    DDI_DMA_CONSISTENT, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
387*1000Sxc151355 	    &asc->asc_desc_dma);
388*1000Sxc151355 
389*1000Sxc151355 	/* virtual address of the first descriptor */
390*1000Sxc151355 	asc->asc_desc = (struct ath_desc *)asc->asc_desc_dma.mem_va;
391*1000Sxc151355 
392*1000Sxc151355 	ds = asc->asc_desc;
393*1000Sxc151355 	ATH_DEBUG((ATH_DBG_INIT, "ath: ath_desc_alloc(): DMA map: "
394*1000Sxc151355 	    "%p (%d) -> %p\n",
395*1000Sxc151355 	    asc->asc_desc, asc->asc_desc_dma.alength,
396*1000Sxc151355 	    asc->asc_desc_dma.cookie.dmac_address));
397*1000Sxc151355 
398*1000Sxc151355 	/* allocate data structures to describe TX/RX DMA buffers */
399*1000Sxc151355 	asc->asc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
400*1000Sxc151355 	bf = (struct ath_buf *)kmem_zalloc(asc->asc_vbuflen, KM_SLEEP);
401*1000Sxc151355 	asc->asc_vbufptr = bf;
402*1000Sxc151355 
403*1000Sxc151355 	/* DMA buffer size for each TX/RX packet */
404*1000Sxc151355 	asc->asc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) +
405*1000Sxc151355 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
406*1000Sxc151355 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
407*1000Sxc151355 	    IEEE80211_WEP_CRCLEN), asc->asc_cachelsz);
408*1000Sxc151355 
409*1000Sxc151355 	/* create RX buffer list and allocate DMA memory */
410*1000Sxc151355 	list_create(&asc->asc_rxbuf_list, sizeof (struct ath_buf),
411*1000Sxc151355 	    offsetof(struct ath_buf, bf_node));
412*1000Sxc151355 	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) {
413*1000Sxc151355 		bf->bf_desc = ds;
414*1000Sxc151355 		bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address +
415*1000Sxc151355 		    ((caddr_t)ds - (caddr_t)asc->asc_desc);
416*1000Sxc151355 		list_insert_tail(&asc->asc_rxbuf_list, bf);
417*1000Sxc151355 
418*1000Sxc151355 		/* alloc DMA memory */
419*1000Sxc151355 		err = ath_alloc_dma_mem(devinfo, asc->asc_dmabuf_size,
420*1000Sxc151355 		    &ath_desc_accattr,
421*1000Sxc151355 		    DDI_DMA_STREAMING, DDI_DMA_READ | DDI_DMA_STREAMING,
422*1000Sxc151355 		    &bf->bf_dma);
423*1000Sxc151355 		if (err != DDI_SUCCESS)
424*1000Sxc151355 			return (err);
425*1000Sxc151355 	}
426*1000Sxc151355 
427*1000Sxc151355 	/* create TX buffer list and allocate DMA memory */
428*1000Sxc151355 	list_create(&asc->asc_txbuf_list, sizeof (struct ath_buf),
429*1000Sxc151355 	    offsetof(struct ath_buf, bf_node));
430*1000Sxc151355 	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++) {
431*1000Sxc151355 		bf->bf_desc = ds;
432*1000Sxc151355 		bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address +
433*1000Sxc151355 		    ((caddr_t)ds - (caddr_t)asc->asc_desc);
434*1000Sxc151355 		list_insert_tail(&asc->asc_txbuf_list, bf);
435*1000Sxc151355 
436*1000Sxc151355 		/* alloc DMA memory */
437*1000Sxc151355 		err = ath_alloc_dma_mem(devinfo, size, &ath_desc_accattr,
438*1000Sxc151355 		    DDI_DMA_STREAMING, DDI_DMA_STREAMING, &bf->bf_dma);
439*1000Sxc151355 		if (err != DDI_SUCCESS)
440*1000Sxc151355 			return (err);
441*1000Sxc151355 	}
442*1000Sxc151355 
443*1000Sxc151355 	return (DDI_SUCCESS);
444*1000Sxc151355 }
445*1000Sxc151355 
/*
 * Undo ath_desc_alloc(): release every per-packet DMA buffer, destroy
 * both buffer lists, free the shared descriptor area, and free the
 * ath_buf bookkeeping array.
 */
static void
ath_desc_free(ath_t *asc)
{
	struct ath_buf *bf;

	/* Free TX DMA buffer */
	bf = list_head(&asc->asc_txbuf_list);
	while (bf != NULL) {
		ath_free_dma_mem(&bf->bf_dma);
		list_remove(&asc->asc_txbuf_list, bf);
		bf = list_head(&asc->asc_txbuf_list);
	}
	list_destroy(&asc->asc_txbuf_list);

	/* Free RX DMA buffer */
	bf = list_head(&asc->asc_rxbuf_list);
	while (bf != NULL) {
		ath_free_dma_mem(&bf->bf_dma);
		list_remove(&asc->asc_rxbuf_list, bf);
		bf = list_head(&asc->asc_rxbuf_list);
	}
	list_destroy(&asc->asc_rxbuf_list);

	/* Free descriptor DMA buffer */
	ath_free_dma_mem(&asc->asc_desc_dma);

	/* Free the ath_buf array allocated in ath_desc_alloc() */
	kmem_free((void *)asc->asc_vbufptr, asc->asc_vbuflen);
	asc->asc_vbufptr = NULL;
}
475*1000Sxc151355 
/*
 * Debug-print one RX descriptor.  The trailing character shows the
 * state: ' ' = not done, '*' = completed OK, '!' = completed w/ error.
 */
static void
ath_printrxbuf(struct ath_buf *bf, int32_t done)
{
	struct ath_desc *ds = bf->bf_desc;

	ATH_DEBUG((ATH_DBG_RECV, "ath: R (%p %p) %08x %08x %08x "
	    "%08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1],
	    !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'));
}
489*1000Sxc151355 
490*1000Sxc151355 static void
491*1000Sxc151355 ath_rx_handler(ath_t *asc)
492*1000Sxc151355 {
493*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
494*1000Sxc151355 	struct ath_buf *bf;
495*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
496*1000Sxc151355 	struct ath_desc *ds;
497*1000Sxc151355 	mblk_t *rx_mp;
498*1000Sxc151355 	struct ieee80211_frame *wh, whbuf;
499*1000Sxc151355 	int32_t len, loop = 1;
500*1000Sxc151355 	uint8_t phyerr;
501*1000Sxc151355 	HAL_STATUS status;
502*1000Sxc151355 	HAL_NODE_STATS hal_node_stats;
503*1000Sxc151355 
504*1000Sxc151355 	do {
505*1000Sxc151355 		mutex_enter(&asc->asc_rxbuflock);
506*1000Sxc151355 		bf = list_head(&asc->asc_rxbuf_list);
507*1000Sxc151355 		if (bf == NULL) {
508*1000Sxc151355 			ATH_DEBUG((ATH_DBG_RECV, "ath: ath_rx_handler(): "
509*1000Sxc151355 			    "no buffer\n"));
510*1000Sxc151355 			mutex_exit(&asc->asc_rxbuflock);
511*1000Sxc151355 			break;
512*1000Sxc151355 		}
513*1000Sxc151355 		ASSERT(bf->bf_dma.cookie.dmac_address != NULL);
514*1000Sxc151355 		ds = bf->bf_desc;
515*1000Sxc151355 		if (ds->ds_link == bf->bf_daddr) {
516*1000Sxc151355 			/*
517*1000Sxc151355 			 * Never process the self-linked entry at the end,
518*1000Sxc151355 			 * this may be met at heavy load.
519*1000Sxc151355 			 */
520*1000Sxc151355 			mutex_exit(&asc->asc_rxbuflock);
521*1000Sxc151355 			break;
522*1000Sxc151355 		}
523*1000Sxc151355 
524*1000Sxc151355 		status = ATH_HAL_RXPROCDESC(ah, ds,
525*1000Sxc151355 		    bf->bf_daddr,
526*1000Sxc151355 		    ATH_PA2DESC(asc, ds->ds_link));
527*1000Sxc151355 		if (status == HAL_EINPROGRESS) {
528*1000Sxc151355 			mutex_exit(&asc->asc_rxbuflock);
529*1000Sxc151355 			break;
530*1000Sxc151355 		}
531*1000Sxc151355 		list_remove(&asc->asc_rxbuf_list, bf);
532*1000Sxc151355 		mutex_exit(&asc->asc_rxbuflock);
533*1000Sxc151355 
534*1000Sxc151355 		if (ds->ds_rxstat.rs_status != 0) {
535*1000Sxc151355 			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
536*1000Sxc151355 				asc->asc_stats.ast_rx_crcerr++;
537*1000Sxc151355 			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
538*1000Sxc151355 				asc->asc_stats.ast_rx_fifoerr++;
539*1000Sxc151355 			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT)
540*1000Sxc151355 				asc->asc_stats.ast_rx_badcrypt++;
541*1000Sxc151355 			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
542*1000Sxc151355 				asc->asc_stats.ast_rx_phyerr++;
543*1000Sxc151355 				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
544*1000Sxc151355 				asc->asc_stats.ast_rx_phy[phyerr]++;
545*1000Sxc151355 			}
546*1000Sxc151355 			goto rx_next;
547*1000Sxc151355 		}
548*1000Sxc151355 		len = ds->ds_rxstat.rs_datalen;
549*1000Sxc151355 
550*1000Sxc151355 		/* less than sizeof(struct ieee80211_frame) */
551*1000Sxc151355 		if (len < 20) {
552*1000Sxc151355 			asc->asc_stats.ast_rx_tooshort++;
553*1000Sxc151355 			goto rx_next;
554*1000Sxc151355 		}
555*1000Sxc151355 
556*1000Sxc151355 		if ((rx_mp = allocb(asc->asc_dmabuf_size, BPRI_MED)) == NULL) {
557*1000Sxc151355 			ath_problem("ath: ath_rx_handler(): "
558*1000Sxc151355 			    "allocing mblk buffer failed.\n");
559*1000Sxc151355 			return;
560*1000Sxc151355 		}
561*1000Sxc151355 
562*1000Sxc151355 		ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU);
563*1000Sxc151355 		bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len);
564*1000Sxc151355 
565*1000Sxc151355 		rx_mp->b_wptr += len;
566*1000Sxc151355 		wh = (struct ieee80211_frame *)rx_mp->b_rptr;
567*1000Sxc151355 		if ((wh->ifrm_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
568*1000Sxc151355 		    IEEE80211_FC0_TYPE_CTL) {
569*1000Sxc151355 			/*
570*1000Sxc151355 			 * Ignore control frame received in promisc mode.
571*1000Sxc151355 			 */
572*1000Sxc151355 			freemsg(rx_mp);
573*1000Sxc151355 			goto rx_next;
574*1000Sxc151355 		}
575*1000Sxc151355 		/* Remove the CRC at the end of IEEE80211 frame */
576*1000Sxc151355 		rx_mp->b_wptr -= IEEE80211_CRC_LEN;
577*1000Sxc151355 		if (wh->ifrm_fc[1] & IEEE80211_FC1_WEP) {
578*1000Sxc151355 			/*
579*1000Sxc151355 			 * WEP is decrypted by hardware. Clear WEP bit
580*1000Sxc151355 			 * and trim WEP header for ieee80211_input().
581*1000Sxc151355 			 */
582*1000Sxc151355 			wh->ifrm_fc[1] &= ~IEEE80211_FC1_WEP;
583*1000Sxc151355 			bcopy(wh, &whbuf, sizeof (whbuf));
584*1000Sxc151355 			/*
585*1000Sxc151355 			 * Remove WEP related fields between
586*1000Sxc151355 			 * header and payload.
587*1000Sxc151355 			 */
588*1000Sxc151355 			rx_mp->b_rptr += IEEE80211_WEP_IVLEN +
589*1000Sxc151355 			    IEEE80211_WEP_KIDLEN;
590*1000Sxc151355 			bcopy(&whbuf, rx_mp->b_rptr, sizeof (whbuf));
591*1000Sxc151355 			/*
592*1000Sxc151355 			 * Remove WEP CRC from the tail.
593*1000Sxc151355 			 */
594*1000Sxc151355 			rx_mp->b_wptr -= IEEE80211_WEP_CRCLEN;
595*1000Sxc151355 		}
596*1000Sxc151355 #ifdef DEBUG
597*1000Sxc151355 		ath_printrxbuf(bf, status == HAL_OK);
598*1000Sxc151355 #endif /* DEBUG */
599*1000Sxc151355 		ieee80211_input(isc, rx_mp,
600*1000Sxc151355 		    ds->ds_rxstat.rs_rssi,
601*1000Sxc151355 		    ds->ds_rxstat.rs_tstamp,
602*1000Sxc151355 		    ds->ds_rxstat.rs_antenna);
603*1000Sxc151355 rx_next:
604*1000Sxc151355 		mutex_enter(&asc->asc_rxbuflock);
605*1000Sxc151355 		list_insert_tail(&asc->asc_rxbuf_list, bf);
606*1000Sxc151355 		mutex_exit(&asc->asc_rxbuflock);
607*1000Sxc151355 		ath_setup_desc(asc, bf);
608*1000Sxc151355 	} while (loop);
609*1000Sxc151355 
610*1000Sxc151355 	/* rx signal state monitoring */
611*1000Sxc151355 	ATH_HAL_RXMONITOR(ah, &hal_node_stats);
612*1000Sxc151355 	ATH_HAL_RXENA(ah);	/* in case of RXEOL */
613*1000Sxc151355 }
614*1000Sxc151355 
/*
 * Debug-print one TX descriptor.  The trailing character shows the
 * state: ' ' = not done, '*' = completed OK, '!' = completed w/ error.
 */
static void
ath_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds = bf->bf_desc;

	ATH_DEBUG((ATH_DBG_SEND, "ath: T(%p %p) %08x %08x %08x %08x %08x"
	    " %08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
	    !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'));
}
628*1000Sxc151355 
629*1000Sxc151355 /*
630*1000Sxc151355  * The input parameter mp has following assumption:
631*1000Sxc151355  * the first mblk is for ieee80211 header, and there has enough space left
632*1000Sxc151355  * for WEP option at the end of this mblk.
633*1000Sxc151355  * The continue mblks are for payload.
634*1000Sxc151355  */
635*1000Sxc151355 static int32_t
636*1000Sxc151355 ath_xmit(ath_t *asc, struct ieee80211_node *in,
637*1000Sxc151355     struct ath_buf *bf, mblk_t *mp, mblk_t *mp_header)
638*1000Sxc151355 {
639*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
640*1000Sxc151355 	struct ieee80211_frame *wh;
641*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
642*1000Sxc151355 	uint32_t subtype, flags, ctsduration, antenna;
643*1000Sxc151355 	int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen, try0;
644*1000Sxc151355 	uint8_t rix, cix, txrate, ctsrate, *tmp_ptr;
645*1000Sxc151355 	struct ath_desc *ds;
646*1000Sxc151355 	struct ath_txq *txq;
647*1000Sxc151355 	HAL_PKT_TYPE atype;
648*1000Sxc151355 	const HAL_RATE_TABLE *rt;
649*1000Sxc151355 	HAL_BOOL shortPreamble;
650*1000Sxc151355 	mblk_t *mp0;
651*1000Sxc151355 	struct ath_node *an;
652*1000Sxc151355 
653*1000Sxc151355 	/*
654*1000Sxc151355 	 * CRC are added by H/W, not encaped by driver,
655*1000Sxc151355 	 * but we must count it in pkt length.
656*1000Sxc151355 	 */
657*1000Sxc151355 	pktlen = IEEE80211_CRC_LEN;
658*1000Sxc151355 
659*1000Sxc151355 	wh = (struct ieee80211_frame *)mp_header->b_rptr;
660*1000Sxc151355 	iswep = wh->ifrm_fc[1] & IEEE80211_FC1_WEP;
661*1000Sxc151355 	keyix = HAL_TXKEYIX_INVALID;
662*1000Sxc151355 	hdrlen = sizeof (struct ieee80211_frame);
663*1000Sxc151355 	if (iswep) {
664*1000Sxc151355 		hdrlen += IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN;
665*1000Sxc151355 		pktlen += IEEE80211_WEP_CRCLEN;
666*1000Sxc151355 		keyix = isc->isc_wep_txkey;
667*1000Sxc151355 	}
668*1000Sxc151355 	tmp_ptr = (uint8_t *)bf->bf_dma.mem_va;
669*1000Sxc151355 
670*1000Sxc151355 	/* Copy 80211 header from mblk to DMA txbuf */
671*1000Sxc151355 	mblen = mp_header->b_wptr - mp_header->b_rptr;
672*1000Sxc151355 	bcopy(mp_header->b_rptr, tmp_ptr, mblen);
673*1000Sxc151355 	tmp_ptr += mblen;
674*1000Sxc151355 	pktlen += mblen;
675*1000Sxc151355 	mbslen = mblen;
676*1000Sxc151355 
677*1000Sxc151355 	/*
678*1000Sxc151355 	 * If mp==NULL, then it's a management frame,
679*1000Sxc151355 	 * else it's a data frame.
680*1000Sxc151355 	 */
681*1000Sxc151355 	if (mp != NULL) {
682*1000Sxc151355 		/*
683*1000Sxc151355 		 * Copy the first mblk to DMA txbuf
684*1000Sxc151355 		 * (this mblk includes ether header).
685*1000Sxc151355 		 */
686*1000Sxc151355 		mblen = mp->b_wptr - mp->b_rptr - sizeof (struct ether_header);
687*1000Sxc151355 		bcopy(mp->b_rptr + sizeof (struct ether_header),
688*1000Sxc151355 			tmp_ptr, mblen);
689*1000Sxc151355 		tmp_ptr += mblen;
690*1000Sxc151355 		pktlen += mblen;
691*1000Sxc151355 		mbslen += mblen;
692*1000Sxc151355 
693*1000Sxc151355 		/* Copy subsequent mblks to DMA txbuf */
694*1000Sxc151355 		for (mp0 = mp->b_cont; mp0 != NULL; mp0 = mp0->b_cont) {
695*1000Sxc151355 			mblen = mp0->b_wptr - mp0->b_rptr;
696*1000Sxc151355 			bcopy(mp0->b_rptr, tmp_ptr, mblen);
697*1000Sxc151355 			tmp_ptr += mblen;
698*1000Sxc151355 			pktlen += mblen;
699*1000Sxc151355 			mbslen += mblen;
700*1000Sxc151355 		}
701*1000Sxc151355 	}
702*1000Sxc151355 
703*1000Sxc151355 	bf->bf_in = in;
704*1000Sxc151355 
705*1000Sxc151355 	/* setup descriptors */
706*1000Sxc151355 	ds = bf->bf_desc;
707*1000Sxc151355 	rt = asc->asc_currates;
708*1000Sxc151355 
709*1000Sxc151355 	/*
710*1000Sxc151355 	 * The 802.11 layer marks whether or not we should
711*1000Sxc151355 	 * use short preamble based on the current mode and
712*1000Sxc151355 	 * negotiated parameters.
713*1000Sxc151355 	 */
714*1000Sxc151355 	if ((isc->isc_flags & IEEE80211_F_SHPREAMBLE) &&
715*1000Sxc151355 	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
716*1000Sxc151355 		shortPreamble = AH_TRUE;
717*1000Sxc151355 		asc->asc_stats.ast_tx_shortpre++;
718*1000Sxc151355 	} else {
719*1000Sxc151355 		shortPreamble = AH_FALSE;
720*1000Sxc151355 	}
721*1000Sxc151355 
722*1000Sxc151355 	an = ATH_NODE(in);
723*1000Sxc151355 
724*1000Sxc151355 	/*
725*1000Sxc151355 	 * Calculate Atheros packet type from IEEE80211 packet header
726*1000Sxc151355 	 * and setup for rate calculations.
727*1000Sxc151355 	 */
728*1000Sxc151355 	switch (wh->ifrm_fc[0] & IEEE80211_FC0_TYPE_MASK) {
729*1000Sxc151355 	case IEEE80211_FC0_TYPE_MGT:
730*1000Sxc151355 		subtype = wh->ifrm_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
731*1000Sxc151355 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
732*1000Sxc151355 			atype = HAL_PKT_TYPE_BEACON;
733*1000Sxc151355 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
734*1000Sxc151355 			atype = HAL_PKT_TYPE_PROBE_RESP;
735*1000Sxc151355 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
736*1000Sxc151355 			atype = HAL_PKT_TYPE_ATIM;
737*1000Sxc151355 		else
738*1000Sxc151355 			atype = HAL_PKT_TYPE_NORMAL;
739*1000Sxc151355 		rix = 0;	/* lowest rate */
740*1000Sxc151355 		try0 = ATH_TXMAXTRY;
741*1000Sxc151355 		if (shortPreamble)
742*1000Sxc151355 			txrate = an->an_tx_mgtratesp;
743*1000Sxc151355 		else
744*1000Sxc151355 			txrate = an->an_tx_mgtrate;
745*1000Sxc151355 		/* force all ctl frames to highest queue */
746*1000Sxc151355 		txq = asc->asc_ac2q[WME_AC_VO];
747*1000Sxc151355 		break;
748*1000Sxc151355 	case IEEE80211_FC0_TYPE_CTL:
749*1000Sxc151355 		atype = HAL_PKT_TYPE_PSPOLL;
750*1000Sxc151355 		subtype = wh->ifrm_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
751*1000Sxc151355 		rix = 0;	/* lowest rate */
752*1000Sxc151355 		try0 = ATH_TXMAXTRY;
753*1000Sxc151355 		if (shortPreamble)
754*1000Sxc151355 			txrate = an->an_tx_mgtratesp;
755*1000Sxc151355 		else
756*1000Sxc151355 			txrate = an->an_tx_mgtrate;
757*1000Sxc151355 		/* force all ctl frames to highest queue */
758*1000Sxc151355 		txq = asc->asc_ac2q[WME_AC_VO];
759*1000Sxc151355 		break;
760*1000Sxc151355 	case IEEE80211_FC0_TYPE_DATA:
761*1000Sxc151355 		atype = HAL_PKT_TYPE_NORMAL;
762*1000Sxc151355 		rix = an->an_tx_rix0;
763*1000Sxc151355 		try0 = an->an_tx_try0;
764*1000Sxc151355 		if (shortPreamble)
765*1000Sxc151355 			txrate = an->an_tx_rate0sp;
766*1000Sxc151355 		else
767*1000Sxc151355 			txrate = an->an_tx_rate0;
768*1000Sxc151355 		/* Always use background queue */
769*1000Sxc151355 		txq = asc->asc_ac2q[WME_AC_BK];
770*1000Sxc151355 		break;
771*1000Sxc151355 	default:
772*1000Sxc151355 		/* Unknown 802.11 frame */
773*1000Sxc151355 		asc->asc_stats.ast_tx_invalid++;
774*1000Sxc151355 		return (1);
775*1000Sxc151355 	}
776*1000Sxc151355 	/*
777*1000Sxc151355 	 * Calculate miscellaneous flags.
778*1000Sxc151355 	 */
779*1000Sxc151355 	flags = HAL_TXDESC_CLRDMASK;
780*1000Sxc151355 	if (IEEE80211_IS_MULTICAST(wh->ifrm_addr1)) {
781*1000Sxc151355 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
782*1000Sxc151355 		asc->asc_stats.ast_tx_noack++;
783*1000Sxc151355 	} else if (pktlen > isc->isc_rtsthreshold) {
784*1000Sxc151355 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
785*1000Sxc151355 		asc->asc_stats.ast_tx_rts++;
786*1000Sxc151355 	}
787*1000Sxc151355 
788*1000Sxc151355 	/*
789*1000Sxc151355 	 * Calculate duration.  This logically belongs in the 802.11
790*1000Sxc151355 	 * layer but it lacks sufficient information to calculate it.
791*1000Sxc151355 	 */
792*1000Sxc151355 	if ((flags & HAL_TXDESC_NOACK) == 0 &&
793*1000Sxc151355 	    (wh->ifrm_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
794*1000Sxc151355 	    IEEE80211_FC0_TYPE_CTL) {
795*1000Sxc151355 		uint16_t dur;
796*1000Sxc151355 		dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
797*1000Sxc151355 		    rix, shortPreamble);
798*1000Sxc151355 		*(uint16_t *)wh->ifrm_dur = LE_16(dur);
799*1000Sxc151355 	}
800*1000Sxc151355 
801*1000Sxc151355 	/*
802*1000Sxc151355 	 * Calculate RTS/CTS rate and duration if needed.
803*1000Sxc151355 	 */
804*1000Sxc151355 	ctsduration = 0;
805*1000Sxc151355 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
806*1000Sxc151355 		/*
807*1000Sxc151355 		 * CTS transmit rate is derived from the transmit rate
808*1000Sxc151355 		 * by looking in the h/w rate table.  We must also factor
809*1000Sxc151355 		 * in whether or not a short preamble is to be used.
810*1000Sxc151355 		 */
811*1000Sxc151355 		cix = rt->info[rix].controlRate;
812*1000Sxc151355 		ctsrate = rt->info[cix].rateCode;
813*1000Sxc151355 		if (shortPreamble)
814*1000Sxc151355 			ctsrate |= rt->info[cix].shortPreamble;
815*1000Sxc151355 		/*
816*1000Sxc151355 		 * Compute the transmit duration based on the size
817*1000Sxc151355 		 * of an ACK frame.  We call into the HAL to do the
818*1000Sxc151355 		 * computation since it depends on the characteristics
819*1000Sxc151355 		 * of the actual PHY being used.
820*1000Sxc151355 		 */
821*1000Sxc151355 		if (flags & HAL_TXDESC_RTSENA) {	/* SIFS + CTS */
822*1000Sxc151355 			ctsduration += ath_hal_computetxtime(ah,
823*1000Sxc151355 			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
824*1000Sxc151355 		}
825*1000Sxc151355 		/* SIFS + data */
826*1000Sxc151355 		ctsduration += ath_hal_computetxtime(ah,
827*1000Sxc151355 		    rt, pktlen, rix, shortPreamble);
828*1000Sxc151355 		if ((flags & HAL_TXDESC_NOACK) == 0) {	/* SIFS + ACK */
829*1000Sxc151355 			ctsduration += ath_hal_computetxtime(ah,
830*1000Sxc151355 			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
831*1000Sxc151355 		}
832*1000Sxc151355 	} else
833*1000Sxc151355 		ctsrate = 0;
834*1000Sxc151355 
835*1000Sxc151355 	/*
836*1000Sxc151355 	 * For now use the antenna on which the last good
837*1000Sxc151355 	 * frame was received on.  We assume this field is
838*1000Sxc151355 	 * initialized to 0 which gives us ``auto'' or the
839*1000Sxc151355 	 * ``default'' antenna.
840*1000Sxc151355 	 */
841*1000Sxc151355 	if (an->an_tx_antenna)
842*1000Sxc151355 		antenna = an->an_tx_antenna;
843*1000Sxc151355 	else
844*1000Sxc151355 		antenna = in->in_recv_hist[in->in_hist_cur].irh_rantenna;
845*1000Sxc151355 
846*1000Sxc151355 	if (++txq->axq_intrcnt >= ATH_TXINTR_PERIOD) {
847*1000Sxc151355 		flags |= HAL_TXDESC_INTREQ;
848*1000Sxc151355 		txq->axq_intrcnt = 0;
849*1000Sxc151355 	}
850*1000Sxc151355 
851*1000Sxc151355 	/*
852*1000Sxc151355 	 * Formulate first tx descriptor with tx controls.
853*1000Sxc151355 	 */
854*1000Sxc151355 	ATH_HAL_SETUPTXDESC(ah, ds,
855*1000Sxc151355 	    pktlen,			/* packet length */
856*1000Sxc151355 	    hdrlen,			/* header length */
857*1000Sxc151355 	    atype,			/* Atheros packet type */
858*1000Sxc151355 	    MIN(in->in_txpower, 60),	/* txpower */
859*1000Sxc151355 	    txrate, try0,		/* series 0 rate/tries */
860*1000Sxc151355 	    keyix,
861*1000Sxc151355 	    antenna,			/* antenna mode */
862*1000Sxc151355 	    flags,			/* flags */
863*1000Sxc151355 	    ctsrate,			/* rts/cts rate */
864*1000Sxc151355 	    ctsduration);		/* rts/cts duration */
865*1000Sxc151355 
866*1000Sxc151355 	ATH_DEBUG((ATH_DBG_SEND, "ath: ath_xmit(): to %s totlen=%d "
867*1000Sxc151355 	    "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
868*1000Sxc151355 	    "qnum=%d rix=%d sht=%d dur = %d\n",
869*1000Sxc151355 	    ieee80211_ether_sprintf(wh->ifrm_addr1), mbslen, an->an_tx_rate1sp,
870*1000Sxc151355 	    an->an_tx_rate2sp, an->an_tx_rate3sp,
871*1000Sxc151355 	    txq->axq_qnum, rix, shortPreamble, *(uint16_t *)wh->ifrm_dur));
872*1000Sxc151355 
873*1000Sxc151355 	/*
874*1000Sxc151355 	 * Setup the multi-rate retry state only when we're
875*1000Sxc151355 	 * going to use it.  This assumes ath_hal_setuptxdesc
876*1000Sxc151355 	 * initializes the descriptors (so we don't have to)
877*1000Sxc151355 	 * when the hardware supports multi-rate retry and
878*1000Sxc151355 	 * we don't use it.
879*1000Sxc151355 	 */
880*1000Sxc151355 	if (try0 != ATH_TXMAXTRY)
881*1000Sxc151355 		ATH_HAL_SETUPXTXDESC(ah, ds,
882*1000Sxc151355 		    an->an_tx_rate1sp, 2,	/* series 1 */
883*1000Sxc151355 		    an->an_tx_rate2sp, 2,	/* series 2 */
884*1000Sxc151355 		    an->an_tx_rate3sp, 2);	/* series 3 */
885*1000Sxc151355 
886*1000Sxc151355 	ds->ds_link = 0;
887*1000Sxc151355 	ds->ds_data = bf->bf_dma.cookie.dmac_address;
888*1000Sxc151355 	ATH_HAL_FILLTXDESC(ah, ds,
889*1000Sxc151355 	    mbslen,		/* segment length */
890*1000Sxc151355 	    AH_TRUE,		/* first segment */
891*1000Sxc151355 	    AH_TRUE,		/* last segment */
892*1000Sxc151355 	    ds);		/* first descriptor */
893*1000Sxc151355 
894*1000Sxc151355 	ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);
895*1000Sxc151355 
896*1000Sxc151355 	mutex_enter(&txq->axq_lock);
897*1000Sxc151355 	list_insert_tail(&txq->axq_list, bf);
898*1000Sxc151355 	if (txq->axq_link == NULL) {
899*1000Sxc151355 		ATH_HAL_PUTTXBUF(ah, txq->axq_qnum, bf->bf_daddr);
900*1000Sxc151355 	} else {
901*1000Sxc151355 		*txq->axq_link = bf->bf_daddr;
902*1000Sxc151355 	}
903*1000Sxc151355 	txq->axq_link = &ds->ds_link;
904*1000Sxc151355 	mutex_exit(&txq->axq_lock);
905*1000Sxc151355 
906*1000Sxc151355 	ATH_HAL_TXSTART(ah, txq->axq_qnum);
907*1000Sxc151355 
908*1000Sxc151355 	return (0);
909*1000Sxc151355 }
910*1000Sxc151355 
911*1000Sxc151355 
912*1000Sxc151355 static int
913*1000Sxc151355 ath_gld_send(gld_mac_info_t *gld_p, mblk_t *mp)
914*1000Sxc151355 {
915*1000Sxc151355 	int err;
916*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
917*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
918*1000Sxc151355 	struct ieee80211_node *in;
919*1000Sxc151355 	mblk_t *mp_header;
920*1000Sxc151355 	struct ath_buf *bf = NULL;
921*1000Sxc151355 
922*1000Sxc151355 	/*
923*1000Sxc151355 	 * No data frames go out unless we're associated; this
924*1000Sxc151355 	 * should not happen as the 802.11 layer does not enable
925*1000Sxc151355 	 * the xmit queue until we enter the RUN state.
926*1000Sxc151355 	 */
927*1000Sxc151355 	if (isc->isc_state != IEEE80211_S_RUN) {
928*1000Sxc151355 		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_gld_send(): "
929*1000Sxc151355 		    "discard, state %u\n", isc->isc_state));
930*1000Sxc151355 		asc->asc_stats.ast_tx_discard++;
931*1000Sxc151355 		return (GLD_NOLINK);
932*1000Sxc151355 	}
933*1000Sxc151355 
934*1000Sxc151355 	/*
935*1000Sxc151355 	 * Only supports STA mode
936*1000Sxc151355 	 */
937*1000Sxc151355 	if (isc->isc_opmode != IEEE80211_M_STA)
938*1000Sxc151355 		return (GLD_NOLINK);
939*1000Sxc151355 
940*1000Sxc151355 	/*
941*1000Sxc151355 	 * Locate AP information, so we can fill MAC address.
942*1000Sxc151355 	 */
943*1000Sxc151355 	in = isc->isc_bss;
944*1000Sxc151355 	in->in_inact = 0;
945*1000Sxc151355 
946*1000Sxc151355 	/*
947*1000Sxc151355 	 * Grab a TX buffer.
948*1000Sxc151355 	 */
949*1000Sxc151355 	mutex_enter(&asc->asc_txbuflock);
950*1000Sxc151355 	bf = list_head(&asc->asc_txbuf_list);
951*1000Sxc151355 
952*1000Sxc151355 	if (bf != NULL)
953*1000Sxc151355 		list_remove(&asc->asc_txbuf_list, bf);
954*1000Sxc151355 	mutex_exit(&asc->asc_txbuflock);
955*1000Sxc151355 
956*1000Sxc151355 	if (bf == NULL) {
957*1000Sxc151355 		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_gld_send(): "
958*1000Sxc151355 		    "no TX DMA buffer available: 100 times\n"));
959*1000Sxc151355 		asc->asc_stats.ast_tx_nobuf++;
960*1000Sxc151355 
961*1000Sxc151355 		mutex_enter(&asc->asc_gld_sched_lock);
962*1000Sxc151355 		asc->asc_need_gld_sched = 1;
963*1000Sxc151355 		mutex_exit(&asc->asc_gld_sched_lock);
964*1000Sxc151355 		return (GLD_NORESOURCES);
965*1000Sxc151355 	}
966*1000Sxc151355 
967*1000Sxc151355 	mp_header = ieee80211_fill_header(isc, mp, isc->isc_wep_txkey, in);
968*1000Sxc151355 	if (mp_header == NULL) {
969*1000Sxc151355 		/* Push back the TX buf */
970*1000Sxc151355 		mutex_enter(&asc->asc_txbuflock);
971*1000Sxc151355 		list_insert_tail(&asc->asc_txbuf_list, bf);
972*1000Sxc151355 		mutex_exit(&asc->asc_txbuflock);
973*1000Sxc151355 		return (GLD_FAILURE);
974*1000Sxc151355 	}
975*1000Sxc151355 
976*1000Sxc151355 	err = ath_xmit(asc, in, bf, mp, mp_header);
977*1000Sxc151355 	freemsg(mp_header);
978*1000Sxc151355 
979*1000Sxc151355 	if (!err) {
980*1000Sxc151355 		freemsg(mp);
981*1000Sxc151355 		return (GLD_SUCCESS);
982*1000Sxc151355 	} else {
983*1000Sxc151355 		return (GLD_FAILURE);
984*1000Sxc151355 	}
985*1000Sxc151355 }
986*1000Sxc151355 
987*1000Sxc151355 static void
988*1000Sxc151355 ath_tx_processq(ath_t *asc, struct ath_txq *txq)
989*1000Sxc151355 {
990*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
991*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
992*1000Sxc151355 	struct ath_buf *bf;
993*1000Sxc151355 	struct ath_desc *ds;
994*1000Sxc151355 	struct ieee80211_node *in;
995*1000Sxc151355 	int32_t sr, lr;
996*1000Sxc151355 	HAL_STATUS status;
997*1000Sxc151355 	struct ath_node *an;
998*1000Sxc151355 
999*1000Sxc151355 	for (;;) {
1000*1000Sxc151355 		mutex_enter(&txq->axq_lock);
1001*1000Sxc151355 		bf = list_head(&txq->axq_list);
1002*1000Sxc151355 		if (bf == NULL) {
1003*1000Sxc151355 			txq->axq_link = NULL;
1004*1000Sxc151355 			mutex_exit(&txq->axq_lock);
1005*1000Sxc151355 			break;
1006*1000Sxc151355 		}
1007*1000Sxc151355 		ds = bf->bf_desc;	/* last decriptor */
1008*1000Sxc151355 		status = ATH_HAL_TXPROCDESC(ah, ds);
1009*1000Sxc151355 #ifdef DEBUG
1010*1000Sxc151355 		ath_printtxbuf(bf, status == HAL_OK);
1011*1000Sxc151355 #endif
1012*1000Sxc151355 		if (status == HAL_EINPROGRESS) {
1013*1000Sxc151355 			mutex_exit(&txq->axq_lock);
1014*1000Sxc151355 			break;
1015*1000Sxc151355 		}
1016*1000Sxc151355 		list_remove(&txq->axq_list, bf);
1017*1000Sxc151355 		mutex_exit(&txq->axq_lock);
1018*1000Sxc151355 		in = bf->bf_in;
1019*1000Sxc151355 		if (in != NULL) {
1020*1000Sxc151355 			an = ATH_NODE(in);
1021*1000Sxc151355 			/* Successful transmition */
1022*1000Sxc151355 			if (ds->ds_txstat.ts_status == 0) {
1023*1000Sxc151355 				an->an_tx_ok++;
1024*1000Sxc151355 				an->an_tx_antenna =
1025*1000Sxc151355 				    ds->ds_txstat.ts_antenna;
1026*1000Sxc151355 				if (ds->ds_txstat.ts_rate &
1027*1000Sxc151355 				    HAL_TXSTAT_ALTRATE)
1028*1000Sxc151355 					asc->asc_stats.ast_tx_altrate++;
1029*1000Sxc151355 				asc->asc_stats.ast_tx_rssidelta =
1030*1000Sxc151355 				    ds->ds_txstat.ts_rssi -
1031*1000Sxc151355 				    asc->asc_stats.ast_tx_rssi;
1032*1000Sxc151355 				asc->asc_stats.ast_tx_rssi =
1033*1000Sxc151355 				    ds->ds_txstat.ts_rssi;
1034*1000Sxc151355 			} else {
1035*1000Sxc151355 				an->an_tx_err++;
1036*1000Sxc151355 				if (ds->ds_txstat.ts_status &
1037*1000Sxc151355 				    HAL_TXERR_XRETRY)
1038*1000Sxc151355 					asc->asc_stats.
1039*1000Sxc151355 					    ast_tx_xretries++;
1040*1000Sxc151355 				if (ds->ds_txstat.ts_status &
1041*1000Sxc151355 				    HAL_TXERR_FIFO)
1042*1000Sxc151355 					asc->asc_stats.ast_tx_fifoerr++;
1043*1000Sxc151355 				if (ds->ds_txstat.ts_status &
1044*1000Sxc151355 				    HAL_TXERR_FILT)
1045*1000Sxc151355 					asc->asc_stats.
1046*1000Sxc151355 					    ast_tx_filtered++;
1047*1000Sxc151355 				an->an_tx_antenna = 0;	/* invalidate */
1048*1000Sxc151355 			}
1049*1000Sxc151355 			sr = ds->ds_txstat.ts_shortretry;
1050*1000Sxc151355 			lr = ds->ds_txstat.ts_longretry;
1051*1000Sxc151355 			asc->asc_stats.ast_tx_shortretry += sr;
1052*1000Sxc151355 			asc->asc_stats.ast_tx_longretry += lr;
1053*1000Sxc151355 			an->an_tx_retr += sr + lr;
1054*1000Sxc151355 		}
1055*1000Sxc151355 		bf->bf_in = NULL;
1056*1000Sxc151355 		mutex_enter(&asc->asc_txbuflock);
1057*1000Sxc151355 		list_insert_tail(&asc->asc_txbuf_list, bf);
1058*1000Sxc151355 		mutex_exit(&asc->asc_txbuflock);
1059*1000Sxc151355 		mutex_enter(&asc->asc_gld_sched_lock);
1060*1000Sxc151355 		/*
1061*1000Sxc151355 		 * Reschedule stalled outbound packets
1062*1000Sxc151355 		 */
1063*1000Sxc151355 		if (asc->asc_need_gld_sched) {
1064*1000Sxc151355 			asc->asc_need_gld_sched = 0;
1065*1000Sxc151355 			gld_sched(isc->isc_dev);
1066*1000Sxc151355 		}
1067*1000Sxc151355 		mutex_exit(&asc->asc_gld_sched_lock);
1068*1000Sxc151355 	}
1069*1000Sxc151355 }
1070*1000Sxc151355 
1071*1000Sxc151355 
1072*1000Sxc151355 static void
1073*1000Sxc151355 ath_tx_handler(ath_t *asc)
1074*1000Sxc151355 {
1075*1000Sxc151355 	int i;
1076*1000Sxc151355 
1077*1000Sxc151355 	/*
1078*1000Sxc151355 	 * Process each active queue.
1079*1000Sxc151355 	 */
1080*1000Sxc151355 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1081*1000Sxc151355 		if (ATH_TXQ_SETUP(asc, i)) {
1082*1000Sxc151355 			ath_tx_processq(asc, &asc->asc_txq[i]);
1083*1000Sxc151355 		}
1084*1000Sxc151355 	}
1085*1000Sxc151355 }
1086*1000Sxc151355 
1087*1000Sxc151355 static struct ieee80211_node *
1088*1000Sxc151355 ath_node_alloc(ieee80211com_t *isc)
1089*1000Sxc151355 {
1090*1000Sxc151355 	struct ath_node *an;
1091*1000Sxc151355 	ath_t *asc = (ath_t *)isc;
1092*1000Sxc151355 
1093*1000Sxc151355 	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1094*1000Sxc151355 	ath_rate_update(asc, &an->an_node, 0);
1095*1000Sxc151355 	return (&an->an_node);
1096*1000Sxc151355 }
1097*1000Sxc151355 
1098*1000Sxc151355 static void
1099*1000Sxc151355 ath_node_free(ieee80211com_t *isc, struct ieee80211_node *in)
1100*1000Sxc151355 {
1101*1000Sxc151355 	ath_t *asc = (ath_t *)isc;
1102*1000Sxc151355 	struct ath_buf *bf;
1103*1000Sxc151355 	struct ath_txq *txq;
1104*1000Sxc151355 	int32_t i;
1105*1000Sxc151355 
1106*1000Sxc151355 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1107*1000Sxc151355 		if (ATH_TXQ_SETUP(asc, i)) {
1108*1000Sxc151355 			txq = &asc->asc_txq[i];
1109*1000Sxc151355 			mutex_enter(&txq->axq_lock);
1110*1000Sxc151355 			bf = list_head(&txq->axq_list);
1111*1000Sxc151355 			while (bf != NULL) {
1112*1000Sxc151355 				if (bf->bf_in == in) {
1113*1000Sxc151355 					bf->bf_in = NULL;
1114*1000Sxc151355 				}
1115*1000Sxc151355 				bf = list_next(&txq->axq_list, bf);
1116*1000Sxc151355 			}
1117*1000Sxc151355 			mutex_exit(&txq->axq_lock);
1118*1000Sxc151355 		}
1119*1000Sxc151355 	}
1120*1000Sxc151355 	kmem_free(in, sizeof (struct ath_node));
1121*1000Sxc151355 }
1122*1000Sxc151355 
1123*1000Sxc151355 static void
1124*1000Sxc151355 ath_node_copy(struct ieee80211_node *dst, const struct ieee80211_node *src)
1125*1000Sxc151355 {
1126*1000Sxc151355 	bcopy(src, dst, sizeof (struct ieee80211_node));
1127*1000Sxc151355 }
1128*1000Sxc151355 
1129*1000Sxc151355 /*
1130*1000Sxc151355  * Transmit a management frame.  On failure we reclaim the skbuff.
1131*1000Sxc151355  * Note that management frames come directly from the 802.11 layer
1132*1000Sxc151355  * and do not honor the send queue flow control.  Need to investigate
1133*1000Sxc151355  * using priority queueing so management frames can bypass data.
1134*1000Sxc151355  */
1135*1000Sxc151355 static int32_t
1136*1000Sxc151355 ath_mgmt_send(ieee80211com_t *isc, mblk_t *mp)
1137*1000Sxc151355 {
1138*1000Sxc151355 	ath_t *asc = (ath_t *)isc;
1139*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1140*1000Sxc151355 	struct ieee80211_node *in;
1141*1000Sxc151355 	struct ath_buf *bf = NULL;
1142*1000Sxc151355 	struct ieee80211_frame *wh;
1143*1000Sxc151355 	int32_t error = 0;
1144*1000Sxc151355 
1145*1000Sxc151355 	/* Grab a TX buffer */
1146*1000Sxc151355 	mutex_enter(&asc->asc_txbuflock);
1147*1000Sxc151355 	bf = list_head(&asc->asc_txbuf_list);
1148*1000Sxc151355 	if (bf != NULL)
1149*1000Sxc151355 		list_remove(&asc->asc_txbuf_list, bf);
1150*1000Sxc151355 	if (list_empty(&asc->asc_txbuf_list)) {
1151*1000Sxc151355 		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): "
1152*1000Sxc151355 		    "stop queue\n"));
1153*1000Sxc151355 		asc->asc_stats.ast_tx_qstop++;
1154*1000Sxc151355 	}
1155*1000Sxc151355 	mutex_exit(&asc->asc_txbuflock);
1156*1000Sxc151355 	if (bf == NULL) {
1157*1000Sxc151355 		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): discard, "
1158*1000Sxc151355 		    "no xmit buf\n"));
1159*1000Sxc151355 		asc->asc_stats.ast_tx_nobufmgt++;
1160*1000Sxc151355 		goto bad;
1161*1000Sxc151355 	}
1162*1000Sxc151355 	wh = (struct ieee80211_frame *)mp->b_rptr;
1163*1000Sxc151355 	if ((wh->ifrm_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
1164*1000Sxc151355 	    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
1165*1000Sxc151355 		/* fill time stamp */
1166*1000Sxc151355 		uint64_t tsf;
1167*1000Sxc151355 		uint32_t *tstamp;
1168*1000Sxc151355 
1169*1000Sxc151355 		tsf = ATH_HAL_GETTSF64(ah);
1170*1000Sxc151355 		/* adjust 100us delay to xmit */
1171*1000Sxc151355 		tsf += 100;
1172*1000Sxc151355 		tstamp = (uint32_t *)&wh[1];
1173*1000Sxc151355 		tstamp[0] = LE_32(tsf & 0xffffffff);
1174*1000Sxc151355 		tstamp[1] = LE_32(tsf >> 32);
1175*1000Sxc151355 	}
1176*1000Sxc151355 	/*
1177*1000Sxc151355 	 * Locate node state.  When operating
1178*1000Sxc151355 	 * in station mode we always use ic_bss.
1179*1000Sxc151355 	 */
1180*1000Sxc151355 	if (isc->isc_opmode != IEEE80211_M_STA) {
1181*1000Sxc151355 		in = ieee80211_find_node(isc, wh->ifrm_addr1);
1182*1000Sxc151355 		if (in == NULL)
1183*1000Sxc151355 			in = isc->isc_bss;
1184*1000Sxc151355 	} else
1185*1000Sxc151355 		in = isc->isc_bss;
1186*1000Sxc151355 
1187*1000Sxc151355 	error = ath_xmit(asc, in, bf, NULL, mp);
1188*1000Sxc151355 	if (error == 0) {
1189*1000Sxc151355 		asc->asc_stats.ast_tx_mgmt++;
1190*1000Sxc151355 		freemsg(mp);
1191*1000Sxc151355 		return (0);
1192*1000Sxc151355 	}
1193*1000Sxc151355 bad:
1194*1000Sxc151355 	if (bf != NULL) {
1195*1000Sxc151355 		mutex_enter(&asc->asc_txbuflock);
1196*1000Sxc151355 		list_insert_tail(&asc->asc_txbuf_list, bf);
1197*1000Sxc151355 		mutex_exit(&asc->asc_txbuflock);
1198*1000Sxc151355 	}
1199*1000Sxc151355 	freemsg(mp);
1200*1000Sxc151355 	return (error);
1201*1000Sxc151355 }
1202*1000Sxc151355 
1203*1000Sxc151355 static int32_t
1204*1000Sxc151355 ath_new_state(ieee80211com_t *isc, enum ieee80211_state nstate)
1205*1000Sxc151355 {
1206*1000Sxc151355 	ath_t *asc = (ath_t *)isc;
1207*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1208*1000Sxc151355 	struct ieee80211_node *in;
1209*1000Sxc151355 	int32_t i, error;
1210*1000Sxc151355 	uint8_t *bssid;
1211*1000Sxc151355 	uint32_t rfilt;
1212*1000Sxc151355 	enum ieee80211_state ostate;
1213*1000Sxc151355 
1214*1000Sxc151355 	static const HAL_LED_STATE leds[] = {
1215*1000Sxc151355 	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
1216*1000Sxc151355 	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
1217*1000Sxc151355 	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
1218*1000Sxc151355 	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
1219*1000Sxc151355 	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
1220*1000Sxc151355 	};
1221*1000Sxc151355 	if (asc->asc_invalid == 1)
1222*1000Sxc151355 		return (0);
1223*1000Sxc151355 
1224*1000Sxc151355 	ostate = isc->isc_state;
1225*1000Sxc151355 
1226*1000Sxc151355 	ATH_HAL_SETLEDSTATE(ah, leds[nstate]);	/* set LED */
1227*1000Sxc151355 
1228*1000Sxc151355 	if (nstate == IEEE80211_S_INIT) {
1229*1000Sxc151355 		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
1230*1000Sxc151355 		ATH_HAL_INTRSET(ah, asc->asc_imask);
1231*1000Sxc151355 		error = 0;			/* cheat + use error return */
1232*1000Sxc151355 		goto bad;
1233*1000Sxc151355 	}
1234*1000Sxc151355 	in = isc->isc_bss;
1235*1000Sxc151355 	error = ath_chan_set(asc, in->in_chan);
1236*1000Sxc151355 	if (error != 0)
1237*1000Sxc151355 		goto bad;
1238*1000Sxc151355 
1239*1000Sxc151355 	rfilt = ath_calcrxfilter(asc);
1240*1000Sxc151355 	if (nstate == IEEE80211_S_SCAN)
1241*1000Sxc151355 		bssid = isc->isc_macaddr;
1242*1000Sxc151355 	else
1243*1000Sxc151355 		bssid = in->in_bssid;
1244*1000Sxc151355 	ATH_HAL_SETRXFILTER(ah, rfilt);
1245*1000Sxc151355 
1246*1000Sxc151355 	if (nstate == IEEE80211_S_RUN && isc->isc_opmode != IEEE80211_M_IBSS)
1247*1000Sxc151355 		ATH_HAL_SETASSOCID(ah, bssid, in->in_associd);
1248*1000Sxc151355 	else
1249*1000Sxc151355 		ATH_HAL_SETASSOCID(ah, bssid, 0);
1250*1000Sxc151355 	if (isc->isc_flags & IEEE80211_F_WEPON) {
1251*1000Sxc151355 		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1252*1000Sxc151355 			if (ATH_HAL_KEYISVALID(ah, i))
1253*1000Sxc151355 				ATH_HAL_KEYSETMAC(ah, i, bssid);
1254*1000Sxc151355 		}
1255*1000Sxc151355 	}
1256*1000Sxc151355 
1257*1000Sxc151355 	if ((nstate == IEEE80211_S_RUN) &&
1258*1000Sxc151355 	    (ostate != IEEE80211_S_RUN)) {
1259*1000Sxc151355 		/* Configure the beacon and sleep timers. */
1260*1000Sxc151355 		ath_beacon_config(asc);
1261*1000Sxc151355 	} else {
1262*1000Sxc151355 		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
1263*1000Sxc151355 		ATH_HAL_INTRSET(ah, asc->asc_imask);
1264*1000Sxc151355 	}
1265*1000Sxc151355 	/*
1266*1000Sxc151355 	 * Reset the rate control state.
1267*1000Sxc151355 	 */
1268*1000Sxc151355 	ath_rate_ctl_reset(asc, nstate);
1269*1000Sxc151355 
1270*1000Sxc151355 	if (nstate == IEEE80211_S_RUN) {
1271*1000Sxc151355 		nvlist_t *attr_list = NULL;
1272*1000Sxc151355 		sysevent_id_t eid;
1273*1000Sxc151355 		int32_t err = 0;
1274*1000Sxc151355 		char *str_name = "ATH";
1275*1000Sxc151355 		char str_value[256] = {0};
1276*1000Sxc151355 
1277*1000Sxc151355 		ATH_DEBUG((ATH_DBG_80211, "ath: ath new state(RUN): "
1278*1000Sxc151355 		    "ic_flags=0x%08x iv=%d"
1279*1000Sxc151355 		    " bssid=%s capinfo=0x%04x chan=%d\n",
1280*1000Sxc151355 		    isc->isc_flags,
1281*1000Sxc151355 		    in->in_intval,
1282*1000Sxc151355 		    ieee80211_ether_sprintf(in->in_bssid),
1283*1000Sxc151355 		    in->in_capinfo,
1284*1000Sxc151355 		    ieee80211_chan2ieee(isc, in->in_chan)));
1285*1000Sxc151355 
1286*1000Sxc151355 		(void) sprintf(str_value, "%s%s%d", "-i ",
1287*1000Sxc151355 		    ddi_driver_name(asc->asc_dev),
1288*1000Sxc151355 		    ddi_get_instance(asc->asc_dev));
1289*1000Sxc151355 		if (nvlist_alloc(&attr_list,
1290*1000Sxc151355 		    NV_UNIQUE_NAME_TYPE, KM_SLEEP) == 0) {
1291*1000Sxc151355 			err = nvlist_add_string(attr_list,
1292*1000Sxc151355 			    str_name, str_value);
1293*1000Sxc151355 			if (err != DDI_SUCCESS)
1294*1000Sxc151355 				ATH_DEBUG((ATH_DBG_80211, "ath: "
1295*1000Sxc151355 				    "ath_new_state: error log event\n"));
1296*1000Sxc151355 			err = ddi_log_sysevent(asc->asc_dev,
1297*1000Sxc151355 			    DDI_VENDOR_SUNW, "class",
1298*1000Sxc151355 			    "subclass", attr_list,
1299*1000Sxc151355 			    &eid, DDI_NOSLEEP);
1300*1000Sxc151355 			if (err != DDI_SUCCESS)
1301*1000Sxc151355 				ATH_DEBUG((ATH_DBG_80211, "ath: "
1302*1000Sxc151355 				    "ath_new_state(): error log event\n"));
1303*1000Sxc151355 			nvlist_free(attr_list);
1304*1000Sxc151355 		}
1305*1000Sxc151355 	}
1306*1000Sxc151355 
1307*1000Sxc151355 	return (0);
1308*1000Sxc151355 bad:
1309*1000Sxc151355 	return (error);
1310*1000Sxc151355 }
1311*1000Sxc151355 
1312*1000Sxc151355 /*
1313*1000Sxc151355  * Periodically recalibrate the PHY to account
1314*1000Sxc151355  * for temperature/environment changes.
1315*1000Sxc151355  */
1316*1000Sxc151355 static void
1317*1000Sxc151355 ath_calibrate(ieee80211com_t *isc)
1318*1000Sxc151355 {
1319*1000Sxc151355 	ath_t *asc = (ath_t *)isc;
1320*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1321*1000Sxc151355 	struct ieee80211channel *ch;
1322*1000Sxc151355 	HAL_CHANNEL hchan;
1323*1000Sxc151355 
1324*1000Sxc151355 	asc->asc_stats.ast_per_cal++;
1325*1000Sxc151355 
1326*1000Sxc151355 	/*
1327*1000Sxc151355 	 * Convert to a HAL channel description with the flags
1328*1000Sxc151355 	 * constrained to reflect the current operating mode.
1329*1000Sxc151355 	 */
1330*1000Sxc151355 	ch = isc->isc_ibss_chan;
1331*1000Sxc151355 	hchan.channel = ch->ich_freq;
1332*1000Sxc151355 	hchan.channelFlags = ath_chan2flags(isc, ch);
1333*1000Sxc151355 
1334*1000Sxc151355 	if (ATH_HAL_GETRFGAIN(ah) == HAL_RFGAIN_NEED_CHANGE) {
1335*1000Sxc151355 		/*
1336*1000Sxc151355 		 * Rfgain is out of bounds, reset the chip
1337*1000Sxc151355 		 * to load new gain values.
1338*1000Sxc151355 		 */
1339*1000Sxc151355 		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
1340*1000Sxc151355 		    "Need change RFgain\n"));
1341*1000Sxc151355 		asc->asc_stats.ast_per_rfgain++;
1342*1000Sxc151355 		ath_reset(asc);
1343*1000Sxc151355 	}
1344*1000Sxc151355 	if (!ATH_HAL_CALIBRATE(ah, &hchan)) {
1345*1000Sxc151355 		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
1346*1000Sxc151355 		    "calibration of channel %u failed\n",
1347*1000Sxc151355 		    ch->ich_freq));
1348*1000Sxc151355 		asc->asc_stats.ast_per_calfail++;
1349*1000Sxc151355 	}
1350*1000Sxc151355 }
1351*1000Sxc151355 
1352*1000Sxc151355 static uint_t
1353*1000Sxc151355 ath_gld_intr(gld_mac_info_t *gld_p)
1354*1000Sxc151355 {
1355*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
1356*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1357*1000Sxc151355 	HAL_INT status;
1358*1000Sxc151355 	enum ieee80211_state isc_state;
1359*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
1360*1000Sxc151355 
1361*1000Sxc151355 	mutex_enter(&asc->asc_genlock);
1362*1000Sxc151355 
1363*1000Sxc151355 	if (!ATH_HAL_INTRPEND(ah)) {	/* shared irq, not for us */
1364*1000Sxc151355 		mutex_exit(&asc->asc_genlock);
1365*1000Sxc151355 		return (DDI_INTR_UNCLAIMED);
1366*1000Sxc151355 	}
1367*1000Sxc151355 
1368*1000Sxc151355 	ATH_HAL_GETISR(ah, &status);
1369*1000Sxc151355 	status &= asc->asc_imask;
1370*1000Sxc151355 	if (status & HAL_INT_FATAL) {
1371*1000Sxc151355 		asc->asc_stats.ast_hardware++;
1372*1000Sxc151355 		mutex_exit(&asc->asc_genlock);
1373*1000Sxc151355 		goto reset;
1374*1000Sxc151355 	} else if (status & HAL_INT_RXORN) {
1375*1000Sxc151355 		asc->asc_stats.ast_rxorn++;
1376*1000Sxc151355 		mutex_exit(&asc->asc_genlock);
1377*1000Sxc151355 		goto reset;
1378*1000Sxc151355 	} else {
1379*1000Sxc151355 		if (status & HAL_INT_RXEOL) {
1380*1000Sxc151355 			asc->asc_stats.ast_rxeol++;
1381*1000Sxc151355 			asc->asc_rxlink = NULL;
1382*1000Sxc151355 		}
1383*1000Sxc151355 		if (status & HAL_INT_TXURN) {
1384*1000Sxc151355 			asc->asc_stats.ast_txurn++;
1385*1000Sxc151355 			ATH_HAL_UPDATETXTRIGLEVEL(ah, AH_TRUE);
1386*1000Sxc151355 		}
1387*1000Sxc151355 		if (status & HAL_INT_RX) {
1388*1000Sxc151355 			asc->asc_rx_pend = 1;
1389*1000Sxc151355 			ddi_trigger_softintr(asc->asc_softint_id);
1390*1000Sxc151355 		}
1391*1000Sxc151355 		if (status & HAL_INT_TX) {
1392*1000Sxc151355 			ath_tx_handler(asc);
1393*1000Sxc151355 		}
1394*1000Sxc151355 
1395*1000Sxc151355 		mutex_exit(&asc->asc_genlock);
1396*1000Sxc151355 
1397*1000Sxc151355 		if (status & HAL_INT_SWBA) {
1398*1000Sxc151355 			/* This will occur only in Host-AP or Ad-Hoc mode */
1399*1000Sxc151355 			return (DDI_INTR_CLAIMED);
1400*1000Sxc151355 		}
1401*1000Sxc151355 		if (status & HAL_INT_BMISS) {
1402*1000Sxc151355 			mutex_enter(&isc->isc_genlock);
1403*1000Sxc151355 			isc_state = isc->isc_state;
1404*1000Sxc151355 			mutex_exit(&isc->isc_genlock);
1405*1000Sxc151355 			if (isc_state == IEEE80211_S_RUN) {
1406*1000Sxc151355 				(void) ieee80211_new_state(isc,
1407*1000Sxc151355 				    IEEE80211_S_ASSOC, -1);
1408*1000Sxc151355 			}
1409*1000Sxc151355 		}
1410*1000Sxc151355 	}
1411*1000Sxc151355 
1412*1000Sxc151355 	return (DDI_INTR_CLAIMED);
1413*1000Sxc151355 reset:
1414*1000Sxc151355 	mutex_enter(&isc->isc_genlock);
1415*1000Sxc151355 	ath_reset(asc);
1416*1000Sxc151355 	mutex_exit(&isc->isc_genlock);
1417*1000Sxc151355 	return (DDI_INTR_CLAIMED);
1418*1000Sxc151355 }
1419*1000Sxc151355 
1420*1000Sxc151355 static uint_t
1421*1000Sxc151355 ath_softint_handler(caddr_t data)
1422*1000Sxc151355 {
1423*1000Sxc151355 	ath_t *asc = (ath_t *)data;
1424*1000Sxc151355 
1425*1000Sxc151355 	/*
1426*1000Sxc151355 	 * Check if the soft interrupt is triggered by another
1427*1000Sxc151355 	 * driver at the same level.
1428*1000Sxc151355 	 */
1429*1000Sxc151355 	mutex_enter(&asc->asc_genlock);
1430*1000Sxc151355 	if (asc->asc_rx_pend) { /* Soft interrupt for this driver */
1431*1000Sxc151355 		asc->asc_rx_pend = 0;
1432*1000Sxc151355 		mutex_exit(&asc->asc_genlock);
1433*1000Sxc151355 		ath_rx_handler((ath_t *)data);
1434*1000Sxc151355 		return (DDI_INTR_CLAIMED);
1435*1000Sxc151355 	}
1436*1000Sxc151355 	mutex_exit(&asc->asc_genlock);
1437*1000Sxc151355 	return (DDI_INTR_UNCLAIMED);
1438*1000Sxc151355 }
1439*1000Sxc151355 
1440*1000Sxc151355 /*
1441*1000Sxc151355  * following are gld callback routine
1442*1000Sxc151355  * ath_gld_send, ath_gld_ioctl, ath_gld_gstat
1443*1000Sxc151355  * are listed in other corresponding sections.
1444*1000Sxc151355  * reset the hardware w/o losing operational state.  this is
1445*1000Sxc151355  * basically a more efficient way of doing ath_gld_stop, ath_gld_start,
1446*1000Sxc151355  * followed by state transitions to the current 802.11
1447*1000Sxc151355  * operational state.  used to recover from errors rx overrun
1448*1000Sxc151355  * and to reset the hardware when rf gain settings must be reset.
1449*1000Sxc151355  */
1450*1000Sxc151355 
1451*1000Sxc151355 static int
1452*1000Sxc151355 ath_gld_reset(gld_mac_info_t *gld_p)
1453*1000Sxc151355 {
1454*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
1455*1000Sxc151355 
1456*1000Sxc151355 	ath_reset(asc);
1457*1000Sxc151355 	return (GLD_SUCCESS);
1458*1000Sxc151355 }
1459*1000Sxc151355 
1460*1000Sxc151355 
1461*1000Sxc151355 static int
1462*1000Sxc151355 ath_gld_stop(gld_mac_info_t *gld_p)
1463*1000Sxc151355 {
1464*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
1465*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
1466*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1467*1000Sxc151355 
1468*1000Sxc151355 	(void) _ieee80211_new_state(isc, IEEE80211_S_INIT, -1);
1469*1000Sxc151355 	ATH_HAL_INTRSET(ah, 0);
1470*1000Sxc151355 	ath_draintxq(asc);
1471*1000Sxc151355 	if (! asc->asc_invalid)
1472*1000Sxc151355 		ath_stoprecv(asc);
1473*1000Sxc151355 	else
1474*1000Sxc151355 		asc->asc_rxlink = NULL;
1475*1000Sxc151355 	ATH_HAL_SETPOWER(ah, HAL_PM_FULL_SLEEP, 0);
1476*1000Sxc151355 
1477*1000Sxc151355 	asc->asc_invalid = 1;
1478*1000Sxc151355 
1479*1000Sxc151355 	return (GLD_SUCCESS);
1480*1000Sxc151355 }
1481*1000Sxc151355 
1482*1000Sxc151355 int
1483*1000Sxc151355 ath_gld_start(gld_mac_info_t *gld_p)
1484*1000Sxc151355 {
1485*1000Sxc151355 	int ret;
1486*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
1487*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
1488*1000Sxc151355 	struct ieee80211_node *in;
1489*1000Sxc151355 	enum ieee80211_phymode mode;
1490*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1491*1000Sxc151355 	HAL_STATUS status;
1492*1000Sxc151355 	HAL_CHANNEL hchan;
1493*1000Sxc151355 
1494*1000Sxc151355 	/*
1495*1000Sxc151355 	 * Stop anything previously setup.  This is safe
1496*1000Sxc151355 	 * whether this is the first time through or not.
1497*1000Sxc151355 	 */
1498*1000Sxc151355 	ret = ath_gld_stop(gld_p);
1499*1000Sxc151355 	if (ret != GLD_SUCCESS)
1500*1000Sxc151355 		return (ret);
1501*1000Sxc151355 
1502*1000Sxc151355 	/*
1503*1000Sxc151355 	 * The basic interface to setting the hardware in a good
1504*1000Sxc151355 	 * state is ``reset''.  On return the hardware is known to
1505*1000Sxc151355 	 * be powered up and with interrupts disabled.  This must
1506*1000Sxc151355 	 * be followed by initialization of the appropriate bits
1507*1000Sxc151355 	 * and then setup of the interrupt mask.
1508*1000Sxc151355 	 */
1509*1000Sxc151355 	hchan.channel = isc->isc_ibss_chan->ich_freq;
1510*1000Sxc151355 	hchan.channelFlags = ath_chan2flags(isc, isc->isc_ibss_chan);
1511*1000Sxc151355 	if (!ATH_HAL_RESET(ah, (HAL_OPMODE)isc->isc_opmode,
1512*1000Sxc151355 	    &hchan, AH_FALSE, &status)) {
1513*1000Sxc151355 		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_gld_start(): "
1514*1000Sxc151355 		    "unable to reset hardware, hal status %u\n", status));
1515*1000Sxc151355 		return (GLD_FAILURE);
1516*1000Sxc151355 	}
1517*1000Sxc151355 	/*
1518*1000Sxc151355 	 * Setup the hardware after reset: the key cache
1519*1000Sxc151355 	 * is filled as needed and the receive engine is
1520*1000Sxc151355 	 * set going.  Frame transmit is handled entirely
1521*1000Sxc151355 	 * in the frame output path; there's nothing to do
1522*1000Sxc151355 	 * here except setup the interrupt mask.
1523*1000Sxc151355 	 */
1524*1000Sxc151355 	ath_initkeytable(asc);
1525*1000Sxc151355 
1526*1000Sxc151355 	if (ath_startrecv(asc))
1527*1000Sxc151355 		return (GLD_FAILURE);
1528*1000Sxc151355 
1529*1000Sxc151355 	/*
1530*1000Sxc151355 	 * Enable interrupts.
1531*1000Sxc151355 	 */
1532*1000Sxc151355 	asc->asc_imask = HAL_INT_RX | HAL_INT_TX
1533*1000Sxc151355 	    | HAL_INT_RXEOL | HAL_INT_RXORN
1534*1000Sxc151355 	    | HAL_INT_FATAL | HAL_INT_GLOBAL;
1535*1000Sxc151355 	ATH_HAL_INTRSET(ah, asc->asc_imask);
1536*1000Sxc151355 
1537*1000Sxc151355 	isc->isc_state = IEEE80211_S_INIT;
1538*1000Sxc151355 
1539*1000Sxc151355 	/*
1540*1000Sxc151355 	 * The hardware should be ready to go now so it's safe
1541*1000Sxc151355 	 * to kick the 802.11 state machine as it's likely to
1542*1000Sxc151355 	 * immediately call back to us to send mgmt frames.
1543*1000Sxc151355 	 */
1544*1000Sxc151355 	in = isc->isc_bss;
1545*1000Sxc151355 	in->in_chan = isc->isc_ibss_chan;
1546*1000Sxc151355 	mode = ieee80211_chan2mode(isc, in->in_chan);
1547*1000Sxc151355 	if (mode != asc->asc_curmode)
1548*1000Sxc151355 		ath_setcurmode(asc, mode);
1549*1000Sxc151355 	asc->asc_invalid = 0;
1550*1000Sxc151355 	return (GLD_SUCCESS);
1551*1000Sxc151355 }
1552*1000Sxc151355 
1553*1000Sxc151355 
1554*1000Sxc151355 static int32_t
1555*1000Sxc151355 ath_gld_saddr(gld_mac_info_t *gld_p, unsigned char *macaddr)
1556*1000Sxc151355 {
1557*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
1558*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1559*1000Sxc151355 
1560*1000Sxc151355 	ATH_DEBUG((ATH_DBG_GLD, "ath: ath_gld_saddr(): "
1561*1000Sxc151355 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
1562*1000Sxc151355 	    macaddr[0], macaddr[1], macaddr[2],
1563*1000Sxc151355 	    macaddr[3], macaddr[4], macaddr[5]));
1564*1000Sxc151355 
1565*1000Sxc151355 	IEEE80211_ADDR_COPY(asc->asc_isc.isc_macaddr, macaddr);
1566*1000Sxc151355 	ATH_HAL_SETMAC(ah, asc->asc_isc.isc_macaddr);
1567*1000Sxc151355 
1568*1000Sxc151355 	ath_reset(asc);
1569*1000Sxc151355 	return (GLD_SUCCESS);
1570*1000Sxc151355 }
1571*1000Sxc151355 
1572*1000Sxc151355 static int
1573*1000Sxc151355 ath_gld_set_promiscuous(gld_mac_info_t *macinfo, int mode)
1574*1000Sxc151355 {
1575*1000Sxc151355 	ath_t *asc = ATH_STATE(macinfo);
1576*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1577*1000Sxc151355 	uint32_t rfilt;
1578*1000Sxc151355 
1579*1000Sxc151355 	rfilt = ATH_HAL_GETRXFILTER(ah);
1580*1000Sxc151355 	switch (mode) {
1581*1000Sxc151355 	case GLD_MAC_PROMISC_PHYS:
1582*1000Sxc151355 		ATH_HAL_SETRXFILTER(ah, rfilt | HAL_RX_FILTER_PROM);
1583*1000Sxc151355 		break;
1584*1000Sxc151355 	case GLD_MAC_PROMISC_MULTI:
1585*1000Sxc151355 		rfilt |= HAL_RX_FILTER_MCAST;
1586*1000Sxc151355 		rfilt &= ~HAL_RX_FILTER_PROM;
1587*1000Sxc151355 		ATH_HAL_SETRXFILTER(ah, rfilt);
1588*1000Sxc151355 		break;
1589*1000Sxc151355 	case GLD_MAC_PROMISC_NONE:
1590*1000Sxc151355 		ATH_HAL_SETRXFILTER(ah, rfilt & (~HAL_RX_FILTER_PROM));
1591*1000Sxc151355 		break;
1592*1000Sxc151355 	default:
1593*1000Sxc151355 		break;
1594*1000Sxc151355 	}
1595*1000Sxc151355 
1596*1000Sxc151355 	return (GLD_SUCCESS);
1597*1000Sxc151355 }
1598*1000Sxc151355 
1599*1000Sxc151355 static int
1600*1000Sxc151355 ath_gld_set_multicast(gld_mac_info_t *macinfo, uchar_t *mca, int flag)
1601*1000Sxc151355 {
1602*1000Sxc151355 	uint32_t mfilt[2], val, rfilt;
1603*1000Sxc151355 	uint8_t pos;
1604*1000Sxc151355 	ath_t *asc = ATH_STATE(macinfo);
1605*1000Sxc151355 	struct ath_hal *ah = asc->asc_ah;
1606*1000Sxc151355 
1607*1000Sxc151355 	rfilt = ATH_HAL_GETRXFILTER(ah);
1608*1000Sxc151355 
1609*1000Sxc151355 	/* disable multicast */
1610*1000Sxc151355 	if (flag == GLD_MULTI_DISABLE) {
1611*1000Sxc151355 		ATH_HAL_SETRXFILTER(ah, rfilt & (~HAL_RX_FILTER_MCAST));
1612*1000Sxc151355 		return (GLD_SUCCESS);
1613*1000Sxc151355 	}
1614*1000Sxc151355 
1615*1000Sxc151355 	/* enable multicast */
1616*1000Sxc151355 	ATH_HAL_SETRXFILTER(ah, rfilt | HAL_RX_FILTER_MCAST);
1617*1000Sxc151355 
1618*1000Sxc151355 	mfilt[0] = mfilt[1] = 0;
1619*1000Sxc151355 
1620*1000Sxc151355 	/* calculate XOR of eight 6bit values */
1621*1000Sxc151355 	val = ATH_LE_READ_4(mca + 0);
1622*1000Sxc151355 	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1623*1000Sxc151355 	val = ATH_LE_READ_4(mca + 3);
1624*1000Sxc151355 	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1625*1000Sxc151355 	pos &= 0x3f;
1626*1000Sxc151355 	mfilt[pos / 32] |= (1 << (pos % 32));
1627*1000Sxc151355 	ATH_HAL_SETMCASTFILTER(ah, mfilt[0], mfilt[1]);
1628*1000Sxc151355 
1629*1000Sxc151355 	return (GLD_SUCCESS);
1630*1000Sxc151355 }
1631*1000Sxc151355 
1632*1000Sxc151355 static void
1633*1000Sxc151355 ath_wlan_ioctl(ath_t *asc, queue_t *wq, mblk_t *mp, uint32_t cmd)
1634*1000Sxc151355 {
1635*1000Sxc151355 
1636*1000Sxc151355 	struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
1637*1000Sxc151355 	uint32_t len, ret;
1638*1000Sxc151355 	mblk_t *mp1;
1639*1000Sxc151355 
1640*1000Sxc151355 	/* sanity check */
1641*1000Sxc151355 	if (iocp->ioc_count == 0 || !(mp1 = mp->b_cont)) {
1642*1000Sxc151355 		miocnak(wq, mp, 0, EINVAL);
1643*1000Sxc151355 		return;
1644*1000Sxc151355 	}
1645*1000Sxc151355 
1646*1000Sxc151355 	/* assuming single data block */
1647*1000Sxc151355 	if (mp1->b_cont) {
1648*1000Sxc151355 		freemsg(mp1->b_cont);
1649*1000Sxc151355 		mp1->b_cont = NULL;
1650*1000Sxc151355 	}
1651*1000Sxc151355 
1652*1000Sxc151355 	/* we will overwrite everything */
1653*1000Sxc151355 	mp1->b_wptr = mp1->b_rptr;
1654*1000Sxc151355 
1655*1000Sxc151355 	ret = ath_getset(asc, mp1, cmd);
1656*1000Sxc151355 
1657*1000Sxc151355 	len = msgdsize(mp1);
1658*1000Sxc151355 
1659*1000Sxc151355 	miocack(wq, mp, len, ret);
1660*1000Sxc151355 }
1661*1000Sxc151355 
1662*1000Sxc151355 static int
1663*1000Sxc151355 ath_gld_ioctl(gld_mac_info_t *gld_p, queue_t *wq, mblk_t *mp)
1664*1000Sxc151355 {
1665*1000Sxc151355 	struct iocblk *iocp;
1666*1000Sxc151355 	int32_t cmd, err;
1667*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
1668*1000Sxc151355 	boolean_t need_privilege;
1669*1000Sxc151355 
1670*1000Sxc151355 	/*
1671*1000Sxc151355 	 * Validate the command before bothering with the mutexen ...
1672*1000Sxc151355 	 */
1673*1000Sxc151355 	iocp = (struct iocblk *)mp->b_rptr;
1674*1000Sxc151355 	cmd = iocp->ioc_cmd;
1675*1000Sxc151355 	need_privilege = B_TRUE;
1676*1000Sxc151355 	switch (cmd) {
1677*1000Sxc151355 	case WLAN_SET_PARAM:
1678*1000Sxc151355 	case WLAN_COMMAND:
1679*1000Sxc151355 		break;
1680*1000Sxc151355 	case WLAN_GET_PARAM:
1681*1000Sxc151355 		need_privilege = B_FALSE;
1682*1000Sxc151355 		break;
1683*1000Sxc151355 	default:
1684*1000Sxc151355 		ATH_DEBUG((ATH_DBG_GLD, "ath: ath_gld_ioctl(): "
1685*1000Sxc151355 		    "unknown cmd 0x%x", cmd));
1686*1000Sxc151355 		miocnak(wq, mp, 0, EINVAL);
1687*1000Sxc151355 		return (GLD_SUCCESS);
1688*1000Sxc151355 	}
1689*1000Sxc151355 
1690*1000Sxc151355 	if (need_privilege) {
1691*1000Sxc151355 		/*
1692*1000Sxc151355 		 * Check for specific net_config privilege on Solaris 10+.
1693*1000Sxc151355 		 * Otherwise just check for root access ...
1694*1000Sxc151355 		 */
1695*1000Sxc151355 		if (secpolicy_net_config != NULL)
1696*1000Sxc151355 			err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1697*1000Sxc151355 		else
1698*1000Sxc151355 			err = drv_priv(iocp->ioc_cr);
1699*1000Sxc151355 		if (err != 0) {
1700*1000Sxc151355 			miocnak(wq, mp, 0, err);
1701*1000Sxc151355 			return (GLD_SUCCESS);
1702*1000Sxc151355 		}
1703*1000Sxc151355 	}
1704*1000Sxc151355 
1705*1000Sxc151355 	ath_wlan_ioctl(asc, wq, mp, cmd);
1706*1000Sxc151355 	return (GLD_SUCCESS);
1707*1000Sxc151355 }
1708*1000Sxc151355 
1709*1000Sxc151355 static int
1710*1000Sxc151355 ath_gld_gstat(gld_mac_info_t *gld_p, struct gld_stats *glds_p)
1711*1000Sxc151355 {
1712*1000Sxc151355 	ath_t *asc = ATH_STATE(gld_p);
1713*1000Sxc151355 	ieee80211com_t *isc = (ieee80211com_t *)asc;
1714*1000Sxc151355 	struct ieee80211_node *in = isc->isc_bss;
1715*1000Sxc151355 	struct ath_node *an = ATH_NODE(in);
1716*1000Sxc151355 	struct ieee80211_rateset *rs = &in->in_rates;
1717*1000Sxc151355 
1718*1000Sxc151355 	glds_p->glds_crc	= asc->asc_stats.ast_rx_crcerr;
1719*1000Sxc151355 	glds_p->glds_multircv	= 0;
1720*1000Sxc151355 	glds_p->glds_multixmt	= 0;
1721*1000Sxc151355 	glds_p->glds_excoll	= 0;
1722*1000Sxc151355 	glds_p->glds_xmtretry	= an->an_tx_retr;
1723*1000Sxc151355 	glds_p->glds_defer	= 0;
1724*1000Sxc151355 	glds_p->glds_noxmtbuf	= asc->asc_stats.ast_tx_nobuf;
1725*1000Sxc151355 	glds_p->glds_norcvbuf	= asc->asc_stats.ast_rx_fifoerr;
1726*1000Sxc151355 	glds_p->glds_short	= asc->asc_stats.ast_rx_tooshort;
1727*1000Sxc151355 	glds_p->glds_missed	= asc->asc_stats.ast_rx_badcrypt;
1728*1000Sxc151355 	glds_p->glds_speed	= 1000000*(rs->ir_rates[in->in_txrate] &
1729*1000Sxc151355 	    IEEE80211_RATE_VAL) / 2;
1730*1000Sxc151355 	glds_p->glds_duplex	= GLD_DUPLEX_FULL;
1731*1000Sxc151355 
1732*1000Sxc151355 	return (GLD_SUCCESS);
1733*1000Sxc151355 }
1734*1000Sxc151355 
1735*1000Sxc151355 static int
1736*1000Sxc151355 ath_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1737*1000Sxc151355 {
1738*1000Sxc151355 	ath_t *asc;
1739*1000Sxc151355 	ieee80211com_t *isc;
1740*1000Sxc151355 	struct ath_hal *ah;
1741*1000Sxc151355 	uint8_t csz;
1742*1000Sxc151355 	HAL_STATUS status;
1743*1000Sxc151355 	caddr_t regs;
1744*1000Sxc151355 	uint32_t i, val;
1745*1000Sxc151355 	uint16_t vendor_id, device_id, command;
1746*1000Sxc151355 	const char *athname;
1747*1000Sxc151355 	int32_t ath_countrycode = CTRY_DEFAULT;	/* country code */
1748*1000Sxc151355 	int32_t err, ath_regdomain = 0; /* regulatory domain */
1749*1000Sxc151355 	char strbuf[32];
1750*1000Sxc151355 
1751*1000Sxc151355 	switch (cmd) {
1752*1000Sxc151355 	case DDI_RESUME:
1753*1000Sxc151355 		return (DDI_FAILURE);
1754*1000Sxc151355 	case DDI_ATTACH:
1755*1000Sxc151355 		break;
1756*1000Sxc151355 	default:
1757*1000Sxc151355 		return (DDI_FAILURE);
1758*1000Sxc151355 	}
1759*1000Sxc151355 
1760*1000Sxc151355 	if (ddi_soft_state_zalloc(ath_soft_state_p,
1761*1000Sxc151355 	    ddi_get_instance(devinfo)) != DDI_SUCCESS) {
1762*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1763*1000Sxc151355 		    "Unable to alloc softstate\n"));
1764*1000Sxc151355 		return (DDI_FAILURE);
1765*1000Sxc151355 	}
1766*1000Sxc151355 
1767*1000Sxc151355 	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
1768*1000Sxc151355 	isc = (ieee80211com_t *)asc;
1769*1000Sxc151355 	asc->asc_dev = devinfo;
1770*1000Sxc151355 
1771*1000Sxc151355 	ath_halfix_init();
1772*1000Sxc151355 
1773*1000Sxc151355 	mutex_init(&asc->asc_genlock, NULL, MUTEX_DRIVER, NULL);
1774*1000Sxc151355 	mutex_init(&asc->asc_txbuflock, NULL, MUTEX_DRIVER, NULL);
1775*1000Sxc151355 	mutex_init(&asc->asc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
1776*1000Sxc151355 	mutex_init(&asc->asc_gld_sched_lock, NULL, MUTEX_DRIVER, NULL);
1777*1000Sxc151355 
1778*1000Sxc151355 	err = pci_config_setup(devinfo, &asc->asc_cfg_handle);
1779*1000Sxc151355 	if (err != DDI_SUCCESS) {
1780*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1781*1000Sxc151355 		    "pci_config_setup() failed"));
1782*1000Sxc151355 		goto attach_fail0;
1783*1000Sxc151355 	}
1784*1000Sxc151355 
1785*1000Sxc151355 	csz = pci_config_get8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ);
1786*1000Sxc151355 	asc->asc_cachelsz = csz << 2;
1787*1000Sxc151355 	vendor_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_VENID);
1788*1000Sxc151355 	device_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_DEVID);
1789*1000Sxc151355 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): vendor 0x%x, "
1790*1000Sxc151355 	    "device id 0x%x, cache size %d\n", vendor_id, device_id, csz));
1791*1000Sxc151355 
1792*1000Sxc151355 	athname = ath_hal_probe(vendor_id, device_id);
1793*1000Sxc151355 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): athname: %s\n",
1794*1000Sxc151355 	    athname ? athname : "Atheros ???"));
1795*1000Sxc151355 
1796*1000Sxc151355 	/*
1797*1000Sxc151355 	 * Enable response to memory space accesses,
1798*1000Sxc151355 	 * and enabe bus master.
1799*1000Sxc151355 	 */
1800*1000Sxc151355 	command = PCI_COMM_MAE | PCI_COMM_ME;
1801*1000Sxc151355 	pci_config_put16(asc->asc_cfg_handle, PCI_CONF_COMM, command);
1802*1000Sxc151355 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1803*1000Sxc151355 	    "set command reg to 0x%x \n", command));
1804*1000Sxc151355 
1805*1000Sxc151355 	pci_config_put8(asc->asc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
1806*1000Sxc151355 	val = pci_config_get32(asc->asc_cfg_handle, 0x40);
1807*1000Sxc151355 	if ((val & 0x0000ff00) != 0)
1808*1000Sxc151355 		pci_config_put32(asc->asc_cfg_handle, 0x40, val & 0xffff00ff);
1809*1000Sxc151355 
1810*1000Sxc151355 	err = ddi_regs_map_setup(devinfo, 1,
1811*1000Sxc151355 	    &regs, 0, 0, &ath_reg_accattr, &asc->asc_io_handle);
1812*1000Sxc151355 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1813*1000Sxc151355 	    "regs map1 = %x err=%d\n", regs, err));
1814*1000Sxc151355 	if (err != DDI_SUCCESS) {
1815*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1816*1000Sxc151355 		    "ddi_regs_map_setup() failed"));
1817*1000Sxc151355 		goto attach_fail1;
1818*1000Sxc151355 	}
1819*1000Sxc151355 
1820*1000Sxc151355 	ah = ath_hal_attach(device_id, asc, 0, regs, &status);
1821*1000Sxc151355 	if (ah == NULL) {
1822*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1823*1000Sxc151355 		    "unable to attach hw; HAL status %u\n", status));
1824*1000Sxc151355 		goto attach_fail2;
1825*1000Sxc151355 	}
1826*1000Sxc151355 	ATH_HAL_INTRSET(ah, 0);
1827*1000Sxc151355 	asc->asc_ah = ah;
1828*1000Sxc151355 
1829*1000Sxc151355 	if (ah->ah_abi != HAL_ABI_VERSION) {
1830*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1831*1000Sxc151355 		    "HAL ABI mismatch detected (0x%x != 0x%x)\n",
1832*1000Sxc151355 		    ah->ah_abi, HAL_ABI_VERSION));
1833*1000Sxc151355 		goto attach_fail3;
1834*1000Sxc151355 	}
1835*1000Sxc151355 
1836*1000Sxc151355 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1837*1000Sxc151355 	    "HAL ABI version 0x%x\n", ah->ah_abi));
1838*1000Sxc151355 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1839*1000Sxc151355 	    "HAL mac version %d.%d, phy version %d.%d\n",
1840*1000Sxc151355 	    ah->ah_macVersion, ah->ah_macRev,
1841*1000Sxc151355 	    ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
1842*1000Sxc151355 	if (ah->ah_analog5GhzRev)
1843*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1844*1000Sxc151355 		    "HAL 5ghz radio version %d.%d\n",
1845*1000Sxc151355 		    ah->ah_analog5GhzRev >> 4,
1846*1000Sxc151355 		    ah->ah_analog5GhzRev & 0xf));
1847*1000Sxc151355 	if (ah->ah_analog2GhzRev)
1848*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1849*1000Sxc151355 		    "HAL 2ghz radio version %d.%d\n",
1850*1000Sxc151355 		    ah->ah_analog2GhzRev >> 4,
1851*1000Sxc151355 		    ah->ah_analog2GhzRev & 0xf));
1852*1000Sxc151355 
1853*1000Sxc151355 	/*
1854*1000Sxc151355 	 * Check if the MAC has multi-rate retry support.
1855*1000Sxc151355 	 * We do this by trying to setup a fake extended
1856*1000Sxc151355 	 * descriptor.  MAC's that don't have support will
1857*1000Sxc151355 	 * return false w/o doing anything.  MAC's that do
1858*1000Sxc151355 	 * support it will return true w/o doing anything.
1859*1000Sxc151355 	 */
1860*1000Sxc151355 	asc->asc_mrretry = ATH_HAL_SETUPXTXDESC(ah, NULL, 0, 0, 0, 0, 0, 0);
1861*1000Sxc151355 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1862*1000Sxc151355 	    "multi rate retry support=%x\n",
1863*1000Sxc151355 	    asc->asc_mrretry));
1864*1000Sxc151355 
1865*1000Sxc151355 	ATH_HAL_GETREGDOMAIN(ah, (uint32_t *)&ath_regdomain);
1866*1000Sxc151355 	ATH_HAL_GETCOUNTRYCODE(ah, &ath_countrycode);
1867*1000Sxc151355 	/*
1868*1000Sxc151355 	 * Collect the channel list using the default country
1869*1000Sxc151355 	 * code and including outdoor channels.  The 802.11 layer
1870*1000Sxc151355 	 * is resposible for filtering this list to a set of
1871*1000Sxc151355 	 * channels that it considers ok to use.
1872*1000Sxc151355 	 */
1873*1000Sxc151355 	asc->asc_have11g = 0;
1874*1000Sxc151355 
1875*1000Sxc151355 	/* enable outdoor use, enable extended channels */
1876*1000Sxc151355 	err = ath_getchannels(asc, ath_countrycode, AH_FALSE, AH_TRUE);
1877*1000Sxc151355 	if (err != 0)
1878*1000Sxc151355 		goto attach_fail3;
1879*1000Sxc151355 
1880*1000Sxc151355 	/*
1881*1000Sxc151355 	 * Setup rate tables for all potential media types.
1882*1000Sxc151355 	 */
1883*1000Sxc151355 	ath_rate_setup(asc, IEEE80211_MODE_11A);
1884*1000Sxc151355 	ath_rate_setup(asc, IEEE80211_MODE_11B);
1885*1000Sxc151355 	ath_rate_setup(asc, IEEE80211_MODE_11G);
1886*1000Sxc151355 	ath_rate_setup(asc, IEEE80211_MODE_TURBO);
1887*1000Sxc151355 
1888*1000Sxc151355 	/* Setup here so ath_rate_update is happy */
1889*1000Sxc151355 	ath_setcurmode(asc, IEEE80211_MODE_11A);
1890*1000Sxc151355 
1891*1000Sxc151355 	err = ath_desc_alloc(devinfo, asc);
1892*1000Sxc151355 	if (err != DDI_SUCCESS) {
1893*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1894*1000Sxc151355 		    "failed to allocate descriptors: %d\n", err));
1895*1000Sxc151355 		goto attach_fail3;
1896*1000Sxc151355 	}
1897*1000Sxc151355 
1898*1000Sxc151355 	/* Setup transmit queues in the HAL */
1899*1000Sxc151355 	if (ath_txq_setup(asc))
1900*1000Sxc151355 		goto attach_fail4;
1901*1000Sxc151355 
1902*1000Sxc151355 	ATH_HAL_GETMAC(ah, asc->asc_isc.isc_macaddr);
1903*1000Sxc151355 
1904*1000Sxc151355 	/* setup gld */
1905*1000Sxc151355 	if ((isc->isc_dev = gld_mac_alloc(devinfo)) == NULL) {
1906*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1907*1000Sxc151355 		"gld_mac_alloc = %p\n", (void *)isc->isc_dev));
1908*1000Sxc151355 		goto attach_fail4;
1909*1000Sxc151355 	}
1910*1000Sxc151355 
1911*1000Sxc151355 	/* pre initialize some variables for isc */
1912*1000Sxc151355 	isc->isc_dev->gldm_private	= (caddr_t)asc;
1913*1000Sxc151355 
1914*1000Sxc151355 	isc->isc_gld_reset		= ath_gld_reset;
1915*1000Sxc151355 	isc->isc_gld_start		= ath_gld_start;
1916*1000Sxc151355 	isc->isc_gld_stop		= ath_gld_stop;
1917*1000Sxc151355 	isc->isc_gld_saddr		= ath_gld_saddr;
1918*1000Sxc151355 	isc->isc_gld_send		= ath_gld_send;
1919*1000Sxc151355 	isc->isc_gld_set_promiscuous	= ath_gld_set_promiscuous;
1920*1000Sxc151355 	isc->isc_gld_gstat		= ath_gld_gstat;
1921*1000Sxc151355 	isc->isc_gld_ioctl		= ath_gld_ioctl;
1922*1000Sxc151355 	isc->isc_gld_set_multicast	= ath_gld_set_multicast;
1923*1000Sxc151355 	isc->isc_gld_intr		= ath_gld_intr;
1924*1000Sxc151355 
1925*1000Sxc151355 	isc->isc_mgmt_send = ath_mgmt_send;
1926*1000Sxc151355 	isc->isc_new_state = ath_new_state;
1927*1000Sxc151355 	isc->isc_phytype = IEEE80211_T_OFDM;
1928*1000Sxc151355 	isc->isc_opmode = IEEE80211_M_STA;
1929*1000Sxc151355 	isc->isc_caps = IEEE80211_C_WEP | IEEE80211_C_IBSS |
1930*1000Sxc151355 	    IEEE80211_C_HOSTAP;
1931*1000Sxc151355 	/* 11g support is identified when we fetch the channel set */
1932*1000Sxc151355 	if (asc->asc_have11g)
1933*1000Sxc151355 		isc->isc_caps |= IEEE80211_C_SHPREAMBLE;
1934*1000Sxc151355 	isc->isc_node_alloc = ath_node_alloc;
1935*1000Sxc151355 	isc->isc_node_free = ath_node_free;
1936*1000Sxc151355 	isc->isc_node_copy = ath_node_copy;
1937*1000Sxc151355 	isc->isc_rate_ctl = ath_rate_ctl;
1938*1000Sxc151355 	isc->isc_calibrate = ath_calibrate;
1939*1000Sxc151355 	(void) ieee80211_ifattach(isc->isc_dev);
1940*1000Sxc151355 
1941*1000Sxc151355 	isc->isc_dev->gldm_devinfo		= devinfo;
1942*1000Sxc151355 	isc->isc_dev->gldm_vendor_addr		= asc->asc_isc.isc_macaddr;
1943*1000Sxc151355 	isc->isc_dev->gldm_broadcast_addr	= ath_broadcast_addr;
1944*1000Sxc151355 	isc->isc_dev->gldm_ident		= "Atheros driver";
1945*1000Sxc151355 	isc->isc_dev->gldm_type			= DL_ETHER;
1946*1000Sxc151355 	isc->isc_dev->gldm_minpkt		= 0;
1947*1000Sxc151355 	isc->isc_dev->gldm_maxpkt		= 1500;
1948*1000Sxc151355 	isc->isc_dev->gldm_addrlen		= ETHERADDRL;
1949*1000Sxc151355 	isc->isc_dev->gldm_saplen		= -2;
1950*1000Sxc151355 	isc->isc_dev->gldm_ppa			= ddi_get_instance(devinfo);
1951*1000Sxc151355 
1952*1000Sxc151355 	asc->asc_rx_pend = 0;
1953*1000Sxc151355 	ATH_HAL_INTRSET(ah, 0);
1954*1000Sxc151355 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
1955*1000Sxc151355 	    &asc->asc_softint_id, NULL, 0, ath_softint_handler, (caddr_t)asc);
1956*1000Sxc151355 	if (err != DDI_SUCCESS) {
1957*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1958*1000Sxc151355 		    "ddi_add_softintr() failed"));
1959*1000Sxc151355 		goto attach_fail5;
1960*1000Sxc151355 	}
1961*1000Sxc151355 
1962*1000Sxc151355 	if (ddi_get_iblock_cookie(devinfo, 0, &asc->asc_iblock)
1963*1000Sxc151355 	    != DDI_SUCCESS) {
1964*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1965*1000Sxc151355 		    "Can not get iblock cookie for INT\n"));
1966*1000Sxc151355 		goto attach_fail6;
1967*1000Sxc151355 	}
1968*1000Sxc151355 
1969*1000Sxc151355 	if (ddi_add_intr(devinfo, 0, NULL, NULL, gld_intr,
1970*1000Sxc151355 	    (caddr_t)asc->asc_isc.isc_dev) != DDI_SUCCESS) {
1971*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1972*1000Sxc151355 		    "Can not set intr for ATH driver\n"));
1973*1000Sxc151355 		goto attach_fail6;
1974*1000Sxc151355 	}
1975*1000Sxc151355 	isc->isc_dev->gldm_cookie = asc->asc_iblock;
1976*1000Sxc151355 
1977*1000Sxc151355 	if (err = gld_register(devinfo, "ath", isc->isc_dev)) {
1978*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1979*1000Sxc151355 		    "gld_register err %x\n", err));
1980*1000Sxc151355 		goto attach_fail7;
1981*1000Sxc151355 	}
1982*1000Sxc151355 
1983*1000Sxc151355 	/* Create minor node of type DDI_NT_NET_WIFI */
1984*1000Sxc151355 	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
1985*1000Sxc151355 	    ATH_NODENAME, isc->isc_dev->gldm_ppa);
1986*1000Sxc151355 	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
1987*1000Sxc151355 	    isc->isc_dev->gldm_ppa + 1, DDI_NT_NET_WIFI, 0);
1988*1000Sxc151355 	if (err != DDI_SUCCESS)
1989*1000Sxc151355 		ATH_DEBUG((ATH_DBG_ATTACH, "WARN: ath: ath_attach(): "
1990*1000Sxc151355 		    "Create minor node failed - %d\n", err));
1991*1000Sxc151355 
1992*1000Sxc151355 	asc->asc_invalid = 1;
1993*1000Sxc151355 	return (DDI_SUCCESS);
1994*1000Sxc151355 attach_fail7:
1995*1000Sxc151355 	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
1996*1000Sxc151355 attach_fail6:
1997*1000Sxc151355 	ddi_remove_softintr(asc->asc_softint_id);
1998*1000Sxc151355 attach_fail5:
1999*1000Sxc151355 	gld_mac_free(isc->isc_dev);
2000*1000Sxc151355 attach_fail4:
2001*1000Sxc151355 	ath_desc_free(asc);
2002*1000Sxc151355 attach_fail3:
2003*1000Sxc151355 	ah->ah_detach(asc->asc_ah);
2004*1000Sxc151355 attach_fail2:
2005*1000Sxc151355 	ddi_regs_map_free(&asc->asc_io_handle);
2006*1000Sxc151355 attach_fail1:
2007*1000Sxc151355 	pci_config_teardown(&asc->asc_cfg_handle);
2008*1000Sxc151355 attach_fail0:
2009*1000Sxc151355 	asc->asc_invalid = 1;
2010*1000Sxc151355 	mutex_destroy(&asc->asc_txbuflock);
2011*1000Sxc151355 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2012*1000Sxc151355 		if (ATH_TXQ_SETUP(asc, i)) {
2013*1000Sxc151355 			struct ath_txq *txq = &asc->asc_txq[i];
2014*1000Sxc151355 			mutex_destroy(&txq->axq_lock);
2015*1000Sxc151355 		}
2016*1000Sxc151355 	}
2017*1000Sxc151355 	mutex_destroy(&asc->asc_rxbuflock);
2018*1000Sxc151355 	mutex_destroy(&asc->asc_genlock);
2019*1000Sxc151355 	mutex_destroy(&asc->asc_gld_sched_lock);
2020*1000Sxc151355 	ddi_soft_state_free(ath_soft_state_p, ddi_get_instance(devinfo));
2021*1000Sxc151355 
2022*1000Sxc151355 	return (DDI_FAILURE);
2023*1000Sxc151355 }
2024*1000Sxc151355 
2025*1000Sxc151355 static int32_t
2026*1000Sxc151355 ath_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
2027*1000Sxc151355 {
2028*1000Sxc151355 	ath_t *asc;
2029*1000Sxc151355 	int32_t i;
2030*1000Sxc151355 
2031*1000Sxc151355 	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
2032*1000Sxc151355 	ASSERT(asc != NULL);
2033*1000Sxc151355 
2034*1000Sxc151355 	switch (cmd) {
2035*1000Sxc151355 	default:
2036*1000Sxc151355 		return (DDI_FAILURE);
2037*1000Sxc151355 
2038*1000Sxc151355 	case DDI_SUSPEND:
2039*1000Sxc151355 		return (DDI_FAILURE);
2040*1000Sxc151355 
2041*1000Sxc151355 	case DDI_DETACH:
2042*1000Sxc151355 		break;
2043*1000Sxc151355 	}
2044*1000Sxc151355 
2045*1000Sxc151355 	ASSERT(asc->asc_isc.isc_mf_thread == NULL);
2046*1000Sxc151355 
2047*1000Sxc151355 	/* disable interrupts */
2048*1000Sxc151355 	ATH_HAL_INTRSET(asc->asc_ah, 0);
2049*1000Sxc151355 
2050*1000Sxc151355 	/* free intterrupt resources */
2051*1000Sxc151355 	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
2052*1000Sxc151355 	ddi_remove_softintr(asc->asc_softint_id);
2053*1000Sxc151355 
2054*1000Sxc151355 	/* detach 802.11 and Atheros HAL */
2055*1000Sxc151355 	ieee80211_ifdetach(asc->asc_isc.isc_dev);
2056*1000Sxc151355 	ath_desc_free(asc);
2057*1000Sxc151355 	asc->asc_ah->ah_detach(asc->asc_ah);
2058*1000Sxc151355 	ath_halfix_finit();
2059*1000Sxc151355 
2060*1000Sxc151355 	/* detach gld */
2061*1000Sxc151355 	if (gld_unregister(asc->asc_isc.isc_dev) != 0)
2062*1000Sxc151355 		return (DDI_FAILURE);
2063*1000Sxc151355 	gld_mac_free(asc->asc_isc.isc_dev);
2064*1000Sxc151355 
2065*1000Sxc151355 	/* free io handle */
2066*1000Sxc151355 	ddi_regs_map_free(&asc->asc_io_handle);
2067*1000Sxc151355 	pci_config_teardown(&asc->asc_cfg_handle);
2068*1000Sxc151355 
2069*1000Sxc151355 	/* destroy locks */
2070*1000Sxc151355 	mutex_destroy(&asc->asc_txbuflock);
2071*1000Sxc151355 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2072*1000Sxc151355 		if (ATH_TXQ_SETUP(asc, i)) {
2073*1000Sxc151355 			struct ath_txq *txq = &asc->asc_txq[i];
2074*1000Sxc151355 			mutex_destroy(&txq->axq_lock);
2075*1000Sxc151355 		}
2076*1000Sxc151355 	}
2077*1000Sxc151355 	mutex_destroy(&asc->asc_rxbuflock);
2078*1000Sxc151355 	mutex_destroy(&asc->asc_genlock);
2079*1000Sxc151355 	mutex_destroy(&asc->asc_gld_sched_lock);
2080*1000Sxc151355 
2081*1000Sxc151355 	ddi_remove_minor_node(devinfo, NULL);
2082*1000Sxc151355 	ddi_soft_state_free(ath_soft_state_p, ddi_get_instance(devinfo));
2083*1000Sxc151355 
2084*1000Sxc151355 	return (DDI_SUCCESS);
2085*1000Sxc151355 }
2086*1000Sxc151355 
2087*1000Sxc151355 static struct module_info ath_module_info = {
2088*1000Sxc151355 	0,	/* ATH_IDNUM, */
2089*1000Sxc151355 	"ath",	/* ATH_DRIVER_NAME, */
2090*1000Sxc151355 	0,
2091*1000Sxc151355 	INFPSZ,
2092*1000Sxc151355 	4096,	/* ATH_HIWAT, */
2093*1000Sxc151355 	128,	/* ATH_LOWAT */
2094*1000Sxc151355 };
2095*1000Sxc151355 
2096*1000Sxc151355 static struct qinit ath_r_qinit = {	/* read queues */
2097*1000Sxc151355 	NULL,
2098*1000Sxc151355 	gld_rsrv,
2099*1000Sxc151355 	gld_open,
2100*1000Sxc151355 	gld_close,
2101*1000Sxc151355 	NULL,
2102*1000Sxc151355 	&ath_module_info,
2103*1000Sxc151355 	NULL
2104*1000Sxc151355 };
2105*1000Sxc151355 
2106*1000Sxc151355 static struct qinit ath_w_qinit = {	/* write queues */
2107*1000Sxc151355 	gld_wput,
2108*1000Sxc151355 	gld_wsrv,
2109*1000Sxc151355 	NULL,
2110*1000Sxc151355 	NULL,
2111*1000Sxc151355 	NULL,
2112*1000Sxc151355 	&ath_module_info,
2113*1000Sxc151355 	NULL
2114*1000Sxc151355 };
2115*1000Sxc151355 
2116*1000Sxc151355 static struct streamtab ath_streamtab = {
2117*1000Sxc151355 	&ath_r_qinit,
2118*1000Sxc151355 	&ath_w_qinit,
2119*1000Sxc151355 	NULL,
2120*1000Sxc151355 	NULL
2121*1000Sxc151355 };
2122*1000Sxc151355 
2123*1000Sxc151355 static struct cb_ops ath_cb_ops = {
2124*1000Sxc151355 	nulldev,		/* cb_open */
2125*1000Sxc151355 	nulldev,		/* cb_close */
2126*1000Sxc151355 	nodev,			/* cb_strategy */
2127*1000Sxc151355 	nodev,			/* cb_print */
2128*1000Sxc151355 	nodev,			/* cb_dump */
2129*1000Sxc151355 	nodev,			/* cb_read */
2130*1000Sxc151355 	nodev,			/* cb_write */
2131*1000Sxc151355 	nodev,			/* cb_ioctl */
2132*1000Sxc151355 	nodev,			/* cb_devmap */
2133*1000Sxc151355 	nodev,			/* cb_mmap */
2134*1000Sxc151355 	nodev,			/* cb_segmap */
2135*1000Sxc151355 	nochpoll,		/* cb_chpoll */
2136*1000Sxc151355 	ddi_prop_op,		/* cb_prop_op */
2137*1000Sxc151355 	&ath_streamtab,		/* cb_stream */
2138*1000Sxc151355 	D_MP,			/* cb_flag */
2139*1000Sxc151355 	0,			/* cb_rev */
2140*1000Sxc151355 	nodev,			/* cb_aread */
2141*1000Sxc151355 	nodev			/* cb_awrite */
2142*1000Sxc151355 };
2143*1000Sxc151355 
2144*1000Sxc151355 static struct dev_ops ath_dev_ops = {
2145*1000Sxc151355 	DEVO_REV,		/* devo_rev */
2146*1000Sxc151355 	0,			/* devo_refcnt */
2147*1000Sxc151355 	gld_getinfo,		/* devo_getinfo */
2148*1000Sxc151355 	nulldev,		/* devo_identify */
2149*1000Sxc151355 	nulldev,		/* devo_probe */
2150*1000Sxc151355 	ath_attach,		/* devo_attach */
2151*1000Sxc151355 	ath_detach,		/* devo_detach */
2152*1000Sxc151355 	nodev,			/* devo_reset */
2153*1000Sxc151355 	&ath_cb_ops,		/* devo_cb_ops */
2154*1000Sxc151355 	(struct bus_ops *)NULL,	/* devo_bus_ops */
2155*1000Sxc151355 	NULL			/* devo_power */
2156*1000Sxc151355 };
2157*1000Sxc151355 
2158*1000Sxc151355 static struct modldrv ath_modldrv = {
2159*1000Sxc151355 	&mod_driverops,		/* Type of module.  This one is a driver */
2160*1000Sxc151355 	"ath driver 1.1",	/* short description */
2161*1000Sxc151355 	&ath_dev_ops		/* driver specific ops */
2162*1000Sxc151355 };
2163*1000Sxc151355 
2164*1000Sxc151355 static struct modlinkage modlinkage = {
2165*1000Sxc151355 	MODREV_1, (void *)&ath_modldrv, NULL
2166*1000Sxc151355 };
2167*1000Sxc151355 
2168*1000Sxc151355 
2169*1000Sxc151355 int
2170*1000Sxc151355 _info(struct modinfo *modinfop)
2171*1000Sxc151355 {
2172*1000Sxc151355 	return (mod_info(&modlinkage, modinfop));
2173*1000Sxc151355 }
2174*1000Sxc151355 
2175*1000Sxc151355 int
2176*1000Sxc151355 _init(void)
2177*1000Sxc151355 {
2178*1000Sxc151355 	int status;
2179*1000Sxc151355 
2180*1000Sxc151355 	status = ddi_soft_state_init(&ath_soft_state_p, sizeof (ath_t), 1);
2181*1000Sxc151355 	if (status != 0)
2182*1000Sxc151355 		return (status);
2183*1000Sxc151355 
2184*1000Sxc151355 	mutex_init(&ath_loglock, NULL, MUTEX_DRIVER, NULL);
2185*1000Sxc151355 	status = mod_install(&modlinkage);
2186*1000Sxc151355 	if (status != 0) {
2187*1000Sxc151355 		ddi_soft_state_fini(&ath_soft_state_p);
2188*1000Sxc151355 		mutex_destroy(&ath_loglock);
2189*1000Sxc151355 	}
2190*1000Sxc151355 
2191*1000Sxc151355 	return (status);
2192*1000Sxc151355 }
2193*1000Sxc151355 
2194*1000Sxc151355 int
2195*1000Sxc151355 _fini(void)
2196*1000Sxc151355 {
2197*1000Sxc151355 	int status;
2198*1000Sxc151355 
2199*1000Sxc151355 	status = mod_remove(&modlinkage);
2200*1000Sxc151355 	if (status == 0) {
2201*1000Sxc151355 		ddi_soft_state_fini(&ath_soft_state_p);
2202*1000Sxc151355 		mutex_destroy(&ath_loglock);
2203*1000Sxc151355 	}
2204*1000Sxc151355 	return (status);
2205*1000Sxc151355 }
2206