xref: /openbsd-src/sys/dev/ic/pgt.c (revision 4c1e55dc91edd6e69ccc60ce855900fbc12cf34f)
1 /*	$OpenBSD: pgt.c,v 1.68 2011/12/01 23:34:08 miod Exp $  */
2 
3 /*
4  * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
22  * Copyright (c) 2004 Brian Fundakowski Feldman
23  * All rights reserved.
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  * 1. Redistributions of source code must retain the above copyright
29  *    notice, this list of conditions and the following disclaimer.
30  * 2. Redistributions in binary form must reproduce the above copyright
31  *    notice, this list of conditions and the following disclaimer in the
32  *    documentation and/or other materials provided with the distribution.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
35  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
38  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44  * SUCH DAMAGE.
45  */
46 
47 #include <sys/cdefs.h>
48 #include "bpfilter.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54 #include <sys/socket.h>
55 #include <sys/mbuf.h>
56 #include <sys/endian.h>
57 #include <sys/sockio.h>
58 #include <sys/kthread.h>
59 #include <sys/time.h>
60 #include <sys/ioctl.h>
61 #include <sys/device.h>
62 #include <sys/workq.h>
63 
64 #include <machine/bus.h>
65 #include <machine/endian.h>
66 #include <machine/intr.h>
67 
68 #include <net/if.h>
69 #include <net/if_arp.h>
70 #include <net/if_dl.h>
71 #include <net/if_llc.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 
75 #if NBPFILTER > 0
76 #include <net/bpf.h>
77 #endif
78 
79 #ifdef INET
80 #include <netinet/in.h>
81 #include <netinet/in_systm.h>
82 #include <netinet/in_var.h>
83 #include <netinet/if_ether.h>
84 #include <netinet/ip.h>
85 #endif
86 
87 #include <net80211/ieee80211_var.h>
88 #include <net80211/ieee80211_radiotap.h>
89 
90 #include <dev/ic/pgtreg.h>
91 #include <dev/ic/pgtvar.h>
92 
93 #include <dev/ic/if_wireg.h>
94 #include <dev/ic/if_wi_ieee.h>
95 #include <dev/ic/if_wivar.h>
96 
97 #ifdef PGT_DEBUG
98 #define DPRINTF(x)	do { printf x; } while (0)
99 #else
100 #define DPRINTF(x)
101 #endif
102 
103 #define	SETOID(oid, var, size) {					\
104 	if (pgt_oid_set(sc, oid, var, size) != 0)			\
105 		break;							\
106 }
107 
108 /*
109  * This is a driver for the Intersil Prism family of 802.11g network cards,
110  * based upon version 1.2 of the Linux driver and firmware found at
111  * http://www.prism54.org/.
112  */
113 
114 #define SCAN_TIMEOUT			5	/* 5 seconds */
115 
/* Autoconf glue: driver class descriptor for "pgt" network devices. */
struct cfdriver pgt_cd = {
        NULL, "pgt", DV_IFNET
};
119 
120 void	 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
121 int	 pgt_media_change(struct ifnet *ifp);
122 void	 pgt_write_memory_barrier(struct pgt_softc *);
123 uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
124 void	 pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
125 void	 pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
126 void	 pgt_debug_events(struct pgt_softc *, const char *);
127 uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
128 void	 pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
129 int	 pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
130 	     struct pgt_desc *);
131 void	 pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
132 int	 pgt_load_firmware(struct pgt_softc *);
133 void	 pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
134 	     struct pgt_frag *);
135 int	 pgt_reset(struct pgt_softc *);
136 void	 pgt_stop(struct pgt_softc *, unsigned int);
137 void	 pgt_reboot(struct pgt_softc *);
138 void	 pgt_init_intr(struct pgt_softc *);
139 void	 pgt_update_intr(struct pgt_softc *, int);
140 struct mbuf
141 	*pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
142 	     struct mbuf *, struct ieee80211_node **);
143 void	 pgt_input_frames(struct pgt_softc *, struct mbuf *);
144 void	 pgt_wakeup_intr(struct pgt_softc *);
145 void	 pgt_sleep_intr(struct pgt_softc *);
146 void	 pgt_empty_traps(struct pgt_softc_kthread *);
147 void	 pgt_per_device_kthread(void *);
148 void	 pgt_async_reset(struct pgt_softc *);
149 void	 pgt_async_update(struct pgt_softc *);
150 void	 pgt_txdone(struct pgt_softc *, enum pgt_queue);
151 void	 pgt_rxdone(struct pgt_softc *, enum pgt_queue);
152 void	 pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
153 void	 pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
154 struct mbuf
155 	*pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
156 int	 pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
157 int	 pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
158 int	 pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
159 void	 pgt_state_dump(struct pgt_softc *);
160 int	 pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
161 void	 pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
162 	     struct pgt_desc *, uint16_t, int);
163 void	 pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
164 struct ieee80211_node
165 	*pgt_ieee80211_node_alloc(struct ieee80211com *);
166 void	 pgt_ieee80211_newassoc(struct ieee80211com *,
167 	     struct ieee80211_node *, int);
168 void	 pgt_ieee80211_node_free(struct ieee80211com *,
169 	    struct ieee80211_node *);
170 void	 pgt_ieee80211_node_copy(struct ieee80211com *,
171 	     struct ieee80211_node *,
172 	     const struct ieee80211_node *);
173 int	 pgt_ieee80211_send_mgmt(struct ieee80211com *,
174 	     struct ieee80211_node *, int, int, int);
175 int	 pgt_net_attach(struct pgt_softc *);
176 void	 pgt_start(struct ifnet *);
177 int	 pgt_ioctl(struct ifnet *, u_long, caddr_t);
178 void	 pgt_obj_bss2scanres(struct pgt_softc *,
179 	     struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
180 void	 node_mark_active_ap(void *, struct ieee80211_node *);
181 void	 node_mark_active_adhoc(void *, struct ieee80211_node *);
182 void	 pgt_watchdog(struct ifnet *);
183 int	 pgt_init(struct ifnet *);
184 void	 pgt_update_hw_from_sw(struct pgt_softc *, int, int);
185 void	 pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
186 	     struct pgt_obj_mlme *);
187 void	 pgt_update_sw_from_hw(struct pgt_softc *,
188 	     struct pgt_async_trap *, struct mbuf *);
189 int	 pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
190 int	 pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
191 int	 pgt_dma_alloc(struct pgt_softc *);
192 int	 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
193 void	 pgt_dma_free(struct pgt_softc *);
194 void	 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
195 void	 pgt_resume(void *, void *);
196 
/*
 * Order all outstanding bus-space writes before any later device
 * access (write barrier only; reads are not ordered).
 */
void
pgt_write_memory_barrier(struct pgt_softc *sc)
{
	bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
	    BUS_SPACE_BARRIER_WRITE);
}
203 
204 u_int32_t
205 pgt_read_4(struct pgt_softc *sc, uint16_t offset)
206 {
207 	return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
208 }
209 
/*
 * Write a 32-bit device register.  No read-back is done, so the write
 * may remain posted; use pgt_write_4_flush() when it must reach the
 * device before continuing.
 */
void
pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
}
215 
216 /*
217  * Write out 4 bytes and cause a PCI flush by reading back in on a
218  * harmless register.
219  */
void
pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
	/* Read back a harmless register to force the posted write out. */
	(void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
}
226 
227 /*
228  * Print the state of events in the queues from an interrupt or a trigger.
229  */
/*
 * Dump the outstanding-fragment count of all six queues (driver-posted
 * minus device-consumed indices) when SC_DEBUG_EVENTS is enabled.
 */
void
pgt_debug_events(struct pgt_softc *sc, const char *when)
{
#define	COUNT(i)							\
	letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -			\
	letoh32(sc->sc_cb->pcb_device_curfrag[i])
	if (sc->sc_debug & SC_DEBUG_EVENTS)
		DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
		    sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
		    COUNT(3), COUNT(4), COUNT(5)));
#undef COUNT
}
242 
/*
 * Number of fragments the driver has posted on queue pq that the
 * device has not yet consumed.  Unsigned subtraction of the two
 * little-endian counters makes counter wrap-around harmless.
 */
uint32_t
pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
{
	return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
	    letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
}
249 
/*
 * Re-arm an rx descriptor's control-block fragment entry: point it at
 * the descriptor's DMA buffer with a full-size, zero-flags entry so
 * the device can fill it again.
 */
void
pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = 0;

	/*
	 * NOTE(review): POSTWRITE after updating the frag looks unusual
	 * (PREWRITE before handing the buffer to the device would be the
	 * expected bus_dma(9) idiom) -- confirm against the sync pattern
	 * used elsewhere in this driver before changing.
	 */
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
}
260 
/*
 * Map a tx descriptor's buffer for DMA and fill in its control-block
 * fragment entry.  Returns 0 on success or the bus_dmamap_load(9)
 * error (buffer left unmapped on failure).
 */
int
pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_desc *pd)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
	    PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("%s: unable to load %s tx DMA: %d\n",
		    sc->sc_dev.dv_xname,
		    pgt_queue_is_data(pq) ? "data" : "mgmt", error));
		return (error);
	}
	pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = htole16(0);

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return (0);
}
285 
/*
 * Tear down a tx descriptor's DMA mapping and forget its bus address.
 */
void
pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
        bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
	pd->pd_dmaaddr = 0;
}
292 
293 int
294 pgt_load_firmware(struct pgt_softc *sc)
295 {
296 	int error, reg, dirreg, fwoff, ucodeoff, fwlen;
297 	uint8_t *ucode;
298 	uint32_t *uc;
299 	size_t size;
300 	char *name;
301 
302 	if (sc->sc_flags & SC_ISL3877)
303 		name = "pgt-isl3877";
304 	else
305 		name = "pgt-isl3890";	/* includes isl3880 */
306 
307 	error = loadfirmware(name, &ucode, &size);
308 
309 	if (error != 0) {
310 		DPRINTF(("%s: error %d, could not read firmware %s\n",
311 		    sc->sc_dev.dv_xname, error, name));
312 		return (EIO);
313 	}
314 
315 	if (size & 3) {
316 		DPRINTF(("%s: bad firmware size %u\n",
317 		    sc->sc_dev.dv_xname, size));
318 		free(ucode, M_DEVBUF);
319 		return (EINVAL);
320 	}
321 
322 	pgt_reboot(sc);
323 
324 	fwoff = 0;
325 	ucodeoff = 0;
326 	uc = (uint32_t *)ucode;
327 	reg = PGT_FIRMWARE_INTERNAL_OFFSET;
328 	while (fwoff < size) {
329 		pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);
330 
331 		if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
332 			fwlen = PGT_DIRECT_MEMORY_SIZE;
333 		else
334 			fwlen = size - fwoff;
335 
336 		dirreg = PGT_DIRECT_MEMORY_OFFSET;
337 		while (fwlen > 4) {
338 			pgt_write_4(sc, dirreg, uc[ucodeoff]);
339 			fwoff += 4;
340 			dirreg += 4;
341 			reg += 4;
342 			fwlen -= 4;
343 			ucodeoff++;
344 		}
345 		pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
346 		fwoff += 4;
347 		dirreg += 4;
348 		reg += 4;
349 		fwlen -= 4;
350 		ucodeoff++;
351 	}
352 	DPRINTF(("%s: %d bytes microcode loaded from %s\n",
353 	    sc->sc_dev.dv_xname, fwoff, name));
354 
355 	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
356 	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
357 	reg |= PGT_CTRL_STAT_RAMBOOT;
358 	pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
359 	pgt_write_memory_barrier(sc);
360 	DELAY(PGT_WRITEIO_DELAY);
361 
362 	reg |= PGT_CTRL_STAT_RESET;
363 	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
364 	pgt_write_memory_barrier(sc);
365 	DELAY(PGT_WRITEIO_DELAY);
366 
367 	reg &= ~PGT_CTRL_STAT_RESET;
368 	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
369 	pgt_write_memory_barrier(sc);
370 	DELAY(PGT_WRITEIO_DELAY);
371 
372 	free(ucode, M_DEVBUF);
373 
374 	return (0);
375 }
376 
/*
 * Reset queue pq's book-keeping: renumber the descriptors on its free
 * list, attach each to its slot in the control-block fragment array
 * pqfrags, and re-arm rx fragments.  The caller handles control-block
 * DMA syncs around this.
 */
void
pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_frag *pqfrags)
{
	struct pgt_desc *pd;
	unsigned int i;

	sc->sc_cb->pcb_device_curfrag[pq] = 0;
	i = 0;
	/* XXX why only freeq ??? */
	TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
		pd->pd_fragnum = i;
		pd->pd_fragp = &pqfrags[i];
		if (pgt_queue_is_rx(pq))
			pgt_reinit_rx_desc_frag(sc, pd);
		i++;
	}
	sc->sc_freeq_count[pq] = i;
	/*
	 * The ring buffer describes how many free buffers are available from
	 * the host (for receive queues) or how many are pending (for
	 * transmit queues).
	 */
	if (pgt_queue_is_rx(pq))
		sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
	else
		sc->sc_cb->pcb_driver_curfrag[pq] = 0;
}
405 
406 /*
407  * Turn off interrupts, reset the device (possibly loading firmware),
408  * and put everything in a known state.
409  */
int
pgt_reset(struct pgt_softc *sc)
{
	int error;

	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * Set up the management receive queue, assuming there are no
	 * requests in progress.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
	    &sc->sc_cb->pcb_data_low_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
	    &sc->sc_cb->pcb_data_low_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
	    &sc->sc_cb->pcb_data_high_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
	    &sc->sc_cb->pcb_data_high_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
	    &sc->sc_cb->pcb_mgmt_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
	    &sc->sc_cb->pcb_mgmt_tx[0]);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);

	/* load firmware */
	if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
		error = pgt_load_firmware(sc);
		if (error) {
			printf("%s: firmware load failed\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
		sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
		DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
	}

	/* upload the control block's DMA address */
	pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
	    htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
	DELAY(PGT_WRITEIO_DELAY);

	/* send a reset event */
	pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
	DELAY(PGT_WRITEIO_DELAY);

	/* await only the initialization interrupt */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * A zero return only means the reset was issued; completion is
	 * signalled asynchronously via the init interrupt, which clears
	 * SC_UNINITIALIZED and wakes sleepers (see pgt_init_intr()).
	 */
	return (0);
}
469 
470 /*
471  * If we're trying to reset and the device has seemingly not been detached,
472  * we'll spend a minute seeing if we can't do the reset.
473  */
void
pgt_stop(struct pgt_softc *sc, unsigned int flag)
{
	struct ieee80211com *ic;
	unsigned int wokeup;
	int tryagain = 0;

	ic = &sc->sc_ic;

	/* flag is SC_NEEDS_RESET or SC_DYING (see pgt_detach()). */
	ic->ic_if.if_flags &= ~IFF_RUNNING;
	sc->sc_flags |= SC_UNINITIALIZED;
	sc->sc_flags |= flag;

	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);

trying_again:
	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/* reboot card */
	pgt_reboot(sc);

	/* Fail all in-progress management requests with ENETRESET. */
	do {
		wokeup = 0;
		/*
		 * We don't expect to be woken up, just to drop the lock
		 * and time out.  Only tx queues can have anything valid
		 * on them outside of an interrupt.
		 */
		while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
			struct pgt_mgmt_desc *pmd;

			pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			pmd->pmd_error = ENETRESET;
			wakeup_one(pmd);
			if (sc->sc_debug & SC_DEBUG_MGMT)
				DPRINTF(("%s: queue: mgmt %p <- %#x "
				    "(drained)\n", sc->sc_dev.dv_xname,
				    pmd, pmd->pmd_oid));
			wokeup++;
		}
		if (wokeup > 0) {
			if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
				sc->sc_flags &= ~flag;
				return;
			}
		}
	} while (wokeup > 0);

	if (flag == SC_NEEDS_RESET) {
		int error;

		DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
		sc->sc_flags &= ~SC_POWERSAVE;
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		error = pgt_reset(sc);
		if (error == 0) {
			/* Wait up to a second for the init interrupt. */
			tsleep(&sc->sc_flags, 0, "pgtres", hz);
			if (sc->sc_flags & SC_UNINITIALIZED) {
				printf("%s: not responding\n",
				    sc->sc_dev.dv_xname);
				/* Thud.  It was probably removed. */
				if (tryagain)
					panic("pgt went for lunch"); /* XXX */
				tryagain = 1;
			} else {
				/* await all interrupts */
				pgt_write_4_flush(sc, PGT_REG_INT_EN,
				    PGT_INT_STAT_SOURCES);
				DELAY(PGT_WRITEIO_DELAY);
				ic->ic_if.if_flags |= IFF_RUNNING;
			}
		}

		if (tryagain)
			goto trying_again;

		sc->sc_flags &= ~flag;
		if (ic->ic_if.if_flags & IFF_RUNNING)
			pgt_update_hw_from_sw(sc,
			    ic->ic_state != IEEE80211_S_INIT,
			    ic->ic_opmode != IEEE80211_M_MONITOR);
	}

	ic->ic_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}
565 
/*
 * Finish device attachment: allocate descriptor DMA, reset the card
 * (which loads firmware), wait up to a second for its initialization
 * interrupt, then attach the network interface and start the
 * per-device kthread.  On any error the device is simply left inert.
 */
void
pgt_attach(void *xsc)
{
	struct pgt_softc *sc = xsc;
	int error;

	/* debug flags */
	//sc->sc_debug |= SC_DEBUG_QUEUES;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_MGMT;
	sc->sc_debug |= SC_DEBUG_UNEXPECTED;
	//sc->sc_debug |= SC_DEBUG_TRIGGER;	/* verbose */
	//sc->sc_debug |= SC_DEBUG_EVENTS;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_POWER;
	sc->sc_debug |= SC_DEBUG_TRAP;
	sc->sc_debug |= SC_DEBUG_LINK;
	//sc->sc_debug |= SC_DEBUG_RXANNEX;
	//sc->sc_debug |= SC_DEBUG_RXFRAG;
	//sc->sc_debug |= SC_DEBUG_RXETHER;

	/* enable card if possible */
	if (sc->sc_enable != NULL)
		(*sc->sc_enable)(sc);

	error = pgt_dma_alloc(sc);
	if (error)
		return;

	sc->sc_ic.ic_if.if_softc = sc;
	TAILQ_INIT(&sc->sc_mgmtinprog);
	TAILQ_INIT(&sc->sc_kthread.sck_traps);
	sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
	sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;

	error = pgt_reset(sc);
	if (error)
		return;

	/* The init interrupt clears SC_UNINITIALIZED and wakes us. */
	tsleep(&sc->sc_flags, 0, "pgtres", hz);
	if (sc->sc_flags & SC_UNINITIALIZED) {
		printf("%s: not responding\n", sc->sc_dev.dv_xname);
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		return;
	} else {
		/* await all interrupts */
		pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
		DELAY(PGT_WRITEIO_DELAY);
	}

	error = pgt_net_attach(sc);
	if (error)
		return;

	if (kthread_create(pgt_per_device_kthread, sc, NULL,
	    sc->sc_dev.dv_xname) != 0)
		return;

	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}
624 
/*
 * Detach the device: stop and reboot the card, tear down the network
 * interface, then disable the card and free DMA resources.  Skips the
 * stop/teardown when attachment never completed.  Always returns 0.
 */
int
pgt_detach(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
		/* device was not initialized correctly, so leave early */
		goto out;

	/* stop card */
	pgt_stop(sc, SC_DYING);
	pgt_reboot(sc);

	ieee80211_ifdetach(&sc->sc_ic.ic_if);
	if_detach(&sc->sc_ic.ic_if);

out:
	/* disable card if possible */
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);

	pgt_dma_free(sc);

	return (0);
}
648 
/*
 * Hard-reset the card back to its boot state: clear RAMBOOT, pulse the
 * RESET bit high then low, with a write barrier and delay after each
 * step, and give the device PGT_RESET_DELAY to come back.
 */
void
pgt_reboot(struct pgt_softc *sc)
{
	uint32_t reg;

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_RESET_DELAY);
}
670 
671 void
672 pgt_init_intr(struct pgt_softc *sc)
673 {
674 	if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
675 		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
676 			DPRINTF(("%s: spurious initialization\n",
677 			    sc->sc_dev.dv_xname));
678 	} else {
679 		sc->sc_flags &= ~SC_UNINITIALIZED;
680 		wakeup(&sc->sc_flags);
681 	}
682 }
683 
684 /*
685  * If called with a NULL last_nextpkt, only the mgmt queue will be checked
686  * for new packets.
687  */
/*
 * Service a device interrupt or trigger: reap completed tx and rx
 * fragments on all six queues in priority order, then run deferred
 * completion for the mgmt and data receive queues and credit those
 * fragments back to the device.  A pending-count exceeding what the
 * driver believes is outstanding flags SC_INTR_RESET.
 * NOTE(review): the comment above this function still mentions a
 * "last_nextpkt" argument that the current signature does not have;
 * "hack" is only consulted under PGT_BUGGY_INTERRUPT_RECOVERY.
 */
void
pgt_update_intr(struct pgt_softc *sc, int hack)
{
	/* priority order */
	enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
	    PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
	    PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
	    PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
	};
	struct mbuf *m;
	uint32_t npend;
	unsigned int dirtycount;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_debug_events(sc, "intr");
	/*
	 * Check for completion of tx in their dirty queues.
	 * Check completion of rx into their dirty queues.
	 */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		size_t qdirty, qfree;

		qdirty = sc->sc_dirtyq_count[pqs[i]];
		qfree = sc->sc_freeq_count[pqs[i]];
		/*
		 * We want the wrap-around here.
		 */
		if (pgt_queue_is_rx(pqs[i])) {
			int data;

			data = pgt_queue_is_data(pqs[i]);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
			if (hack && data)
				continue;
#endif
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			/*
			 * Receive queues clean up below, so qdirty must
			 * always be 0.
			 */
			if (npend > qfree) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: rx queue [%u] "
					    "overflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qfree));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			while (qfree-- > npend)
				pgt_rxdone(sc, pqs[i]);
		} else {
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			if (npend > qdirty) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: tx queue [%u] "
					    "underflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qdirty));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			/*
			 * If the free queue was empty, or the data transmit
			 * queue just became empty, wake up any waiters.
			 */
			if (qdirty > npend) {
				if (pgt_queue_is_data(pqs[i])) {
					sc->sc_ic.ic_if.if_timer = 0;
					sc->sc_ic.ic_if.if_flags &=
					    ~IFF_OACTIVE;
				}
				while (qdirty-- > npend)
					pgt_txdone(sc, pqs[i]);
			}
		}
	}

	/*
	 * This is the deferred completion for received management frames
	 * and where we queue network frames for stack input.
	 */
	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
		struct pgt_mgmt_desc *pmd;

		pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
		/*
		 * If there is no mgmt request in progress or the operation
		 * returned is explicitly a trap, this pmd will essentially
		 * be ignored.
		 */
		pgt_mgmtrx_completion(sc, pmd);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));

	/*
	 * Write out what we've finished with.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
}
814 
/*
 * Rebuild an 802.11 data frame around an Ethernet-framed packet from
 * the device, prepending an 802.11 header plus LLC/SNAP and resolving
 * (or, in ad-hoc mode, fabricating) the source node in *ni.  In
 * monitor mode the mbuf passes through untouched.  Returns NULL when
 * the mbuf could not be grown or no node could be found; the mbuf is
 * consumed in that case.
 */
struct mbuf *
pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
    struct mbuf *m, struct ieee80211_node **ni)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *frame;
	struct llc *snap;

	ic = &sc->sc_ic;
	if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
		*ni = ieee80211_ref_node(ic->ic_bss);
		(*ni)->ni_inact = 0;
		return (m);
	}

	/* Make room for the 802.11 header and the LLC/SNAP header. */
	M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
	if (m == NULL)
		return (m);
	if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
		m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
		if (m == NULL)
			return (m);
	}
	frame = mtod(m, struct ieee80211_frame *);
	snap = (struct llc *)&frame[1];
	if (ni != NULL) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			*ni = ieee80211_ref_node(ic->ic_bss);
		}
#ifndef IEEE80211_STA_ONLY
		else {
			*ni = ieee80211_find_node(ic, eh->ether_shost);
			/*
			 * Make up associations for ad-hoc mode.  To support
			 * ad-hoc WPA, we'll need to maintain a bounded
			 * pool of ad-hoc stations.
			 */
			if (*ni == NULL &&
			    ic->ic_opmode != IEEE80211_M_HOSTAP) {
				*ni = ieee80211_dup_bss(ic, eh->ether_shost);
				if (*ni != NULL) {
					(*ni)->ni_associd = 1;
					ic->ic_newassoc(ic, *ni, 1);
				}
			}
			if (*ni == NULL) {
				m_freem(m);
				return (NULL);
			}
		}
#endif
		(*ni)->ni_inact = 0;
	}
	/* RFC 1042 LLC/SNAP encapsulation of the original ethertype. */
	snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
	snap->llc_control = LLC_UI;
	snap->llc_snap.org_code[0] = 0;
	snap->llc_snap.org_code[1] = 0;
	snap->llc_snap.org_code[2] = 0;
	snap->llc_snap.ether_type = eh->ether_type;
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
	/* Doesn't look like much of the 802.11 header is available. */
	*(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
	/*
	 * Translate the addresses; WDS is not handled.
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
		break;
	case IEEE80211_M_HOSTAP:
		/* HostAP forwarding defaults to being done on firmware. */
		frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
		break;
#endif
	default:
		break;
	}
	return (m);
}
908 
909 void
910 pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
911 {
912 	struct ether_header eh;
913 	struct ifnet *ifp;
914 	struct ieee80211_channel *chan;
915 	struct ieee80211_rxinfo rxi;
916 	struct ieee80211_node *ni;
917 	struct ieee80211com *ic;
918 	struct pgt_rx_annex *pra;
919 	struct pgt_rx_header *pha;
920 	struct mbuf *next;
921 	unsigned int n;
922 	uint32_t rstamp;
923 	uint8_t rssi;
924 
925 	ic = &sc->sc_ic;
926 	ifp = &ic->ic_if;
927 	for (next = m; m != NULL; m = next) {
928 		next = m->m_nextpkt;
929 		m->m_nextpkt = NULL;
930 
931 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
932 			if (m->m_len < sizeof(*pha)) {
933 				m = m_pullup(m, sizeof(*pha));
934 				if (m == NULL) {
935 					if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
936 						DPRINTF(("%s: m_pullup "
937 						    "failure\n",
938 						    sc->sc_dev.dv_xname));
939 					ifp->if_ierrors++;
940 					continue;
941 				}
942 			}
943 			pha = mtod(m, struct pgt_rx_header *);
944 			pra = NULL;
945 			goto input;
946 		}
947 
948 		if (m->m_len < sizeof(*pra)) {
949 			m = m_pullup(m, sizeof(*pra));
950 			if (m == NULL) {
951 				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
952 					DPRINTF(("%s: m_pullup failure\n",
953 					    sc->sc_dev.dv_xname));
954 				ifp->if_ierrors++;
955 				continue;
956 			}
957 		}
958 		pra = mtod(m, struct pgt_rx_annex *);
959 		pha = &pra->pra_header;
960 		if (sc->sc_debug & SC_DEBUG_RXANNEX)
961 			DPRINTF(("%s: rx annex: ? %04x "
962 			    "len %u clock %u flags %02x ? %02x rate %u ? %02x "
963 			    "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
964 			    sc->sc_dev.dv_xname,
965 			    letoh16(pha->pra_unknown0),
966 			    letoh16(pha->pra_length),
967 			    letoh32(pha->pra_clock), pha->pra_flags,
968 			    pha->pra_unknown1, pha->pra_rate,
969 			    pha->pra_unknown2, letoh32(pha->pra_frequency),
970 			    pha->pra_unknown3, pha->pra_rssi,
971 			    pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
972 		if (sc->sc_debug & SC_DEBUG_RXETHER)
973 			DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
974 			    sc->sc_dev.dv_xname,
975 			    ether_sprintf(pra->pra_ether_dhost),
976 			    ether_sprintf(pra->pra_ether_shost),
977 			    ntohs(pra->pra_ether_type)));
978 
979 		memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
980 		memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
981 		eh.ether_type = pra->pra_ether_type;
982 
983 input:
984 		/*
985 		 * This flag is set if e.g. packet could not be decrypted.
986 		 */
987 		if (pha->pra_flags & PRA_FLAG_BAD) {
988 			ifp->if_ierrors++;
989 			m_freem(m);
990 			continue;
991 		}
992 
993 		/*
994 		 * After getting what we want, chop off the annex, then
995 		 * turn into something that looks like it really was
996 		 * 802.11.
997 		 */
998 		rssi = pha->pra_rssi;
999 		rstamp = letoh32(pha->pra_clock);
1000 		n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
1001 		if (n <= IEEE80211_CHAN_MAX)
1002 			chan = &ic->ic_channels[n];
1003 		else
1004 			chan = ic->ic_bss->ni_chan;
1005 		/* Send to 802.3 listeners. */
1006 		if (pra) {
1007 			m_adj(m, sizeof(*pra));
1008 		} else
1009 			m_adj(m, sizeof(*pha));
1010 
1011 		m = pgt_ieee80211_encap(sc, &eh, m, &ni);
1012 		if (m != NULL) {
1013 #if NBPFILTER > 0
1014 			if (sc->sc_drvbpf != NULL) {
1015 				struct mbuf mb;
1016 				struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;
1017 
1018 				tap->wr_flags = 0;
1019 				tap->wr_chan_freq = htole16(chan->ic_freq);
1020 				tap->wr_chan_flags = htole16(chan->ic_flags);
1021 				tap->wr_rssi = rssi;
1022 				tap->wr_max_rssi = ic->ic_max_rssi;
1023 
1024 				mb.m_data = (caddr_t)tap;
1025 				mb.m_len = sc->sc_rxtap_len;
1026 				mb.m_next = m;
1027 				mb.m_nextpkt = NULL;
1028 				mb.m_type = 0;
1029 				mb.m_flags = 0;
1030 				bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
1031 			}
1032 #endif
1033 			rxi.rxi_flags = 0;
1034 			ni->ni_rssi = rxi.rxi_rssi = rssi;
1035 			ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
1036 			ieee80211_input(ifp, m, ni, &rxi);
1037 			/*
1038 			 * The frame may have caused the node to be marked for
1039 			 * reclamation (e.g. in response to a DEAUTH message)
1040 			 * so use free_node here instead of unref_node.
1041 			 */
1042 			if (ni == ic->ic_bss)
1043 				ieee80211_unref_node(&ni);
1044 			else
1045 				ieee80211_release_node(&sc->sc_ic, ni);
1046 		} else {
1047 			ifp->if_ierrors++;
1048 		}
1049 	}
1050 }
1051 
1052 void
1053 pgt_wakeup_intr(struct pgt_softc *sc)
1054 {
1055 	int shouldupdate;
1056 	int i;
1057 
1058 	shouldupdate = 0;
1059 	/* Check for any queues being empty before updating. */
1060 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1061 	    sc->sc_cbdmam->dm_mapsize,
1062 	    BUS_DMASYNC_POSTREAD);
1063 	for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
1064 		if (pgt_queue_is_tx(i))
1065 			shouldupdate = pgt_queue_frags_pending(sc, i);
1066 		else
1067 			shouldupdate = pgt_queue_frags_pending(sc, i) <
1068 			    sc->sc_freeq_count[i];
1069 	}
1070 	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1071 		shouldupdate = 1;
1072 	if (sc->sc_debug & SC_DEBUG_POWER)
1073 		DPRINTF(("%s: wakeup interrupt (update = %d)\n",
1074 		    sc->sc_dev.dv_xname, shouldupdate));
1075 	sc->sc_flags &= ~SC_POWERSAVE;
1076 	if (shouldupdate) {
1077 		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1078 		DELAY(PGT_WRITEIO_DELAY);
1079 	}
1080 }
1081 
1082 void
1083 pgt_sleep_intr(struct pgt_softc *sc)
1084 {
1085 	int allowed;
1086 	int i;
1087 
1088 	allowed = 1;
1089 	/* Check for any queues not being empty before allowing. */
1090 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1091 	    sc->sc_cbdmam->dm_mapsize,
1092 	    BUS_DMASYNC_POSTREAD);
1093 	for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
1094 		if (pgt_queue_is_tx(i))
1095 			allowed = pgt_queue_frags_pending(sc, i) == 0;
1096 		else
1097 			allowed = pgt_queue_frags_pending(sc, i) >=
1098 			    sc->sc_freeq_count[i];
1099 	}
1100 	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1101 		allowed = 0;
1102 	if (sc->sc_debug & SC_DEBUG_POWER)
1103 		DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
1104 		    sc->sc_dev.dv_xname, allowed));
1105 	if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
1106 		sc->sc_flags |= SC_POWERSAVE;
1107 		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
1108 		DELAY(PGT_WRITEIO_DELAY);
1109 	}
1110 }
1111 
1112 void
1113 pgt_empty_traps(struct pgt_softc_kthread *sck)
1114 {
1115 	struct pgt_async_trap *pa;
1116 	struct mbuf *m;
1117 
1118 	while (!TAILQ_EMPTY(&sck->sck_traps)) {
1119 		pa = TAILQ_FIRST(&sck->sck_traps);
1120 		TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
1121 		m = pa->pa_mbuf;
1122 		m_freem(m);
1123 	}
1124 }
1125 
void
pgt_per_device_kthread(void *argp)
{
	struct pgt_softc *sc;
	struct pgt_softc_kthread *sck;
	struct pgt_async_trap *pa;
	struct mbuf *m;
	int s;

	sc = argp;
	sck = &sc->sc_kthread;
	/*
	 * Per-device worker thread: sleep until woken for an async
	 * reset, a queued trap, or an update request, and service them
	 * in that priority order.
	 */
	while (!sck->sck_exit) {
		if (!sck->sck_update && !sck->sck_reset &&
		    TAILQ_EMPTY(&sck->sck_traps))
			tsleep(&sc->sc_kthread, 0, "pgtkth", 0);
		if (sck->sck_reset) {
			DPRINTF(("%s: [thread] async reset\n",
			    sc->sc_dev.dv_xname));
			/* A reset supersedes pending updates and traps. */
			sck->sck_reset = 0;
			sck->sck_update = 0;
			pgt_empty_traps(sck);
			s = splnet();
			pgt_stop(sc, SC_NEEDS_RESET);
			splx(s);
		} else if (!TAILQ_EMPTY(&sck->sck_traps)) {
			DPRINTF(("%s: [thread] got a trap\n",
			    sc->sc_dev.dv_xname));
			pa = TAILQ_FIRST(&sck->sck_traps);
			TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
			m = pa->pa_mbuf;
			/* Strip the pgt_async_trap header off the data. */
			m_adj(m, sizeof(*pa));
			pgt_update_sw_from_hw(sc, pa, m);
			m_freem(m);
		} else if (sck->sck_update) {
			sck->sck_update = 0;
			pgt_update_sw_from_hw(sc, NULL, NULL);
		}
	}
	/* Shutting down: drop whatever traps are still queued. */
	pgt_empty_traps(sck);
	kthread_exit(0);
}
1167 
1168 void
1169 pgt_async_reset(struct pgt_softc *sc)
1170 {
1171 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
1172 		return;
1173 	sc->sc_kthread.sck_reset = 1;
1174 	wakeup(&sc->sc_kthread);
1175 }
1176 
1177 void
1178 pgt_async_update(struct pgt_softc *sc)
1179 {
1180 	if (sc->sc_flags & SC_DYING)
1181 		return;
1182 	sc->sc_kthread.sck_update = 1;
1183 	wakeup(&sc->sc_kthread);
1184 }
1185 
/*
 * Interrupt handler.  Returns 0 if the interrupt was not for us,
 * 1 if it was handled.
 */
int
pgt_intr(void *arg)
{
	struct pgt_softc *sc;
	struct ifnet *ifp;
	u_int32_t reg;

	sc = arg;
	ifp = &sc->sc_ic.ic_if;

	/*
	 * Here the Linux driver ands in the value of the INT_EN register,
	 * and masks off everything but the documented interrupt bits.  Why?
	 *
	 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
	 * other times.
	 */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
	    sc->sc_flags & SC_POWERSAVE) {
		/*
		 * Don't try handling the interrupt in sleep mode.
		 */
		reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
		if (reg & PGT_CTRL_STAT_SLEEPMODE)
			return (0);
	}
	reg = pgt_read_4(sc, PGT_REG_INT_STAT);
	if (reg == 0)
		return (0); /* This interrupt is not from us */

	/* Acknowledge all pending sources at once, then dispatch. */
	pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
	if (reg & PGT_INT_STAT_INIT)
		pgt_init_intr(sc);
	if (reg & PGT_INT_STAT_UPDATE) {
		pgt_update_intr(sc, 0);
		/*
		 * If we got an update, it's not really asleep.
		 */
		sc->sc_flags &= ~SC_POWERSAVE;
		/*
		 * Pretend I have any idea what the documentation
		 * would say, and just give it a shot sending an
		 * "update" after acknowledging the interrupt
		 * bits and writing out the new control block.
		 */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
	if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
		pgt_sleep_intr(sc);
	if (reg & PGT_INT_STAT_WAKEUP)
		pgt_wakeup_intr(sc);

	/* A handler flagged a fatal problem; reset outside interrupt context. */
	if (sc->sc_flags & SC_INTR_RESET) {
		sc->sc_flags &= ~SC_INTR_RESET;
		pgt_async_reset(sc);
	}

	if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
		DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
		    sc->sc_dev.dv_xname,
		    reg & ~PGT_INT_STAT_SOURCES,
		    pgt_read_4(sc, PGT_REG_CTRL_STAT)));
	}

	/* Kick tx in case packets were queued while we were busy. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		pgt_start(ifp);

	return (1);
}
1256 
/*
 * Retire the oldest transmitted fragment on queue pq: move its
 * descriptor from the dirty list back to the free list and release
 * the DMA mapping of the fragment data.
 */
void
pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
	TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* Management frames want completion information. */
	if (sc->sc_debug & SC_DEBUG_QUEUES) {
		DPRINTF(("%s: queue: tx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
		if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
			struct pgt_mgmt_frame *pmf;

			pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
			DPRINTF(("%s: queue: txmgmt %p <- "
			    "(ver %u, op %u, flags %#x)\n",
			    sc->sc_dev.dv_xname,
			    pd, pmf->pmf_version, pmf->pmf_operation,
			    pmf->pmf_flags));
		}
	}
	pgt_unload_tx_desc_frag(sc, pd);
}
1287 
/*
 * A fragment has been filled in by the device: move its descriptor
 * from the free list to the dirty list, where the completion path
 * will pick it up, and sync the data for the CPU to read.
 */
void
pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: rx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
	/* The only fragment flag we expect here is "more fragments". */
	if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
	    pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
		DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
		    sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
}
1309 
1310 /*
1311  * Traps are generally used for the firmware to report changes in state
1312  * back to the host.  Mostly this processes changes in link state, but
1313  * it needs to also be used to initiate WPA and other authentication
1314  * schemes in terms of client (station) or server (access point).
1315  */
void
pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
    size_t size)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;
	char *p;
	size_t total;

	if (sc->sc_flags & SC_DYING)
		return;

	/*
	 * The trap is stored in a single mbuf laid out as
	 * [struct pgt_async_trap][oid][trap data]; grab an mbuf
	 * (with a cluster if needed) big enough for all three.
	 */
	total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
	if (total > MLEN) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m = NULL;
		}
	} else
		m = m_get(M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;
	else
		m->m_len = total;

	/* Fill in the header, then the oid and payload behind it. */
	pa = mtod(m, struct pgt_async_trap *);
	p = mtod(m, char *) + sizeof(*pa);
	*(uint32_t *)p = oid;
	p += sizeof(uint32_t);
	memcpy(p, trapdata, size);
	pa->pa_mbuf = m;

	/* Hand the trap to the per-device kthread for processing. */
	TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
	wakeup(&sc->sc_kthread);
}
1356 
1357 /*
1358  * Process a completed management response (all requests should be
1359  * responded to, quickly) or an event (trap).
1360  */
void
pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	uint32_t oid, size;

	/* Take the completed fragment off the dirty list. */
	pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
	TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
	sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
	    pd, pd_link);
	sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
	/* Sanity-check the management frame header. */
	if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt desc too small: %u\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pd->pd_fragp->pf_size)));
		goto out_nopmd;
	}
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	if (pmf->pmf_version != PMF_VER) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt version %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_version));
		goto out_nopmd;
	}
	if (pmf->pmf_device != PMF_DEV) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt dev %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_device));
		goto out;
	}
	if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt flags %x\n",
			    sc->sc_dev.dv_xname,
			    pmf->pmf_flags & ~PMF_FLAG_VALID));
		goto out;
	}
	/* The oid/size fields may be little- or big-endian (PMF_FLAG_LE). */
	if (pmf->pmf_flags & PMF_FLAG_LE) {
		oid = letoh32(pmf->pmf_oid);
		size = letoh32(pmf->pmf_size);
	} else {
		oid = betoh32(pmf->pmf_oid);
		size = betoh32(pmf->pmf_size);
	}
	/* Traps are unsolicited; queue them for the kthread instead. */
	if (pmf->pmf_operation == PMF_OP_TRAP) {
		pmd = NULL; /* ignored */
		DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmf->pmf_operation, oid, size));
		pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
		    min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
		goto out_nopmd;
	}
	if (pmd == NULL) {
		if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
			DPRINTF(("%s: spurious mgmt received "
			    "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
			    pmf->pmf_operation, oid, size));
		goto out_nopmd;
	}
	/* Translate the response operation into an errno for the waiter. */
	switch (pmf->pmf_operation) {
	case PMF_OP_RESPONSE:
		pmd->pmd_error = 0;
		break;
	case PMF_OP_ERROR:
		pmd->pmd_error = EPERM;
		goto out;
	default:
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt op %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_operation));
		pmd->pmd_error = EIO;
		goto out;
	}
	if (oid != pmd->pmd_oid) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
			    sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
		pmd->pmd_oid = oid;
	}
	/* Copy response data out to the requester, if it supplied a buffer. */
	if (pmd->pmd_recvbuf != NULL) {
		if (size > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
				    sc->sc_dev.dv_xname, oid, size));
			pmd->pmd_error = EIO;
			goto out;
		}
		if (size > pmd->pmd_len)
			pmd->pmd_error = ENOMEM;
		else
			memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
			    size);
		pmd->pmd_len = size;
	}

out:
	/* Wake the thread sleeping in pgt_mgmt_request(). */
	TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
	wakeup_one(pmd);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
out_nopmd:
	/* Return the rx fragment to the device in all cases. */
	pgt_reinit_rx_desc_frag(sc, pd);
}
1470 
1471 /*
 * Queue packets for reception and defragmentation.  It is not known
 * whether the rx queue can become full enough to start, but not
 * finish, queueing a fragmented packet.
1475  */
struct mbuf *
pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct ifnet *ifp;
	struct pgt_desc *pd;
	struct mbuf *top, **mp, *m;
	size_t datalen;
	uint16_t morefrags, dataoff;
	int tlen = 0;	/* running byte total of the chain */

	ifp = &sc->sc_ic.ic_if;
	m = NULL;
	top = NULL;
	mp = &top;	/* where to link the next mbuf */

	/*
	 * Copy each completed fragment into its own mbuf and link them
	 * into a chain until a fragment without PF_FLAG_MF ("more
	 * fragments") terminates the packet.
	 */
	while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
		sc->sc_dirtyq_count[pq]--;
		datalen = letoh16(pd->pd_fragp->pf_size);
		dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
		morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);

		if (sc->sc_debug & SC_DEBUG_RXFRAG)
			DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n",
			    sc->sc_dev.dv_xname, datalen, dataoff,
			    pd->pd_fragp->pf_flags));

		/* Add the (two+?) bytes for the header. */
		if (datalen + dataoff > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s data rx too big: %u\n",
				    sc->sc_dev.dv_xname, datalen));
			goto fail;
		}

		/* The first mbuf of the packet carries the packet header. */
		if (m == NULL)
			MGETHDR(m, M_DONTWAIT, MT_DATA);
		else
			m = m_get(M_DONTWAIT, MT_DATA);

		if (m == NULL)
			goto fail;
		if (datalen > MHLEN) {
			/* Large fragment: attach a cluster. */
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_free(m);
				goto fail;
			}
		}
		bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
		m->m_len = datalen;
		tlen += datalen;

		*mp = m;
		mp = &m->m_next;

		/* The fragment can be handed back to the device now. */
		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
		sc->sc_freeq_count[pq]++;
		pgt_reinit_rx_desc_frag(sc, pd);

		if (!morefrags)
			break;
	}

	if (top) {
		top->m_pkthdr.len = tlen;
		top->m_pkthdr.rcvif = ifp;
	}
	return (top);

fail:
	/* Recycle the current fragment and discard any partial chain. */
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	pgt_reinit_rx_desc_frag(sc, pd);

	ifp->if_ierrors++;
	if (top)
		m_freem(top);
	return (NULL);
}
1556 
1557 int
1558 pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
1559     void *arg, size_t arglen)
1560 {
1561 	struct pgt_mgmt_desc pmd;
1562 	int error;
1563 
1564 	bzero(&pmd, sizeof(pmd));
1565 	pmd.pmd_recvbuf = arg;
1566 	pmd.pmd_len = arglen;
1567 	pmd.pmd_oid = oid;
1568 
1569 	error = pgt_mgmt_request(sc, &pmd);
1570 	if (error == 0)
1571 		error = pmd.pmd_error;
1572 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1573 		DPRINTF(("%s: failure getting oid %#x: %d\n",
1574 		    sc->sc_dev.dv_xname, oid, error));
1575 
1576 	return (error);
1577 }
1578 
1579 int
1580 pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
1581     void *arg, size_t arglen)
1582 {
1583 	struct pgt_mgmt_desc pmd;
1584 	int error;
1585 
1586 	bzero(&pmd, sizeof(pmd));
1587 	pmd.pmd_sendbuf = arg;
1588 	pmd.pmd_recvbuf = arg;
1589 	pmd.pmd_len = arglen;
1590 	pmd.pmd_oid = oid;
1591 
1592 	error = pgt_mgmt_request(sc, &pmd);
1593 	if (error == 0)
1594 		error = pmd.pmd_error;
1595 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1596 		DPRINTF(("%s: failure retrieving oid %#x: %d\n",
1597 		    sc->sc_dev.dv_xname, oid, error));
1598 
1599 	return (error);
1600 }
1601 
1602 int
1603 pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
1604     const void *arg, size_t arglen)
1605 {
1606 	struct pgt_mgmt_desc pmd;
1607 	int error;
1608 
1609 	bzero(&pmd, sizeof(pmd));
1610 	pmd.pmd_sendbuf = arg;
1611 	pmd.pmd_len = arglen;
1612 	pmd.pmd_oid = oid;
1613 
1614 	error = pgt_mgmt_request(sc, &pmd);
1615 	if (error == 0)
1616 		error = pmd.pmd_error;
1617 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1618 		DPRINTF(("%s: failure setting oid %#x: %d\n",
1619 		    sc->sc_dev.dv_xname, oid, error));
1620 
1621 	return (error);
1622 }
1623 
/*
 * Print the device control/interrupt status registers and both the
 * driver's and the device's control-block fragment counters; called
 * from pgt_mgmt_request() when a request times out with debugging on.
 */
void
pgt_state_dump(struct pgt_softc *sc)
{
	printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    pgt_read_4(sc, PGT_REG_CTRL_STAT),
	    pgt_read_4(sc, PGT_REG_INT_STAT));

	printf("%s: state dump: driver curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[5]));

	printf("%s: state dump: device curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_device_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[5]));
}
1656 
/*
 * Send a management request described by pmd to the firmware and
 * sleep until the completion path fills in pmd->pmd_error or we time
 * out.  Returns 0 on completion (check pmd->pmd_error for the
 * request's own status) or an errno.
 */
int
pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	int error, i;

	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return (EIO);
	if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
		return (ENOMEM);
	pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
	if (pd == NULL)
		return (ENOMEM);
	error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
	if (error)
		return (error);
	/* Build the management frame header in front of the payload. */
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	pmf->pmf_version = PMF_VER;
	/* "get" and "retrieve" operations look the same */
	if (pmd->pmd_recvbuf != NULL)
		pmf->pmf_operation = PMF_OP_GET;
	else
		pmf->pmf_operation = PMF_OP_SET;
	pmf->pmf_oid = htobe32(pmd->pmd_oid);
	pmf->pmf_device = PMF_DEV;
	pmf->pmf_flags = 0;
	pmf->pmf_size = htobe32(pmd->pmd_len);
	/* "set" and "retrieve" operations both send data */
	if (pmd->pmd_sendbuf != NULL)
		memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len);
	else
		bzero(pmf + 1, pmd->pmd_len);
	/* pgt_mgmtrx_completion() replaces this with the real status. */
	pmd->pmd_error = EINPROGRESS;
	TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
	pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
	    sizeof(*pmf) + pmd->pmd_len, 0);
	/*
	 * Try for one second, triggering 10 times.
	 *
	 * Do our best to work around seemingly buggy CardBus controllers
	 * on Soekris 4521 that fail to get interrupts with alarming
	 * regularity: run as if an interrupt occurred and service every
	 * queue except for mbuf reception.
	 */
	i = 0;
	do {
		/* Sleep a tenth of a second waiting for the response. */
		if (tsleep(pmd, 0, "pgtmgm", hz / 10) != EWOULDBLOCK)
			break;
		if (pmd->pmd_error != EINPROGRESS)
			break;
		if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
			pmd->pmd_error = EIO;
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			break;
		}
		if (i != 9)
			pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
		pgt_update_intr(sc, 0);
#endif
	} while (i++ < 10);

	if (pmd->pmd_error == EINPROGRESS) {
		/* No response in time: dump state and schedule a reset. */
		printf("%s: timeout waiting for management "
		    "packet response to %#x\n",
		    sc->sc_dev.dv_xname, pmd->pmd_oid);
		TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			pgt_state_dump(sc);
		pgt_async_reset(sc);
		error = ETIMEDOUT;
	} else
		error = 0;

	return (error);
}
1739 
/*
 * Hand one filled tx fragment (len bytes in pd) on queue pq to the
 * device.  morecoming marks all but the last fragment of a packet.
 */
void
pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
    uint16_t len, int morecoming)
{
	/* Move the descriptor onto the dirty (in-flight) list. */
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
		    pd->pd_fragnum, pq));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	/* PF_FLAG_MF tells the device more fragments of the packet follow. */
	if (morecoming)
		pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
	pd->pd_fragp->pf_size = htole16(len);
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	/* Advancing the driver curfrag counter publishes the fragment. */
	sc->sc_cb->pcb_driver_curfrag[pq] =
	    htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
	/* Only kick the device once the last fragment is queued. */
	if (!morecoming)
		pgt_maybe_trigger(sc, pq);
}
1768 
1769 void
1770 pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
1771 {
1772 	unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
1773 	uint32_t reg;
1774 
1775 	if (sc->sc_debug & SC_DEBUG_TRIGGER)
1776 		DPRINTF(("%s: triggered by queue [%u]\n",
1777 		    sc->sc_dev.dv_xname, pq));
1778 	pgt_debug_events(sc, "trig");
1779 	if (sc->sc_flags & SC_POWERSAVE) {
1780 		/* Magic values ahoy? */
1781 		if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
1782 			do {
1783 				reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
1784 				if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
1785 					DELAY(PGT_WRITEIO_DELAY);
1786 			} while (tries-- != 0);
1787 			if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
1788 				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1789 					DPRINTF(("%s: timeout triggering from "
1790 					    "sleep mode\n",
1791 					    sc->sc_dev.dv_xname));
1792 				pgt_async_reset(sc);
1793 				return;
1794 			}
1795 		}
1796 		pgt_write_4_flush(sc, PGT_REG_DEV_INT,
1797 		    PGT_DEV_INT_WAKEUP);
1798 		DELAY(PGT_WRITEIO_DELAY);
1799 		/* read the status back in */
1800 		(void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
1801 		DELAY(PGT_WRITEIO_DELAY);
1802 	} else {
1803 		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1804 		DELAY(PGT_WRITEIO_DELAY);
1805 	}
1806 }
1807 
1808 struct ieee80211_node *
1809 pgt_ieee80211_node_alloc(struct ieee80211com *ic)
1810 {
1811 	struct pgt_ieee80211_node *pin;
1812 
1813 	pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
1814 	if (pin != NULL) {
1815 		pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
1816 	}
1817 	return (struct ieee80211_node *)pin;
1818 }
1819 
/* net80211 new-association hook: take a reference on the node. */
void
pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
    int reallynew)
{
	ieee80211_ref_node(ni);
}
1826 
1827 void
1828 pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1829 {
1830 	struct pgt_ieee80211_node *pin;
1831 
1832 	pin = (struct pgt_ieee80211_node *)ni;
1833 	free(pin, M_DEVBUF);
1834 }
1835 
1836 void
1837 pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
1838     const struct ieee80211_node *src)
1839 {
1840 	const struct pgt_ieee80211_node *psrc;
1841 	struct pgt_ieee80211_node *pdst;
1842 
1843 	psrc = (const struct pgt_ieee80211_node *)src;
1844 	pdst = (struct pgt_ieee80211_node *)dst;
1845 	bcopy(psrc, pdst, sizeof(*psrc));
1846 }
1847 
/*
 * net80211 management-frame transmit hook; this driver does not send
 * raw management frames through net80211, so refuse the request.
 */
int
pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
    int type, int arg1, int arg2)
{
	return (EOPNOTSUPP);
}
1854 
1855 int
1856 pgt_net_attach(struct pgt_softc *sc)
1857 {
1858 	struct ieee80211com *ic = &sc->sc_ic;
1859 	struct ifnet *ifp = &ic->ic_if;
1860 	struct ieee80211_rateset *rs;
1861 	uint8_t rates[IEEE80211_RATE_MAXSIZE];
1862 	struct pgt_obj_buffer psbuffer;
1863 	struct pgt_obj_frequencies *freqs;
1864 	uint32_t phymode, country;
1865 	unsigned int chan, i, j, firstchan = -1;
1866 	int error;
1867 
1868 	psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
1869 	psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
1870 	error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(country));
1871 	if (error)
1872 		return (error);
1873 	error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
1874 	if (error)
1875 		return (error);
1876 	error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
1877 	    sizeof(ic->ic_myaddr));
1878 	if (error)
1879 		return (error);
1880 	error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
1881 	if (error)
1882 		return (error);
1883 
1884 	ifp->if_softc = sc;
1885 	ifp->if_ioctl = pgt_ioctl;
1886 	ifp->if_start = pgt_start;
1887 	ifp->if_watchdog = pgt_watchdog;
1888 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
1889 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1890 
1891 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1892 	IFQ_SET_READY(&ifp->if_snd);
1893 
1894 	/*
1895 	 * Set channels
1896 	 *
1897 	 * Prism hardware likes to report supported frequencies that are
1898 	 * not actually available for the country of origin.
1899 	 */
1900 	j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
1901 	freqs = malloc(j, M_DEVBUF, M_WAITOK);
1902 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
1903 	if (error) {
1904 		free(freqs, M_DEVBUF);
1905 		return (error);
1906 	}
1907 
1908 	for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
1909 		chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
1910 		    0);
1911 
1912 		if (chan > IEEE80211_CHAN_MAX) {
1913 			printf("%s: reported bogus channel (%uMHz)\n",
1914 			    sc->sc_dev.dv_xname, chan);
1915 			free(freqs, M_DEVBUF);
1916 			return (EIO);
1917 		}
1918 
1919 		if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
1920 			if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
1921 				continue;
1922 			if (country == letoh32(PGT_COUNTRY_USA)) {
1923 				if (chan >= 12 && chan <= 14)
1924 					continue;
1925 			}
1926 			if (chan <= 14)
1927 				ic->ic_channels[chan].ic_flags |=
1928 				    IEEE80211_CHAN_B;
1929 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
1930 		} else {
1931 			if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
1932 				continue;
1933 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
1934 		}
1935 
1936 		ic->ic_channels[chan].ic_freq =
1937 		    letoh16(freqs->pof_freqlist_mhz[i]);
1938 
1939 		if (firstchan == -1)
1940 			firstchan = chan;
1941 
1942 		DPRINTF(("%s: set channel %d to freq %uMHz\n",
1943 		    sc->sc_dev.dv_xname, chan,
1944 		    letoh16(freqs->pof_freqlist_mhz[i])));
1945 	}
1946 	free(freqs, M_DEVBUF);
1947 	if (firstchan == -1) {
1948 		printf("%s: no channels found\n", sc->sc_dev.dv_xname);
1949 		return (EIO);
1950 	}
1951 
1952 	/*
1953 	 * Set rates
1954 	 */
1955 	bzero(rates, sizeof(rates));
1956 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
1957 	if (error)
1958 		return (error);
1959 	for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
1960 		switch (rates[i]) {
1961 		case 2:
1962 		case 4:
1963 		case 11:
1964 		case 22:
1965 		case 44: /* maybe */
1966 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1967 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
1968 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1969 			}
1970 		default:
1971 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1972 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
1973 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1974 			}
1975 			if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
1976 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
1977 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1978 			}
1979 			rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
1980 			rs->rs_rates[rs->rs_nrates++] = rates[i];
1981 		}
1982 	}
1983 
1984 	ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT |
1985 	    IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
1986 #ifndef IEEE80211_STA_ONLY
1987 	ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP;
1988 #endif
1989 	ic->ic_opmode = IEEE80211_M_STA;
1990 	ic->ic_state = IEEE80211_S_INIT;
1991 
1992 	if_attach(ifp);
1993 	ieee80211_ifattach(ifp);
1994 
1995 	/* setup post-attach/pre-lateattach vector functions */
1996 	sc->sc_newstate = ic->ic_newstate;
1997 	ic->ic_newstate = pgt_newstate;
1998 	ic->ic_node_alloc = pgt_ieee80211_node_alloc;
1999 	ic->ic_newassoc = pgt_ieee80211_newassoc;
2000 	ic->ic_node_free = pgt_ieee80211_node_free;
2001 	ic->ic_node_copy = pgt_ieee80211_node_copy;
2002 	ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
2003 	ic->ic_max_rssi = 255;	/* rssi is a u_int8_t */
2004 
2005 	/* let net80211 handle switching around the media + resetting */
2006 	ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
2007 
2008 #if NBPFILTER > 0
2009 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
2010 	    sizeof(struct ieee80211_frame) + 64);
2011 
2012 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
2013 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
2014 	sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
2015 
2016 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
2017 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
2018 	sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
2019 #endif
2020 	return (0);
2021 }
2022 
2023 int
2024 pgt_media_change(struct ifnet *ifp)
2025 {
2026 	struct pgt_softc *sc = ifp->if_softc;
2027 	int error;
2028 
2029         error = ieee80211_media_change(ifp);
2030         if (error == ENETRESET) {
2031                 pgt_update_hw_from_sw(sc, 0, 0);
2032                 error = 0;
2033         }
2034 
2035         return (error);
2036 }
2037 
/*
 * Report current media status to ifmedia: link validity/activity,
 * the active transmit rate and the operating mode.  If the interface
 * is down only IFM_NONE is reported.
 */
void
pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct pgt_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t rate;
	int s;

	imr->ifm_status = 0;
	imr->ifm_active = IFM_IEEE80211 | IFM_NONE;

	if (!(ifp->if_flags & IFF_UP))
		return;

	s = splnet();

	if (ic->ic_fixed_rate != -1) {
		/* A fixed rate is configured; report it directly. */
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
	} else {
		/* Otherwise ask the firmware for the current link rate. */
		if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
			goto out;
		rate = letoh32(rate);
		if (sc->sc_debug & SC_DEBUG_LINK) {
			DPRINTF(("%s: %s: link rate %u\n",
			    sc->sc_dev.dv_xname, __func__, rate));
		}
		/* Rate 0 means no link; leave status at IFM_NONE. */
		if (rate == 0)
			goto out;
	}

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_IEEE80211;
	if (ic->ic_state == IEEE80211_S_RUN)
		imr->ifm_status |= IFM_ACTIVE;

	imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);

	/* Translate the 802.11 operating mode into ifmedia option bits. */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
		imr->ifm_active |= IFM_IEEE80211_ADHOC;
		break;
	case IEEE80211_M_AHDEMO:
		imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
		break;
	case IEEE80211_M_HOSTAP:
		imr->ifm_active |= IFM_IEEE80211_HOSTAP;
		break;
#endif
	case IEEE80211_M_MONITOR:
		imr->ifm_active |= IFM_IEEE80211_MONITOR;
		break;
	default:
		break;
	}

out:
	splx(s);
}
2100 
2101 /*
2102  * Start data frames.  Critical sections surround the boundary of
2103  * management frame transmission / transmission acknowledgement / response
2104  * and data frame transmission / transmission acknowledgement.
2105  */
/*
 * if_start handler: move frames from the interface send queue onto
 * the DATA_LOW_TX descriptor ring.  Frames up to PGT_FRAG_SIZE use
 * one descriptor, up to twice that use two, and anything larger is
 * dropped.  Transmission only happens while associated
 * (IEEE80211_S_RUN) and the device is healthy.
 */
void
pgt_start(struct ifnet *ifp)
{
	struct pgt_softc *sc;
	struct ieee80211com *ic;
	struct pgt_desc *pd;
	struct mbuf *m;
	int error;

	sc = ifp->if_softc;
	ic = &sc->sc_ic;

	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
	    !(ifp->if_flags & IFF_RUNNING) ||
	    ic->ic_state != IEEE80211_S_RUN) {
		return;
	}

	/*
	 * Management packets should probably be MLME frames
	 * (i.e. hostap "managed" mode); we don't touch the
	 * net80211 management queue.
	 */
	for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
	    PGT_QUEUE_FULL_THRESHOLD && !IFQ_IS_EMPTY(&ifp->if_snd);) {
		/* Peek first; only dequeue once a descriptor is loaded. */
		pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
			/* Single-fragment transmission. */
			error = pgt_load_tx_desc_frag(sc,
			    PGT_QUEUE_DATA_LOW_TX, pd);
			if (error)
				break;
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd, m->m_pkthdr.len, 0);
		} else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
			struct pgt_desc *pd2;

			/*
			 * Transmit a fragmented frame if there is
			 * not enough room in one fragment; limit
			 * to two fragments (802.11 itself couldn't
			 * even support a full two.)
			 */
			if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
			    PGT_QUEUE_FULL_THRESHOLD)
				break;
			pd2 = TAILQ_NEXT(pd, pd_link);
			error = pgt_load_tx_desc_frag(sc,
			    PGT_QUEUE_DATA_LOW_TX, pd);
			if (error == 0) {
				error = pgt_load_tx_desc_frag(sc,
				    PGT_QUEUE_DATA_LOW_TX, pd2);
				if (error) {
					/*
					 * Second fragment failed to load;
					 * roll back the first so the free
					 * list stays consistent.
					 */
					pgt_unload_tx_desc_frag(sc, pd);
					TAILQ_INSERT_HEAD(&sc->sc_freeq[
					    PGT_QUEUE_DATA_LOW_TX], pd,
					    pd_link);
				}
			}
			if (error)
				break;
			IFQ_DEQUEUE(&ifp->if_snd, m);
			/* First fragment is marked "more to follow" (1). */
			m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd, PGT_FRAG_SIZE, 1);
			m_copydata(m, PGT_FRAG_SIZE,
			    m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
		} else {
			/* Frame too large for two fragments; drop it. */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			ifp->if_oerrors++;
			m_freem(m);
			m = NULL;
		}
		if (m != NULL) {
			/* Frame was handed to the device; do bookkeeping. */
			struct ieee80211_node *ni;
#if NBPFILTER > 0
			if (ifp->if_bpf != NULL)
				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
			ifp->if_opackets++;
			ifp->if_timer = 1;
			sc->sc_txtimer = 5;
			/* Reset the destination node's inactivity counter. */
			ni = ieee80211_find_txnode(&sc->sc_ic,
			    mtod(m, struct ether_header *)->ether_dhost);
			if (ni != NULL) {
				ni->ni_inact = 0;
				if (ni != ic->ic_bss)
					ieee80211_release_node(&sc->sc_ic, ni);
			}
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct ether_header eh;
				struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;

				/*
				 * Re-encapsulate as an 802.11 frame for the
				 * radiotap tap; pgt_ieee80211_encap consumes
				 * (and may replace) the mbuf.
				 */
				bcopy(mtod(m, struct ether_header *), &eh,
				    sizeof(eh));
				m_adj(m, sizeof(eh));
				m = pgt_ieee80211_encap(sc, &eh, m, NULL);

				tap->wt_flags = 0;
				//tap->wt_rate = rate;
				tap->wt_rate = 0;
				tap->wt_chan_freq =
				    htole16(ic->ic_bss->ni_chan->ic_freq);
				tap->wt_chan_flags =
				    htole16(ic->ic_bss->ni_chan->ic_flags);

				if (m != NULL) {
					mb.m_data = (caddr_t)tap;
					mb.m_len = sc->sc_txtap_len;
					mb.m_next = m;
					mb.m_nextpkt = NULL;
					mb.m_type = 0;
					mb.m_flags = 0;

					bpf_mtap(sc->sc_drvbpf, &mb,
					    BPF_DIRECTION_OUT);
				}
			}
#endif
			if (m != NULL)
				m_freem(m);
		}
	}
}
2238 
/*
 * Interface ioctl handler.  Handles scan requests (the firmware scans
 * continuously, so we merely sleep), node-list retrieval by converting
 * the firmware BSS list into wi(4)-style Prism2 scan records and then
 * into net80211 nodereq structures, and the usual address/flags/MTU/
 * multicast cases.  Everything else is passed to ieee80211_ioctl().
 * An ENETRESET result from any case triggers a hardware reprogram.
 */
int
pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
{
	struct pgt_softc *sc = ifp->if_softc;
	struct ifaddr *ifa;
	struct ifreq *ifr;
	struct wi_req *wreq;
	struct ieee80211_nodereq_all *na;
	struct ieee80211com *ic;
        struct pgt_obj_bsslist *pob;
        struct wi_scan_p2_hdr *p2hdr;
        struct wi_scan_res *res;
        uint32_t noise;
	int maxscan, i, j, s, error = 0;

	ic = &sc->sc_ic;
	ifr = (struct ifreq *)req;

	s = splnet();
	switch (cmd) {
	case SIOCS80211SCAN:
		/*
		 * This chip scans always as soon as it gets initialized.
		 */

		/*
		 * Give us a bit time to scan in case we were not
		 * initialized before and let the userland process wait.
		 */
		tsleep(&sc->sc_flags, 0, "pgtsca", hz * SCAN_TIMEOUT);

		break;
	case SIOCG80211ALLNODES: {
		struct ieee80211_nodereq *nr = NULL;
		na = (struct ieee80211_nodereq_all *)req;
		wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);

		/* Fetch the noise floor and the firmware's BSS list. */
		maxscan = PGT_OBJ_BSSLIST_NBSS;
		pob = malloc(sizeof(*pob) +
		    sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
		error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
		    sizeof(noise));

		if (error == 0) {
			noise = letoh32(noise);
			error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
			    sizeof(*pob) +
			    sizeof(struct pgt_obj_bss) * maxscan);
		}

		if (error == 0) {
			/*
			 * Clamp to both the firmware's entry count and
			 * what fits into wreq's Prism2 result buffer.
			 */
			maxscan = min(PGT_OBJ_BSSLIST_NBSS,
			    letoh32(pob->pob_count));
			maxscan = min(maxscan,
			    (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
			    WI_PRISM2_RES_SIZE);
			p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
			p2hdr->wi_rsvd = 0;
			p2hdr->wi_reason = 1;
			wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
			    sizeof(*p2hdr) / 2;
			wreq->wi_type = WI_RID_SCAN_RES;
		}

		for (na->na_nodes = j = i = 0; i < maxscan &&
		    (na->na_size >= j + sizeof(struct ieee80211_nodereq));
		    i++) {
			/* allocate node space */
			if (nr == NULL)
				nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);

			/* get next BSS scan result */
			res = (struct wi_scan_res *)
			    ((char *)&wreq->wi_val + sizeof(*p2hdr) +
			    i * WI_PRISM2_RES_SIZE);
			pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
			    res, noise);

			/* copy it to node structure for ifconfig to read */
			bzero(nr, sizeof(*nr));
			IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
			IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
			nr->nr_channel = letoh16(res->wi_chan);
			nr->nr_chan_flags = IEEE80211_CHAN_B;
			nr->nr_rssi = letoh16(res->wi_signal);
			nr->nr_max_rssi = 0; /* XXX */
			nr->nr_nwid_len = letoh16(res->wi_ssid_len);
			bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
			nr->nr_intval = letoh16(res->wi_interval);
			nr->nr_capinfo = letoh16(res->wi_capinfo);
			/* Map Prism2 rate codes back to 500kbit/s units. */
			nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
			    (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
			    (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
			    (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
			nr->nr_nrates = 0;
			/*
			 * NOTE(review): this walk is bounded only by a
			 * zero entry; if every wi_srates slot is nonzero
			 * it reads one element past the array — confirm
			 * against the wi_scan_res definition.
			 */
			while (res->wi_srates[nr->nr_nrates] != 0) {
				nr->nr_rates[nr->nr_nrates] =
				    res->wi_srates[nr->nr_nrates] &
				    WI_VAR_SRATES_MASK;
				nr->nr_nrates++;
			}
			nr->nr_flags = 0;
			if (bcmp(nr->nr_macaddr, nr->nr_bssid,
			    IEEE80211_ADDR_LEN) == 0)
				nr->nr_flags |= IEEE80211_NODEREQ_AP;
			error = copyout(nr, (caddr_t)na->na_node + j,
			    sizeof(struct ieee80211_nodereq));
			if (error)
				break;

			/* point to next node entry */
			j += sizeof(struct ieee80211_nodereq);
			na->na_nodes++;
		}
		if (nr)
			free(nr, M_DEVBUF);
		free(pob, M_DEVBUF);
		free(wreq, M_DEVBUF);
		break;
	}
	case SIOCSIFADDR:
		ifa = (struct ifaddr *)req;
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			 arp_ifinit(&sc->sc_ic.ic_ac, ifa);
#endif
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				pgt_init(ifp);
				error = ENETRESET;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				pgt_stop(sc, SC_NEEDS_RESET);
				error = ENETRESET;
			}
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &ic->ic_ac) :
		    ether_delmulti(ifr, &ic->ic_ac);

		/* Multicast filtering is done in firmware; nothing to do. */
		if (error == ENETRESET)
			error = 0;
		break;
	case SIOCSIFMTU:
		/* MTU cannot exceed a single TX fragment. */
		if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
			error = EINVAL;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = ieee80211_ioctl(ifp, cmd, req);
		break;
	}

	/* Any case that changed software state reprograms the hardware. */
	if (error == ENETRESET) {
		pgt_update_hw_from_sw(sc, 0, 0);
		error = 0;
	}
	splx(s);

	return (error);
}
2408 
2409 void
2410 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
2411     struct wi_scan_res *scanres, uint32_t noise)
2412 {
2413 	struct ieee80211_rateset *rs;
2414 	struct wi_scan_res ap;
2415 	unsigned int i, n;
2416 
2417 	rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
2418 	bzero(&ap, sizeof(ap));
2419 	ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
2420 	ap.wi_noise = noise;
2421 	ap.wi_signal = letoh16(pob->pob_rssi);
2422 	IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
2423 	ap.wi_interval = letoh16(pob->pob_beacon_period);
2424 	ap.wi_capinfo = letoh16(pob->pob_capinfo);
2425 	ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
2426 	memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
2427 	n = 0;
2428 	for (i = 0; i < 16; i++) {
2429 		if (letoh16(pob->pob_rates) & (1 << i)) {
2430 			if (i > rs->rs_nrates)
2431 				break;
2432 			ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
2433 			if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
2434 				break;
2435 		}
2436 	}
2437 	memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
2438 }
2439 
2440 void
2441 node_mark_active_ap(void *arg, struct ieee80211_node *ni)
2442 {
2443 	/*
2444 	 * HostAP mode lets all nodes stick around unless
2445 	 * the firmware AP kicks them off.
2446 	 */
2447 	ni->ni_inact = 0;
2448 }
2449 
/*
 * Per-node iterator callback used in IBSS (ad-hoc) mode: keep alive
 * only those peers that hold private configuration state (completed
 * 802.1x authorization); everything else times out naturally.
 */
void
node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
{
	struct pgt_ieee80211_node *pin;

	/*
	 * As there is no association in ad-hoc, we let links just
	 * time out naturally as long as they are not holding any
	 * private configuration, such as 802.1x authorization.
	 */
	pin = (struct pgt_ieee80211_node *)ni;
	if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
		pin->pin_node.ni_inact = 0;
}
2464 
/*
 * Interface watchdog: times out stuck data transmissions, refreshes
 * our view of the firmware state, keeps firmware-managed nodes from
 * being expired by net80211, then chains into ieee80211_watchdog().
 */
void
pgt_watchdog(struct ifnet *ifp)
{
	struct pgt_softc *sc;

	sc = ifp->if_softc;
	/*
	 * Check for timed out transmissions (and make sure to set
	 * this watchdog to fire again if there is still data in the
	 * output device queue).
	 */
	if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
		int count;

		ifp->if_timer = 1;
		if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
			/* TX timer expired: drop the pending frames. */
			count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: timeout %d data transmissions\n",
				    sc->sc_dev.dv_xname, count));
		}
	}
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	/*
	 * If we're going to kick the device out of power-save mode
	 * just to update the BSSID and such, we should not do it
	 * very often; need to determine in what way to do that.
	 */
	if (ifp->if_flags & IFF_RUNNING &&
	    sc->sc_ic.ic_state != IEEE80211_S_INIT &&
	    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
		pgt_async_update(sc);

#ifndef IEEE80211_STA_ONLY
	/*
	 * As a firmware-based HostAP, we should not time out
	 * nodes inside the driver additionally to the timeout
	 * that exists in the firmware.  The only things we
	 * should have to deal with timing out when doing HostAP
	 * are the privacy-related.
	 */
	switch (sc->sc_ic.ic_opmode) {
	case IEEE80211_M_HOSTAP:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_ap, NULL);
		break;
	case IEEE80211_M_IBSS:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_adhoc, NULL);
		break;
	default:
		break;
	}
#endif
	ieee80211_watchdog(ifp);
	ifp->if_timer = 1;
}
2523 
2524 int
2525 pgt_init(struct ifnet *ifp)
2526 {
2527 	struct pgt_softc *sc = ifp->if_softc;
2528 	struct ieee80211com *ic = &sc->sc_ic;
2529 
2530 	/* set default channel */
2531 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
2532 
2533 	if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
2534 		pgt_update_hw_from_sw(sc,
2535 		    ic->ic_state != IEEE80211_S_INIT,
2536 		    ic->ic_opmode != IEEE80211_M_MONITOR);
2537 
2538 	ifp->if_flags |= IFF_RUNNING;
2539 	ifp->if_flags &= ~IFF_OACTIVE;
2540 
2541 	/* Begin background scanning */
2542 	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
2543 
2544 	return (0);
2545 }
2546 
2547 /*
2548  * After most every configuration change, everything needs to be fully
2549  * reinitialized.  For some operations (currently, WEP settings
2550  * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove
2551  * "associations," and allows EAP authorization to occur again.
2552  * If keepassoc is specified, the reset operation should try to go
2553  * back to the BSS had before.
2554  */
2555 void
2556 pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc, int keepnodes)
2557 {
2558 	struct ieee80211com *ic = &sc->sc_ic;
2559 	struct arpcom *ac = &ic->ic_ac;
2560 	struct ifnet *ifp = &ac->ac_if;
2561 	struct pgt_obj_key keyobj;
2562 	struct pgt_obj_ssid essid;
2563 	uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1];
2564 	uint32_t mode, bsstype, config, profile, channel, slot, preamble;
2565 	uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
2566 	unsigned int i;
2567 	int success, shouldbeup, s;
2568 
2569 	config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX;
2570 
2571 	/*
2572 	 * Promiscuous mode is currently a no-op since packets transmitted,
2573 	 * while in promiscuous mode, don't ever seem to go anywhere.
2574 	 */
2575 	shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP;
2576 
2577 	if (shouldbeup) {
2578 		switch (ic->ic_opmode) {
2579 		case IEEE80211_M_STA:
2580 			if (ifp->if_flags & IFF_PROMISC)
2581 				mode = PGT_MODE_CLIENT;	/* what to do? */
2582 			else
2583 				mode = PGT_MODE_CLIENT;
2584 			bsstype = PGT_BSS_TYPE_STA;
2585 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2586 			break;
2587 #ifndef IEEE80211_STA_ONLY
2588 		case IEEE80211_M_IBSS:
2589 			if (ifp->if_flags & IFF_PROMISC)
2590 				mode = PGT_MODE_CLIENT;	/* what to do? */
2591 			else
2592 				mode = PGT_MODE_CLIENT;
2593 			bsstype = PGT_BSS_TYPE_IBSS;
2594 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2595 			break;
2596 		case IEEE80211_M_HOSTAP:
2597 			mode = PGT_MODE_AP;
2598 			bsstype = PGT_BSS_TYPE_STA;
2599 			/*
2600 			 * For IEEE 802.1x, we need to authenticate and
2601 			 * authorize hosts from here on or they remain
2602 			 * associated but without the ability to send or
2603 			 * receive normal traffic to us (courtesy the
2604 			 * firmware AP implementation).
2605 			 */
2606 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2607 			/*
2608 			 * WDS mode needs several things to work:
2609 			 * discovery of exactly how creating the WDS
2610 			 * links is meant to function, an interface
2611 			 * for this, and ability to encode or decode
2612 			 * the WDS frames.
2613 			 */
2614 			if (sc->sc_wds)
2615 				config |= PGT_CONFIG_WDS;
2616 			break;
2617 #endif
2618 		case IEEE80211_M_MONITOR:
2619 			mode = PGT_MODE_PROMISCUOUS;
2620 			bsstype = PGT_BSS_TYPE_ANY;
2621 			dot1x = PGT_DOT1X_AUTH_NONE;
2622 			break;
2623 		default:
2624 			goto badopmode;
2625 		}
2626 	} else {
2627 badopmode:
2628 		mode = PGT_MODE_CLIENT;
2629 		bsstype = PGT_BSS_TYPE_NONE;
2630 	}
2631 
2632 	DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
2633 	switch (ic->ic_curmode) {
2634 	case IEEE80211_MODE_11A:
2635 		profile = PGT_PROFILE_A_ONLY;
2636 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2637 		DPRINTF(("IEEE80211_MODE_11A\n"));
2638 		break;
2639 	case IEEE80211_MODE_11B:
2640 		profile = PGT_PROFILE_B_ONLY;
2641 		preamble = PGT_OID_PREAMBLE_MODE_LONG;
2642 		DPRINTF(("IEEE80211_MODE_11B\n"));
2643 		break;
2644 	case IEEE80211_MODE_11G:
2645 		profile = PGT_PROFILE_G_ONLY;
2646 		preamble = PGT_OID_PREAMBLE_MODE_SHORT;
2647 		DPRINTF(("IEEE80211_MODE_11G\n"));
2648 		break;
2649 	case IEEE80211_MODE_TURBO: /* not handled */
2650 		/* FALLTHROUGH */
2651 	case IEEE80211_MODE_AUTO:
2652 		profile = PGT_PROFILE_MIXED_G_WIFI;
2653 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2654 		DPRINTF(("IEEE80211_MODE_AUTO\n"));
2655 		break;
2656 	default:
2657 		panic("unknown mode %d", ic->ic_curmode);
2658 	}
2659 
2660 	switch (sc->sc_80211_ioc_auth) {
2661 	case IEEE80211_AUTH_NONE:
2662 		auth = PGT_AUTH_MODE_NONE;
2663 		break;
2664 	case IEEE80211_AUTH_OPEN:
2665 		auth = PGT_AUTH_MODE_OPEN;
2666 		break;
2667 	default:
2668 		auth = PGT_AUTH_MODE_SHARED;
2669 		break;
2670 	}
2671 
2672 	if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) {
2673 		wep = 1;
2674 		exunencrypted = 1;
2675 	} else {
2676 		wep = 0;
2677 		exunencrypted = 0;
2678 	}
2679 
2680 	mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
2681 	wep = htole32(wep);
2682 	exunencrypted = htole32(exunencrypted);
2683 	profile = htole32(profile);
2684 	preamble = htole32(preamble);
2685 	bsstype = htole32(bsstype);
2686 	config = htole32(config);
2687 	mode = htole32(mode);
2688 
2689 	if (!wep || !sc->sc_dot1x)
2690 		dot1x = PGT_DOT1X_AUTH_NONE;
2691 	dot1x = htole32(dot1x);
2692 	auth = htole32(auth);
2693 
2694 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
2695 		slot = htole32(PGT_OID_SLOT_MODE_SHORT);
2696 	else
2697 		slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC);
2698 
2699 	if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
2700 		if (keepassoc)
2701 			channel = 0;
2702 		else
2703 			channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
2704 	} else
2705 		channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);
2706 
2707 	DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
2708 	for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
2709 		availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
2710 		DPRINTF((" %d", availrates[i]));
2711 	}
2712 	DPRINTF(("\n"));
2713 	availrates[i++] = 0;
2714 
2715 	essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
2716 	memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length);
2717 
2718 	s = splnet();
2719 	for (success = 0; success == 0; success = 1) {
2720 		SETOID(PGT_OID_PROFILE, &profile, sizeof(profile));
2721 		SETOID(PGT_OID_CONFIG, &config, sizeof(config));
2722 		SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));
2723 
2724 		if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) {
2725 			SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,
2726 			    sizeof(ac->ac_enaddr));
2727 			IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr);
2728 		}
2729 
2730 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2731 		SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype));
2732 
2733 		if (channel != 0 && channel != IEEE80211_CHAN_ANY)
2734 			SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel));
2735 
2736 		if (ic->ic_flags & IEEE80211_F_DESBSSID) {
2737 			SETOID(PGT_OID_BSSID, ic->ic_des_bssid,
2738 			    sizeof(ic->ic_des_bssid));
2739 		} else if (keepassoc) {
2740 			SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2741 			    sizeof(ic->ic_bss->ni_bssid));
2742 		}
2743 
2744 		SETOID(PGT_OID_SSID, &essid, sizeof(essid));
2745 
2746 		if (ic->ic_des_esslen > 0)
2747 			SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid));
2748 
2749 		SETOID(PGT_OID_RATES, &availrates, i);
2750 		SETOID(PGT_OID_EXTENDED_RATES, &availrates, i);
2751 		SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble));
2752 		SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot));
2753 		SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth));
2754 		SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,
2755 		    sizeof(exunencrypted));
2756 		SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x));
2757 		SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep));
2758 		/*
2759 		 * Setting WEP key(s)
2760 		 */
2761 		if (letoh32(wep) != 0) {
2762 			keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
2763 			/* key 1 */
2764 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2765 			    IEEE80211_KEYBUF_SIZE);
2766 			keyobj.pok_length = min(keyobj.pok_length,
2767 			    ic->ic_nw_keys[0].k_len);
2768 			bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
2769 			    keyobj.pok_length);
2770 			SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
2771 			/* key 2 */
2772 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2773 			    IEEE80211_KEYBUF_SIZE);
2774 			keyobj.pok_length = min(keyobj.pok_length,
2775 			    ic->ic_nw_keys[1].k_len);
2776 			bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
2777 			    keyobj.pok_length);
2778 			SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
2779 			/* key 3 */
2780 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2781 			    IEEE80211_KEYBUF_SIZE);
2782 			keyobj.pok_length = min(keyobj.pok_length,
2783 			    ic->ic_nw_keys[2].k_len);
2784 			bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
2785 			    keyobj.pok_length);
2786 			SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
2787 			/* key 4 */
2788 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2789 			    IEEE80211_KEYBUF_SIZE);
2790 			keyobj.pok_length = min(keyobj.pok_length,
2791 			    ic->ic_nw_keys[3].k_len);
2792 			bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
2793 			    keyobj.pok_length);
2794 			SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));
2795 
2796 			wepkey = htole32(ic->ic_wep_txkey);
2797 			SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
2798 		}
2799 		/* set mode again to commit */
2800 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2801 	}
2802 	splx(s);
2803 
2804 	if (success) {
2805 		if (shouldbeup && keepnodes)
2806 			sc->sc_flags |= SC_NOFREE_ALLNODES;
2807 		if (shouldbeup)
2808 			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2809 		else
2810 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2811 	} else {
2812 		printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
2813 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2814 	}
2815 }
2816 
2817 void
2818 pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
2819     struct pgt_obj_mlme *mlme)
2820 {
2821 	struct ieee80211com *ic = &sc->sc_ic;
2822 	struct pgt_ieee80211_node *pin;
2823 	struct ieee80211_node *ni;
2824 
2825 	ni = ieee80211_find_node(ic, mlme->pom_address);
2826 	pin = (struct pgt_ieee80211_node *)ni;
2827 	switch (oid) {
2828 	case PGT_OID_DISASSOCIATE:
2829 		if (ni != NULL)
2830 			ieee80211_release_node(&sc->sc_ic, ni);
2831 		break;
2832 	case PGT_OID_ASSOCIATE:
2833 		if (ni == NULL) {
2834 			ni = ieee80211_dup_bss(ic, mlme->pom_address);
2835 			if (ni == NULL)
2836 				break;
2837 			ic->ic_newassoc(ic, ni, 1);
2838 			pin = (struct pgt_ieee80211_node *)ni;
2839 		}
2840 		ni->ni_associd = letoh16(mlme->pom_id);
2841 		pin->pin_mlme_state = letoh16(mlme->pom_state);
2842 		break;
2843 	default:
2844 		if (pin != NULL)
2845 			pin->pin_mlme_state = letoh16(mlme->pom_state);
2846 		break;
2847 	}
2848 }
2849 
2850 /*
2851  * Either in response to an event or after a certain amount of time,
2852  * synchronize our idea of the network we're part of from the hardware.
2853  */
/*
 * Synchronize our idea of the network from the hardware, either in
 * response to a trap ('pa' non-NULL, trap payload in 'args': an OID
 * followed by OID-specific data) or periodically ('pa' NULL).  Link
 * state changes drive the net80211 RUN/SCAN transitions; afterwards
 * the noise floor, channel, BSSID, RSSI and SSID are refreshed.
 */
void
pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
	    struct mbuf *args)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct pgt_obj_ssid ssid;
	struct pgt_obj_bss bss;
	uint32_t channel, noise, ls;
	int error, s;

	if (pa != NULL) {
		struct pgt_obj_mlme *mlme;
		uint32_t oid;

		/* Trap payload starts with the OID that fired. */
		oid = *mtod(args, uint32_t *);
		m_adj(args, sizeof(uint32_t));
		if (sc->sc_debug & SC_DEBUG_TRAP)
			DPRINTF(("%s: trap: oid %#x len %u\n",
			    sc->sc_dev.dv_xname, oid, args->m_len));
		switch (oid) {
		case PGT_OID_LINK_STATE:
			if (args->m_len < sizeof(uint32_t))
				break;
			/* Nonzero link state means we are associated. */
			ls = letoh32(*mtod(args, uint32_t *));
			if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
				DPRINTF(("%s: %s: link rate %u\n",
				    sc->sc_dev.dv_xname, __func__, ls));
			if (ls)
				ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
			else
				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
			/* Fall into the state refresh below. */
			goto gotlinkstate;
		case PGT_OID_DEAUTHENTICATE:
		case PGT_OID_AUTHENTICATE:
		case PGT_OID_DISASSOCIATE:
		case PGT_OID_ASSOCIATE:
			if (args->m_len < sizeof(struct pgt_obj_mlme))
				break;
			mlme = mtod(args, struct pgt_obj_mlme *);
			if (sc->sc_debug & SC_DEBUG_TRAP)
				DPRINTF(("%s: mlme: address "
				    "%s id 0x%02x state 0x%02x code 0x%02x\n",
				    sc->sc_dev.dv_xname,
				    ether_sprintf(mlme->pom_address),
				    letoh16(mlme->pom_id),
				    letoh16(mlme->pom_state),
				    letoh16(mlme->pom_code)));
#ifndef IEEE80211_STA_ONLY
			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
				pgt_hostap_handle_mlme(sc, oid, mlme);
#endif
			break;
		}
		return;
	}
	/* Periodic path: poll the link state while scanning. */
	if (ic->ic_state == IEEE80211_S_SCAN) {
		s = splnet();
		error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
		splx(s);
		if (error)
			return;
		/*
		 * NOTE(review): htole32(ls) here looks like it should be
		 * letoh32(ls); both are no-ops on little-endian hosts and
		 * only the debug output is affected — confirm.
		 */
		DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
		    htole32(ls)));
		if (ls != 0)
			ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	}

gotlinkstate:
	s = splnet();
	if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
		goto out;
	sc->sc_noise = letoh32(noise);
	if (ic->ic_state == IEEE80211_S_RUN) {
		/* Refresh channel, BSSID, RSSI and SSID from the firmware. */
		if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
		    sizeof(channel)) != 0)
			goto out;
		channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
		ic->ic_bss->ni_chan = &ic->ic_channels[channel];
		if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
		    sizeof(ic->ic_bss->ni_bssid)) != 0)
			goto out;
		IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
		error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
		    sizeof(bss));
		if (error == 0)
			ic->ic_bss->ni_rssi = bss.pob_rssi;
		else if (error != EPERM)
			goto out;
		error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
		if (error)
			goto out;
		ic->ic_bss->ni_esslen = min(ssid.pos_length,
		    sizeof(ic->ic_bss->ni_essid));
		memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
		    ssid.pos_length);
	}

out:
	splx(s);
}
2954 
2955 int
2956 pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2957 {
2958 	struct pgt_softc *sc = ic->ic_if.if_softc;
2959 	enum ieee80211_state ostate;
2960 
2961 	ostate = ic->ic_state;
2962 
2963 	DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
2964 	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
2965 
2966 	switch (nstate) {
2967 	case IEEE80211_S_INIT:
2968 		if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
2969 			ic->ic_if.if_timer = 0;
2970 		ic->ic_mgt_timer = 0;
2971 		ic->ic_flags &= ~IEEE80211_F_SIBSS;
2972 		ieee80211_free_allnodes(ic);
2973 		break;
2974 	case IEEE80211_S_SCAN:
2975 		ic->ic_if.if_timer = 1;
2976 		ic->ic_mgt_timer = 0;
2977 		if (sc->sc_flags & SC_NOFREE_ALLNODES)
2978 			sc->sc_flags &= ~SC_NOFREE_ALLNODES;
2979 		else
2980 			ieee80211_free_allnodes(ic);
2981 
2982 #ifndef IEEE80211_STA_ONLY
2983 		/* Just use any old channel; we override it anyway. */
2984 		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2985 			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
2986 #endif
2987 		break;
2988 	case IEEE80211_S_RUN:
2989 		ic->ic_if.if_timer = 1;
2990 		break;
2991 	default:
2992 		break;
2993 	}
2994 
2995 	return (sc->sc_newstate(ic, nstate, arg));
2996 }
2997 
2998 int
2999 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
3000 {
3001 	int wokeup = 0;
3002 
3003 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
3004 	    sc->sc_cbdmam->dm_mapsize,
3005 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
3006 	sc->sc_cb->pcb_device_curfrag[pq] =
3007 	    sc->sc_cb->pcb_driver_curfrag[pq];
3008 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
3009 	    sc->sc_cbdmam->dm_mapsize,
3010 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
3011 	while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
3012 		struct pgt_desc *pd;
3013 
3014 		pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
3015 		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
3016 		sc->sc_dirtyq_count[pq]--;
3017 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3018 		sc->sc_freeq_count[pq]++;
3019 		pgt_unload_tx_desc_frag(sc, pd);
3020 		if (sc->sc_debug & SC_DEBUG_QUEUES)
3021 			DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
3022 			    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
3023 		wokeup++;
3024 		if (pgt_queue_is_data(pq))
3025 			sc->sc_ic.ic_if.if_oerrors++;
3026 	}
3027 
3028 	return (wokeup);
3029 }
3030 
3031 int
3032 pgt_dma_alloc(struct pgt_softc *sc)
3033 {
3034 	size_t size;
3035 	int i, error, nsegs;
3036 
3037 	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
3038 		TAILQ_INIT(&sc->sc_freeq[i]);
3039 		TAILQ_INIT(&sc->sc_dirtyq[i]);
3040 	}
3041 
3042 	/*
3043 	 * control block
3044 	 */
3045 	size = sizeof(struct pgt_control_block);
3046 
3047 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3048 	    BUS_DMA_NOWAIT, &sc->sc_cbdmam);
3049 	if (error != 0) {
3050 		printf("%s: can not create DMA tag for control block\n",
3051 		    sc->sc_dev.dv_xname);
3052 		goto out;
3053 	}
3054 
3055 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3056 	    0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3057 	if (error != 0) {
3058 		printf("%s: can not allocate DMA memory for control block\n",
3059 		    sc->sc_dev.dv_xname);
3060 		goto out;
3061 	}
3062 
3063 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
3064 	    size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
3065 	if (error != 0) {
3066 		printf("%s: can not map DMA memory for control block\n",
3067 		    sc->sc_dev.dv_xname);
3068 		goto out;
3069 	}
3070 
3071 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
3072 	    sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
3073 	if (error != 0) {
3074 		printf("%s: can not load DMA map for control block\n",
3075 		    sc->sc_dev.dv_xname);
3076 		goto out;
3077 	}
3078 
3079 	/*
3080 	 * powersave
3081 	 */
3082 	size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;
3083 
3084 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3085 	    BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
3086 	if (error != 0) {
3087 		printf("%s: can not create DMA tag for powersave\n",
3088 		    sc->sc_dev.dv_xname);
3089 		goto out;
3090 	}
3091 
3092 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3093 	   0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3094 	if (error != 0) {
3095 		printf("%s: can not allocate DMA memory for powersave\n",
3096 		    sc->sc_dev.dv_xname);
3097 		goto out;
3098 	}
3099 
3100 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
3101 	    size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
3102 	if (error != 0) {
3103 		printf("%s: can not map DMA memory for powersave\n",
3104 		    sc->sc_dev.dv_xname);
3105 		goto out;
3106 	}
3107 
3108 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
3109 	    sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
3110 	if (error != 0) {
3111 		printf("%s: can not load DMA map for powersave\n",
3112 		    sc->sc_dev.dv_xname);
3113 		goto out;
3114 	}
3115 
3116 	/*
3117 	 * fragments
3118 	 */
3119 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3120 	if (error != 0)
3121 		goto out;
3122 
3123 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3124 	if (error != 0)
3125 		goto out;
3126 
3127 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3128 	if (error != 0)
3129 		goto out;
3130 
3131 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3132 	if (error != 0)
3133 		goto out;
3134 
3135 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
3136 	if (error != 0)
3137 		goto out;
3138 
3139 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
3140 	if (error != 0)
3141 		goto out;
3142 
3143 out:
3144 	if (error) {
3145 		printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
3146 		pgt_dma_free(sc);
3147 	}
3148 
3149 	return (error);
3150 }
3151 
3152 int
3153 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
3154 {
3155 	struct pgt_desc *pd;
3156 	size_t i, qsize;
3157 	int error, nsegs;
3158 
3159 	switch (pq) {
3160 		case PGT_QUEUE_DATA_LOW_RX:
3161 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3162 			break;
3163 		case PGT_QUEUE_DATA_LOW_TX:
3164 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3165 			break;
3166 		case PGT_QUEUE_DATA_HIGH_RX:
3167 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3168 			break;
3169 		case PGT_QUEUE_DATA_HIGH_TX:
3170 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3171 			break;
3172 		case PGT_QUEUE_MGMT_RX:
3173 			qsize = PGT_QUEUE_MGMT_SIZE;
3174 			break;
3175 		case PGT_QUEUE_MGMT_TX:
3176 			qsize = PGT_QUEUE_MGMT_SIZE;
3177 			break;
3178 		default:
3179 			return (EINVAL);
3180 	}
3181 
3182 	for (i = 0; i < qsize; i++) {
3183 		pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3184 
3185 		error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
3186 		    PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
3187 		if (error != 0) {
3188 			printf("%s: can not create DMA tag for fragment\n",
3189 			    sc->sc_dev.dv_xname);
3190 			free(pd, M_DEVBUF);
3191 			break;
3192 		}
3193 
3194 		error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
3195 		    0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
3196 		if (error != 0) {
3197 			printf("%s: error alloc frag %u on queue %u\n",
3198 			    sc->sc_dev.dv_xname, i, pq);
3199 			free(pd, M_DEVBUF);
3200 			break;
3201 		}
3202 
3203 		error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
3204 		    PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
3205 		if (error != 0) {
3206 			printf("%s: error map frag %u on queue %u\n",
3207 			    sc->sc_dev.dv_xname, i, pq);
3208 			free(pd, M_DEVBUF);
3209 			break;
3210 		}
3211 
3212 		if (pgt_queue_is_rx(pq)) {
3213 			error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
3214 			    pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
3215 			if (error != 0) {
3216 				printf("%s: error load frag %u on queue %u\n",
3217 				    sc->sc_dev.dv_xname, i, pq);
3218 				bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
3219 				    nsegs);
3220 				free(pd, M_DEVBUF);
3221 				break;
3222 			}
3223 			pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
3224 		}
3225 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3226 	}
3227 
3228 	return (error);
3229 }
3230 
3231 void
3232 pgt_dma_free(struct pgt_softc *sc)
3233 {
3234 	/*
3235 	 * fragments
3236 	 */
3237 	if (sc->sc_dmat != NULL) {
3238 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3239 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3240 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3241 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3242 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
3243 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
3244 	}
3245 
3246 	/*
3247 	 * powersave
3248 	 */
3249 	if (sc->sc_psmbuf != NULL) {
3250 		bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
3251 		bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
3252 		sc->sc_psmbuf = NULL;
3253 		sc->sc_psmdmam = NULL;
3254 	}
3255 
3256 	/*
3257 	 * control block
3258 	 */
3259 	if (sc->sc_cb != NULL) {
3260 		bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
3261 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
3262 		sc->sc_cb = NULL;
3263 		sc->sc_cbdmam = NULL;
3264 	}
3265 }
3266 
3267 void
3268 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
3269 {
3270 	struct pgt_desc	*pd;
3271 
3272 	while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
3273 		pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
3274 		TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
3275 		if (pd->pd_dmam != NULL) {
3276 			bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
3277 			pd->pd_dmam = NULL;
3278 		}
3279 		bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
3280 		free(pd, M_DEVBUF);
3281 	}
3282 }
3283 
3284 int
3285 pgt_activate(struct device *self, int act)
3286 {
3287 	struct pgt_softc *sc = (struct pgt_softc *)self;
3288 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3289 
3290 	DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, why));
3291 
3292 	switch (act) {
3293 	case DVACT_SUSPEND:
3294 		if (ifp->if_flags & IFF_RUNNING) {
3295 			pgt_stop(sc, SC_NEEDS_RESET);
3296 			pgt_update_hw_from_sw(sc, 0, 0);
3297 		}
3298 		if (sc->sc_power != NULL)
3299 			(*sc->sc_power)(sc, act);
3300 		break;
3301 	case DVACT_RESUME:
3302 		workq_queue_task(NULL, &sc->sc_resume_wqt, 0,
3303 		    pgt_resume, sc, NULL);
3304 		break;
3305 	}
3306 	return 0;
3307 }
3308 
3309 void
3310 pgt_resume(void *arg1, void *arg2)
3311 {
3312 	struct pgt_softc *sc = arg1;
3313 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3314 
3315 	if (sc->sc_power != NULL)
3316 		(*sc->sc_power)(sc, DVACT_RESUME);
3317 
3318 	pgt_stop(sc, SC_NEEDS_RESET);
3319 	pgt_update_hw_from_sw(sc, 0, 0);
3320 
3321 	if (ifp->if_flags & IFF_UP) {
3322 		pgt_init(ifp);
3323 		pgt_update_hw_from_sw(sc, 0, 0);
3324 	}
3325 }
3326