/*	$OpenBSD: pgt.c,v 1.100 2021/03/03 23:58:28 jsg Exp $  */

/*
 * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
 * Copyright (c) 2004 Brian Fundakowski Feldman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/kthread.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/ic/pgtreg.h>
#include <dev/ic/pgtvar.h>

#include <dev/ic/if_wireg.h>
#include <dev/ic/if_wi_ieee.h>
#include <dev/ic/if_wivar.h>

#ifdef PGT_DEBUG
#define DPRINTF(x)	do { printf x; } while (0)
#else
#define DPRINTF(x)
#endif

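/*
 * Set an OID on the device and bail out on failure; the macro relies
 * on "break", so it is only usable inside a switch or loop.
 */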
#define	SETOID(oid, var, size) {					\
	if (pgt_oid_set(sc, oid, var, size) != 0)			\
		break;							\
}

/*
 * This is a driver for the Intersil Prism family of 802.11g network cards,
 * based upon version 1.2 of the Linux driver and firmware found at
 * http://www.prism54.org/.
 */

#define SCAN_TIMEOUT			5	/* 5 seconds */

struct cfdriver pgt_cd = {
	NULL, "pgt", DV_IFNET
};

void	 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
int	 pgt_media_change(struct ifnet *ifp);
void	 pgt_write_memory_barrier(struct pgt_softc *);
uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
void	 pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_debug_events(struct pgt_softc *, const char *);
uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
void	 pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *);
void	 pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_firmware(struct pgt_softc *);
void	 pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
	     struct pgt_frag *);
int	 pgt_reset(struct pgt_softc *);
void	 pgt_stop(struct pgt_softc *, unsigned int);
void	 pgt_reboot(struct pgt_softc *);
void	 pgt_init_intr(struct pgt_softc *);
void	 pgt_update_intr(struct pgt_softc *, int);
struct mbuf
	*pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
	     struct mbuf *, struct ieee80211_node **);
void	 pgt_input_frames(struct pgt_softc *, struct mbuf *);
void	 pgt_wakeup_intr(struct pgt_softc *);
void	 pgt_sleep_intr(struct pgt_softc *);
void	 pgt_empty_traps(struct pgt_softc_kthread *);
void	 pgt_per_device_kthread(void *);
void	 pgt_async_reset(struct pgt_softc *);
void	 pgt_async_update(struct pgt_softc *);
void	 pgt_txdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_rxdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
void	 pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
struct mbuf
	*pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
int	 pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
int	 pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
int	 pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
void	 pgt_state_dump(struct pgt_softc *);
int	 pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
void	 pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *, uint16_t, int);
void	 pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
struct ieee80211_node
	*pgt_ieee80211_node_alloc(struct ieee80211com *);
void	 pgt_ieee80211_newassoc(struct ieee80211com *,
	     struct ieee80211_node *, int);
void	 pgt_ieee80211_node_free(struct ieee80211com *,
	    struct ieee80211_node *);
void	 pgt_ieee80211_node_copy(struct ieee80211com *,
	     struct ieee80211_node *,
	     const struct ieee80211_node *);
int	 pgt_ieee80211_send_mgmt(struct ieee80211com *,
	     struct ieee80211_node *, int, int, int);
int	 pgt_net_attach(struct pgt_softc *);
void	 pgt_start(struct ifnet *);
int	 pgt_ioctl(struct ifnet *, u_long, caddr_t);
void	 pgt_obj_bss2scanres(struct pgt_softc *,
	     struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
void	 node_mark_active_ap(void *, struct ieee80211_node *);
void	 node_mark_active_adhoc(void *, struct ieee80211_node *);
void	 pgt_watchdog(struct ifnet *);
int	 pgt_init(struct ifnet *);
void	 pgt_update_hw_from_sw(struct pgt_softc *, int);
void	 pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
	     struct pgt_obj_mlme *);
void	 pgt_update_sw_from_hw(struct pgt_softc *,
	     struct pgt_async_trap *, struct mbuf *);
int	 pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
int	 pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
int	 pgt_dma_alloc(struct pgt_softc *);
int	 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_dma_free(struct pgt_softc *);
void	 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_wakeup(struct pgt_softc *);

void
pgt_write_memory_barrier(struct pgt_softc *sc)
{
	bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
	    BUS_SPACE_BARRIER_WRITE);
}

uint32_t
pgt_read_4(struct pgt_softc *sc, uint16_t offset)
{
	return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
}

void
pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
}

/*
 * Write out 4 bytes and cause a PCI flush by reading back in on a
 * harmless register.
 */
void
pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
	(void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
}

/*
 * Print the state of events in the queues from an interrupt or a trigger.
 */
void
pgt_debug_events(struct pgt_softc *sc, const char *when)
{
#define	COUNT(i)							\
	letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -			\
	letoh32(sc->sc_cb->pcb_device_curfrag[i])
	if (sc->sc_debug & SC_DEBUG_EVENTS)
		DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
		    sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
		    COUNT(3), COUNT(4), COUNT(5)));
#undef COUNT
}

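/*
 * Number of fragments the device has yet to consume on a queue: the
 * driver's ring index minus the device's, relying on unsigned 32-bit
 * wrap-around of the counters.
 */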
uint32_t
pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
{
	return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
	    letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
}

void
pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = 0;

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
}

int
pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_desc *pd)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
	    PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("%s: unable to load %s tx DMA: %d\n",
		    sc->sc_dev.dv_xname,
		    pgt_queue_is_data(pq) ? "data" : "mgmt", error));
		return (error);
	}
	pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = htole16(0);

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return (0);
}

void
pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
	pd->pd_dmaaddr = 0;
}

int
pgt_load_firmware(struct pgt_softc *sc)
{
	int error, reg, dirreg, fwoff, ucodeoff, fwlen;
	uint8_t *ucode;
	uint32_t *uc;
	size_t size;
	char *name;

	if (sc->sc_flags & SC_ISL3877)
		name = "pgt-isl3877";
	else
		name = "pgt-isl3890";	/* includes isl3880 */

	error = loadfirmware(name, &ucode, &size);

	if (error != 0) {
		DPRINTF(("%s: error %d, could not read firmware %s\n",
		    sc->sc_dev.dv_xname, error, name));
		return (EIO);
	}

	if (size & 3) {
		DPRINTF(("%s: bad firmware size %zu\n",
		    sc->sc_dev.dv_xname, size));
		free(ucode, M_DEVBUF, 0);
		return (EINVAL);
	}

	pgt_reboot(sc);

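	/*
	 * Copy the image into adapter memory one 32-bit word at a time
	 * through the direct memory window, rebasing the window every
	 * PGT_DIRECT_MEMORY_SIZE bytes; the last word of each window is
	 * written with a flushing read-back.
	 */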
	fwoff = 0;
	ucodeoff = 0;
	uc = (uint32_t *)ucode;
	reg = PGT_FIRMWARE_INTERNAL_OFFSET;
	while (fwoff < size) {
		pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);

		if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
			fwlen = PGT_DIRECT_MEMORY_SIZE;
		else
			fwlen = size - fwoff;

		dirreg = PGT_DIRECT_MEMORY_OFFSET;
		while (fwlen > 4) {
			pgt_write_4(sc, dirreg, uc[ucodeoff]);
			fwoff += 4;
			dirreg += 4;
			reg += 4;
			fwlen -= 4;
			ucodeoff++;
		}
		pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
		fwoff += 4;
		dirreg += 4;
		reg += 4;
		fwlen -= 4;
		ucodeoff++;
	}
	DPRINTF(("%s: %d bytes microcode loaded from %s\n",
	    sc->sc_dev.dv_xname, fwoff, name));

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
	reg |= PGT_CTRL_STAT_RAMBOOT;
	pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	free(ucode, M_DEVBUF, 0);

	return (0);
}

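/*
 * (Re)bind the descriptors on a queue's free list to the slots of its
 * fragment ring and reset the ring indices to their post-reset state.
 */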
void
pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_frag *pqfrags)
{
	struct pgt_desc *pd;
	unsigned int i;

	sc->sc_cb->pcb_device_curfrag[pq] = 0;
	i = 0;
	/* XXX why only freeq ??? */
	TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
		pd->pd_fragnum = i;
		pd->pd_fragp = &pqfrags[i];
		if (pgt_queue_is_rx(pq))
			pgt_reinit_rx_desc_frag(sc, pd);
		i++;
	}
	sc->sc_freeq_count[pq] = i;
	/*
	 * The ring buffer describes how many free buffers are available from
	 * the host (for receive queues) or how many are pending (for
	 * transmit queues).
	 */
	if (pgt_queue_is_rx(pq))
		sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
	else
		sc->sc_cb->pcb_driver_curfrag[pq] = 0;
}

/*
 * Turn off interrupts, reset the device (possibly loading firmware),
 * and put everything in a known state.
 */
int
pgt_reset(struct pgt_softc *sc)
{
	int error;

	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * Set up the management receive queue, assuming there are no
	 * requests in progress.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
	    &sc->sc_cb->pcb_data_low_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
	    &sc->sc_cb->pcb_data_low_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
	    &sc->sc_cb->pcb_data_high_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
	    &sc->sc_cb->pcb_data_high_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
	    &sc->sc_cb->pcb_mgmt_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
	    &sc->sc_cb->pcb_mgmt_tx[0]);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);

	/* load firmware */
	if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
		error = pgt_load_firmware(sc);
		if (error) {
			printf("%s: firmware load failed\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
		sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
		DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
	}

	/* upload the control block's DMA address */
	pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
	    htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
	DELAY(PGT_WRITEIO_DELAY);

	/* send a reset event */
	pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
	DELAY(PGT_WRITEIO_DELAY);

	/* await only the initialization interrupt */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
	DELAY(PGT_WRITEIO_DELAY);

	return (0);
}

/*
 * If we're trying to reset and the device has seemingly not been
 * detached, attempt the reset, retrying once before giving up.
 */
void
pgt_stop(struct pgt_softc *sc, unsigned int flag)
{
	struct ieee80211com *ic;
	unsigned int wokeup;
	int tryagain = 0;

	ic = &sc->sc_ic;

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	sc->sc_flags |= SC_UNINITIALIZED;
	sc->sc_flags |= flag;

	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);

trying_again:
	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/* reboot card */
	pgt_reboot(sc);

	do {
		wokeup = 0;
		/*
		 * We don't expect to be woken up, just to drop the lock
		 * and time out.  Only tx queues can have anything valid
		 * on them outside of an interrupt.
		 */
		while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
			struct pgt_mgmt_desc *pmd;

			pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			pmd->pmd_error = ENETRESET;
			wakeup_one(pmd);
			if (sc->sc_debug & SC_DEBUG_MGMT)
				DPRINTF(("%s: queue: mgmt %p <- %#x "
				    "(drained)\n", sc->sc_dev.dv_xname,
				    pmd, pmd->pmd_oid));
			wokeup++;
		}
		if (wokeup > 0) {
			if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
				sc->sc_flags &= ~flag;
				return;
			}
		}
	} while (wokeup > 0);

	if (flag == SC_NEEDS_RESET) {
		int error;

		DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
		sc->sc_flags &= ~SC_POWERSAVE;
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		error = pgt_reset(sc);
		if (error == 0) {
			tsleep_nsec(&sc->sc_flags, 0, "pgtres", SEC_TO_NSEC(1));
			if (sc->sc_flags & SC_UNINITIALIZED) {
				printf("%s: not responding\n",
				    sc->sc_dev.dv_xname);
				/* Thud.  It was probably removed. */
				if (tryagain)
					panic("pgt went for lunch"); /* XXX */
				tryagain = 1;
			} else {
				/* await all interrupts */
				pgt_write_4_flush(sc, PGT_REG_INT_EN,
				    PGT_INT_STAT_SOURCES);
				DELAY(PGT_WRITEIO_DELAY);
				ic->ic_if.if_flags |= IFF_RUNNING;
			}
		}

		if (tryagain)
			goto trying_again;

		sc->sc_flags &= ~flag;
		if (ic->ic_if.if_flags & IFF_RUNNING)
			pgt_update_hw_from_sw(sc,
			    ic->ic_state != IEEE80211_S_INIT);
	}

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ic->ic_if.if_snd);
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

void
pgt_attach(struct device *self)
{
	struct pgt_softc *sc = (struct pgt_softc *)self;
	int error;

	/* debug flags */
	//sc->sc_debug |= SC_DEBUG_QUEUES;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_MGMT;
	sc->sc_debug |= SC_DEBUG_UNEXPECTED;
	//sc->sc_debug |= SC_DEBUG_TRIGGER;	/* verbose */
	//sc->sc_debug |= SC_DEBUG_EVENTS;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_POWER;
	sc->sc_debug |= SC_DEBUG_TRAP;
	sc->sc_debug |= SC_DEBUG_LINK;
	//sc->sc_debug |= SC_DEBUG_RXANNEX;
	//sc->sc_debug |= SC_DEBUG_RXFRAG;
	//sc->sc_debug |= SC_DEBUG_RXETHER;

	/* enable card if possible */
	if (sc->sc_enable != NULL)
		(*sc->sc_enable)(sc);

	error = pgt_dma_alloc(sc);
	if (error)
		return;

	sc->sc_ic.ic_if.if_softc = sc;
	TAILQ_INIT(&sc->sc_mgmtinprog);
	TAILQ_INIT(&sc->sc_kthread.sck_traps);
	sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
	sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;

	error = pgt_reset(sc);
	if (error)
		return;

	tsleep_nsec(&sc->sc_flags, 0, "pgtres", SEC_TO_NSEC(1));
	if (sc->sc_flags & SC_UNINITIALIZED) {
		printf("%s: not responding\n", sc->sc_dev.dv_xname);
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		return;
	} else {
		/* await all interrupts */
		pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
		DELAY(PGT_WRITEIO_DELAY);
	}

	error = pgt_net_attach(sc);
	if (error)
		return;

	if (kthread_create(pgt_per_device_kthread, sc, NULL,
	    sc->sc_dev.dv_xname) != 0)
		return;

	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

int
pgt_detach(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
		/* device was not initialized correctly, so leave early */
		goto out;

	/* stop card */
	pgt_stop(sc, SC_DYING);
	pgt_reboot(sc);

	ieee80211_ifdetach(&sc->sc_ic.ic_if);
	if_detach(&sc->sc_ic.ic_if);

out:
	/* disable card if possible */
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);

	pgt_dma_free(sc);

	return (0);
}

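/*
 * Reboot the adapter by clearing RAMBOOT and pulsing RESET; without
 * RAMBOOT set it presumably comes back up on its ROM code.
 */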
void
pgt_reboot(struct pgt_softc *sc)
{
	uint32_t reg;

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_RESET_DELAY);
}

void
pgt_init_intr(struct pgt_softc *sc)
{
	if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: spurious initialization\n",
			    sc->sc_dev.dv_xname));
	} else {
		sc->sc_flags &= ~SC_UNINITIALIZED;
		wakeup(&sc->sc_flags);
	}
}

/*
 * Service pending queue events.  Under PGT_BUGGY_INTERRUPT_RECOVERY,
 * calling this with a nonzero "hack" argument skips the data receive
 * queues.
 */
void
pgt_update_intr(struct pgt_softc *sc, int hack)
{
	/* priority order */
	enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
	    PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
	    PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
	    PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
	};
	struct mbuf *m;
	uint32_t npend;
	unsigned int dirtycount;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_debug_events(sc, "intr");
	/*
	 * Check for tx completions in their dirty queues, and move
	 * completed rx fragments into theirs.
	 */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		size_t qdirty, qfree;

		qdirty = sc->sc_dirtyq_count[pqs[i]];
		qfree = sc->sc_freeq_count[pqs[i]];
		/*
		 * We want the wrap-around here.
		 */
		if (pgt_queue_is_rx(pqs[i])) {
			int data;

			data = pgt_queue_is_data(pqs[i]);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
			if (hack && data)
				continue;
#endif
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			/*
			 * Receive queues are cleaned up below, so qdirty
			 * must always be 0.
			 */
			if (npend > qfree) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: rx queue [%u] "
					    "overflowed by %zu\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qfree));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			while (qfree-- > npend)
				pgt_rxdone(sc, pqs[i]);
		} else {
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			if (npend > qdirty) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: tx queue [%u] "
					    "underflowed by %zu\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qdirty));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			/*
			 * Some transmits completed; on data queues, clear
			 * the watchdog and let transmission resume.
			 */
			if (qdirty > npend) {
				if (pgt_queue_is_data(pqs[i])) {
					sc->sc_ic.ic_if.if_timer = 0;
					ifq_clr_oactive(
					    &sc->sc_ic.ic_if.if_snd);
				}
				while (qdirty-- > npend)
					pgt_txdone(sc, pqs[i]);
			}
		}
	}

	/*
	 * This is the deferred completion for received management frames
	 * and where we queue network frames for stack input.
	 */
	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
		struct pgt_mgmt_desc *pmd;

		pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
		/*
		 * If there is no mgmt request in progress or the operation
		 * returned is explicitly a trap, this pmd will essentially
		 * be ignored.
		 */
		pgt_mgmtrx_completion(sc, pmd);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));

	/*
	 * Write out what we've finished with.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
}

struct mbuf *
pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
    struct mbuf *m, struct ieee80211_node **ni)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *frame;
	struct llc *snap;

	ic = &sc->sc_ic;
	if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
		*ni = ieee80211_ref_node(ic->ic_bss);
		(*ni)->ni_inact = 0;
		return (m);
	}

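	/*
	 * Prepend room for an 802.11 header plus LLC/SNAP encapsulation
	 * and fill both in from the saved Ethernet header.
	 */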
	M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
	if (m == NULL)
		return (m);
	if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
		m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
		if (m == NULL)
			return (m);
	}
	frame = mtod(m, struct ieee80211_frame *);
	snap = (struct llc *)&frame[1];
	if (ni != NULL) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			*ni = ieee80211_ref_node(ic->ic_bss);
		}
#ifndef IEEE80211_STA_ONLY
		else {
			*ni = ieee80211_find_node(ic, eh->ether_shost);
			/*
			 * Make up associations for ad-hoc mode.  To support
			 * ad-hoc WPA, we'll need to maintain a bounded
			 * pool of ad-hoc stations.
			 */
			if (*ni == NULL &&
			    ic->ic_opmode != IEEE80211_M_HOSTAP) {
				*ni = ieee80211_dup_bss(ic, eh->ether_shost);
				if (*ni != NULL) {
					(*ni)->ni_associd = 1;
					ic->ic_newassoc(ic, *ni, 1);
				}
			}
			if (*ni == NULL) {
				m_freem(m);
				return (NULL);
			}
		}
#endif
		(*ni)->ni_inact = 0;
	}
	snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
	snap->llc_control = LLC_UI;
	snap->llc_snap.org_code[0] = 0;
	snap->llc_snap.org_code[1] = 0;
	snap->llc_snap.org_code[2] = 0;
	snap->llc_snap.ether_type = eh->ether_type;
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
	/* Doesn't look like much of the 802.11 header is available. */
	*(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
	/*
	 * Translate the addresses; WDS is not handled.
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
		break;
	case IEEE80211_M_HOSTAP:
		/* HostAP forwarding defaults to being done on firmware. */
		frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
		break;
#endif
	default:
		break;
	}
	return (m);
}

void
pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
{
	struct ether_header eh;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp;
	struct ieee80211_channel *chan;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct ieee80211com *ic;
	struct pgt_rx_annex *pra;
	struct pgt_rx_header *pha;
	struct mbuf *next;
	unsigned int n;
	uint32_t rstamp;
	uint8_t rssi;

	ic = &sc->sc_ic;
	ifp = &ic->ic_if;
	for (next = m; m != NULL; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			if (m->m_len < sizeof(*pha)) {
				m = m_pullup(m, sizeof(*pha));
				if (m == NULL) {
					if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
						DPRINTF(("%s: m_pullup "
						    "failure\n",
						    sc->sc_dev.dv_xname));
					ifp->if_ierrors++;
					continue;
				}
			}
			pha = mtod(m, struct pgt_rx_header *);
			pra = NULL;
			goto input;
		}

		if (m->m_len < sizeof(*pra)) {
			m = m_pullup(m, sizeof(*pra));
			if (m == NULL) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: m_pullup failure\n",
					    sc->sc_dev.dv_xname));
				ifp->if_ierrors++;
				continue;
			}
		}
		pra = mtod(m, struct pgt_rx_annex *);
		pha = &pra->pra_header;
		if (sc->sc_debug & SC_DEBUG_RXANNEX)
			DPRINTF(("%s: rx annex: ? %04x "
			    "len %u clock %u flags %02x ? %02x rate %u ? %02x "
			    "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pha->pra_unknown0),
			    letoh16(pha->pra_length),
			    letoh32(pha->pra_clock), pha->pra_flags,
			    pha->pra_unknown1, pha->pra_rate,
			    pha->pra_unknown2, letoh32(pha->pra_frequency),
			    pha->pra_unknown3, pha->pra_rssi,
			    pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
		if (sc->sc_debug & SC_DEBUG_RXETHER)
			DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    ether_sprintf(pra->pra_ether_dhost),
			    ether_sprintf(pra->pra_ether_shost),
			    ntohs(pra->pra_ether_type)));

		memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
		memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
		eh.ether_type = pra->pra_ether_type;

input:
		/*
		 * This flag is set if e.g. packet could not be decrypted.
		 */
		if (pha->pra_flags & PRA_FLAG_BAD) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		/*
		 * After getting what we want, chop off the annex, then
		 * turn into something that looks like it really was
		 * 802.11.
		 */
		rssi = pha->pra_rssi;
		rstamp = letoh32(pha->pra_clock);
		n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
		if (n <= IEEE80211_CHAN_MAX)
			chan = &ic->ic_channels[n];
		else
			chan = ic->ic_bss->ni_chan;
		/* Send to 802.3 listeners. */
		if (pra) {
			m_adj(m, sizeof(*pra));
		} else
			m_adj(m, sizeof(*pha));

		m = pgt_ieee80211_encap(sc, &eh, m, &ni);
		if (m != NULL) {
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;

				tap->wr_flags = 0;
				tap->wr_chan_freq = htole16(chan->ic_freq);
				tap->wr_chan_flags = htole16(chan->ic_flags);
				tap->wr_rssi = rssi;
				tap->wr_max_rssi = ic->ic_max_rssi;

				mb.m_data = (caddr_t)tap;
				mb.m_len = sc->sc_rxtap_len;
				mb.m_next = m;
				mb.m_nextpkt = NULL;
				mb.m_type = 0;
				mb.m_flags = 0;
				bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
			}
#endif
			rxi.rxi_flags = 0;
			ni->ni_rssi = rxi.rxi_rssi = rssi;
			ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
			ieee80211_inputm(ifp, m, ni, &rxi, &ml);
			/*
			 * The frame may have caused the node to be marked for
			 * reclamation (e.g. in response to a DEAUTH message),
			 * so use release_node here instead of unref_node.
			 */
			if (ni == ic->ic_bss)
				ieee80211_unref_node(&ni);
			else
				ieee80211_release_node(&sc->sc_ic, ni);
		} else {
			ifp->if_ierrors++;
		}
	}
	if_input(ifp, &ml);
}

void
pgt_wakeup_intr(struct pgt_softc *sc)
{
	int shouldupdate;
	int i;

	shouldupdate = 0;
	/* Check for any queues being empty before updating. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			shouldupdate = pgt_queue_frags_pending(sc, i);
		else
			shouldupdate = pgt_queue_frags_pending(sc, i) <
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		shouldupdate = 1;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: wakeup interrupt (update = %d)\n",
		    sc->sc_dev.dv_xname, shouldupdate));
	sc->sc_flags &= ~SC_POWERSAVE;
	if (shouldupdate) {
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_sleep_intr(struct pgt_softc *sc)
{
	int allowed;
	int i;

	allowed = 1;
	/* Check for any queues not being empty before allowing. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			allowed = pgt_queue_frags_pending(sc, i) == 0;
		else
			allowed = pgt_queue_frags_pending(sc, i) >=
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		allowed = 0;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
		    sc->sc_dev.dv_xname, allowed));
	if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
		sc->sc_flags |= SC_POWERSAVE;
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_empty_traps(struct pgt_softc_kthread *sck)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;

	while (!TAILQ_EMPTY(&sck->sck_traps)) {
		pa = TAILQ_FIRST(&sck->sck_traps);
		TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
		m = pa->pa_mbuf;
		m_freem(m);
	}
}

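/*
 * Per-device kthread: handles deferred resets, firmware traps and
 * software state updates outside of interrupt context.
 */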
void
pgt_per_device_kthread(void *argp)
{
	struct pgt_softc *sc;
	struct pgt_softc_kthread *sck;
	struct pgt_async_trap *pa;
	struct mbuf *m;
	int s;

	sc = argp;
	sck = &sc->sc_kthread;
	while (!sck->sck_exit) {
		if (!sck->sck_update && !sck->sck_reset &&
		    TAILQ_EMPTY(&sck->sck_traps))
			tsleep_nsec(&sc->sc_kthread, 0, "pgtkth", INFSLP);
		if (sck->sck_reset) {
			DPRINTF(("%s: [thread] async reset\n",
			    sc->sc_dev.dv_xname));
			sck->sck_reset = 0;
			sck->sck_update = 0;
			pgt_empty_traps(sck);
			s = splnet();
			pgt_stop(sc, SC_NEEDS_RESET);
			splx(s);
		} else if (!TAILQ_EMPTY(&sck->sck_traps)) {
			DPRINTF(("%s: [thread] got a trap\n",
			    sc->sc_dev.dv_xname));
			pa = TAILQ_FIRST(&sck->sck_traps);
			TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
			m = pa->pa_mbuf;
			m_adj(m, sizeof(*pa));
			pgt_update_sw_from_hw(sc, pa, m);
			m_freem(m);
		} else if (sck->sck_update) {
			sck->sck_update = 0;
			pgt_update_sw_from_hw(sc, NULL, NULL);
		}
	}
	pgt_empty_traps(sck);
	kthread_exit(0);
}

void
pgt_async_reset(struct pgt_softc *sc)
{
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	sc->sc_kthread.sck_reset = 1;
	wakeup(&sc->sc_kthread);
}

void
pgt_async_update(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_DYING)
		return;
	sc->sc_kthread.sck_update = 1;
	wakeup(&sc->sc_kthread);
}

int
pgt_intr(void *arg)
{
	struct pgt_softc *sc;
	struct ifnet *ifp;
	uint32_t reg;

	sc = arg;
	ifp = &sc->sc_ic.ic_if;

	/*
	 * Here the Linux driver ands in the value of the INT_EN register,
	 * and masks off everything but the documented interrupt bits.  Why?
	 *
	 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
	 * other times.
	 */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
	    sc->sc_flags & SC_POWERSAVE) {
		/*
		 * Don't try handling the interrupt in sleep mode.
		 */
		reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
		if (reg & PGT_CTRL_STAT_SLEEPMODE)
			return (0);
	}
	reg = pgt_read_4(sc, PGT_REG_INT_STAT);
	if (reg == 0)
		return (0); /* This interrupt is not from us */

	pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
	if (reg & PGT_INT_STAT_INIT)
		pgt_init_intr(sc);
	if (reg & PGT_INT_STAT_UPDATE) {
		pgt_update_intr(sc, 0);
		/*
		 * If we got an update, it's not really asleep.
		 */
		sc->sc_flags &= ~SC_POWERSAVE;
		/*
		 * Pretend I have any idea what the documentation
		 * would say, and just give it a shot sending an
		 * "update" after acknowledging the interrupt
		 * bits and writing out the new control block.
		 */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
	if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
		pgt_sleep_intr(sc);
	if (reg & PGT_INT_STAT_WAKEUP)
		pgt_wakeup_intr(sc);

	if (sc->sc_flags & SC_INTR_RESET) {
		sc->sc_flags &= ~SC_INTR_RESET;
		pgt_async_reset(sc);
	}

	if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
		DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
		    sc->sc_dev.dv_xname,
		    reg & ~PGT_INT_STAT_SOURCES,
		    pgt_read_4(sc, PGT_REG_CTRL_STAT)));
	}

	if (!ifq_empty(&ifp->if_snd))
		pgt_start(ifp);

	return (1);
}

void
pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
	TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* Management frames want completion information. */
	if (sc->sc_debug & SC_DEBUG_QUEUES) {
		DPRINTF(("%s: queue: tx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
		if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
			struct pgt_mgmt_frame *pmf;

			pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
			DPRINTF(("%s: queue: txmgmt %p <- "
			    "(ver %u, op %u, flags %#x)\n",
			    sc->sc_dev.dv_xname,
			    pd, pmf->pmf_version, pmf->pmf_operation,
			    pmf->pmf_flags));
		}
	}
	pgt_unload_tx_desc_frag(sc, pd);
}

void
pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: rx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
	if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
	    pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
		DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
		    sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
}

/*
 * Traps are generally used for the firmware to report changes in state
 * back to the host.  Mostly this processes changes in link state, but
 * it needs to also be used to initiate WPA and other authentication
 * schemes in terms of client (station) or server (access point).
 */
void
pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
    size_t size)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;
	char *p;
	size_t total;

	if (sc->sc_flags & SC_DYING)
		return;

	total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
	if (total > MLEN) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m = NULL;
		}
	} else
		m = m_get(M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;
	else
		m->m_len = total;

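	/* Layout: pgt_async_trap header first, then the OID, then the data. */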
	pa = mtod(m, struct pgt_async_trap *);
	p = mtod(m, char *) + sizeof(*pa);
	*(uint32_t *)p = oid;
	p += sizeof(uint32_t);
	memcpy(p, trapdata, size);
	pa->pa_mbuf = m;

	TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
	wakeup(&sc->sc_kthread);
}

/*
 * Process a completed management response (all requests should be
 * responded to, quickly) or an event (trap).
 */
void
pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	uint32_t oid, size;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
	TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
	sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
	    pd, pd_link);
	sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
	if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt desc too small: %u\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pd->pd_fragp->pf_size)));
		goto out_nopmd;
	}
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	if (pmf->pmf_version != PMF_VER) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt version %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_version));
		goto out_nopmd;
	}
	if (pmf->pmf_device != PMF_DEV) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt dev %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_device));
		goto out;
	}
	if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt flags %x\n",
			    sc->sc_dev.dv_xname,
			    pmf->pmf_flags & ~PMF_FLAG_VALID));
		goto out;
	}
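	/*
	 * Responses may arrive in either byte order; PMF_FLAG_LE marks
	 * little-endian frames.
	 */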
	if (pmf->pmf_flags & PMF_FLAG_LE) {
		oid = letoh32(pmf->pmf_oid);
		size = letoh32(pmf->pmf_size);
	} else {
		oid = betoh32(pmf->pmf_oid);
		size = betoh32(pmf->pmf_size);
	}
	if (pmf->pmf_operation == PMF_OP_TRAP) {
		pmd = NULL; /* ignored */
		DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmf->pmf_operation, oid, size));
		pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
		    min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
		goto out_nopmd;
	}
	if (pmd == NULL) {
		if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
			DPRINTF(("%s: spurious mgmt received "
			    "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
			    pmf->pmf_operation, oid, size));
		goto out_nopmd;
	}
	switch (pmf->pmf_operation) {
	case PMF_OP_RESPONSE:
		pmd->pmd_error = 0;
		break;
	case PMF_OP_ERROR:
		pmd->pmd_error = EPERM;
		goto out;
	default:
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt op %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_operation));
		pmd->pmd_error = EIO;
		goto out;
	}
	if (oid != pmd->pmd_oid) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
			    sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
		pmd->pmd_oid = oid;
	}
	if (pmd->pmd_recvbuf != NULL) {
		if (size > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
				    sc->sc_dev.dv_xname, oid, size));
			pmd->pmd_error = EIO;
			goto out;
		}
		if (size > pmd->pmd_len)
			pmd->pmd_error = ENOMEM;
		else
			memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
			    size);
		pmd->pmd_len = size;
	}

out:
	TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
	wakeup_one(pmd);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
out_nopmd:
	pgt_reinit_rx_desc_frag(sc, pd);
}

/*
 * Queue packets for reception and defragmentation.  It is unknown
 * whether the rx queue can become full enough to start, but not
 * finish, queueing a fragmented packet.
 */
struct mbuf *
pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct ifnet *ifp;
	struct pgt_desc *pd;
	struct mbuf *top, **mp, *m;
	size_t datalen;
	uint16_t morefrags, dataoff;
	int tlen = 0;

	ifp = &sc->sc_ic.ic_if;
	m = NULL;
	top = NULL;
	mp = &top;

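	/* Chain one mbuf per hardware fragment until PF_FLAG_MF clears. */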
	while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
		sc->sc_dirtyq_count[pq]--;
		datalen = letoh16(pd->pd_fragp->pf_size);
		dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
		morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);

		if (sc->sc_debug & SC_DEBUG_RXFRAG)
			DPRINTF(("%s: rx frag: len %zu memoff %u flags %x\n",
			    sc->sc_dev.dv_xname, datalen, dataoff,
			    pd->pd_fragp->pf_flags));

		/* Add the (two+?) bytes for the header. */
		if (datalen + dataoff > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: data rx too big: %zu\n",
				    sc->sc_dev.dv_xname, datalen));
			goto fail;
		}

		if (m == NULL)
			MGETHDR(m, M_DONTWAIT, MT_DATA);
		else
			m = m_get(M_DONTWAIT, MT_DATA);

		if (m == NULL)
			goto fail;
		if (datalen > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_free(m);
				goto fail;
			}
		}
		bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
		m->m_len = datalen;
		tlen += datalen;

		*mp = m;
		mp = &m->m_next;

		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
		sc->sc_freeq_count[pq]++;
		pgt_reinit_rx_desc_frag(sc, pd);

		if (!morefrags)
			break;
	}

	if (top) {
		top->m_pkthdr.len = tlen;
	}
	return (top);

fail:
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	pgt_reinit_rx_desc_frag(sc, pd);

	ifp->if_ierrors++;
	m_freem(top);
	return (NULL);
}

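/*
 * OID accessor helpers: "get" only receives, "set" only sends, and
 * "retrieve" sends a buffer that is overwritten with the response.
 * All are synchronous wrappers around pgt_mgmt_request().
 */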
int
pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
    void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_recvbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure getting oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

int
pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
    void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_sendbuf = arg;
	pmd.pmd_recvbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure retrieving oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

int
pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
    const void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_sendbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure setting oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

void
pgt_state_dump(struct pgt_softc *sc)
{
	printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    pgt_read_4(sc, PGT_REG_CTRL_STAT),
	    pgt_read_4(sc, PGT_REG_INT_STAT));

	printf("%s: state dump: driver curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[5]));

	printf("%s: state dump: device curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_device_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[5]));
}

int
pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	int error, i, ret;

	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return (EIO);
	if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
		return (ENOMEM);
	pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
	if (pd == NULL)
		return (ENOMEM);
	error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
	if (error)
		return (error);
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	pmf->pmf_version = PMF_VER;
	/* "get" and "retrieve" operations look the same */
	if (pmd->pmd_recvbuf != NULL)
		pmf->pmf_operation = PMF_OP_GET;
	else
		pmf->pmf_operation = PMF_OP_SET;
	pmf->pmf_oid = htobe32(pmd->pmd_oid);
	pmf->pmf_device = PMF_DEV;
	pmf->pmf_flags = 0;
	pmf->pmf_size = htobe32(pmd->pmd_len);
	/* "set" and "retrieve" operations both send data */
	if (pmd->pmd_sendbuf != NULL)
		memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len);
	else
		bzero(pmf + 1, pmd->pmd_len);
	pmd->pmd_error = EINPROGRESS;
	TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
	pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
	    sizeof(*pmf) + pmd->pmd_len, 0);
	/*
	 * Try for one second, triggering 10 times.
	 *
	 * Do our best to work around seemingly buggy CardBus controllers
	 * on Soekris 4521 that fail to get interrupts with alarming
	 * regularity: run as if an interrupt occurred and service every
	 * queue except for mbuf reception.
	 */
	i = 0;
	do {
		ret = tsleep_nsec(pmd, 0, "pgtmgm", MSEC_TO_NSEC(100));
		if (ret != EWOULDBLOCK)
			break;
		if (pmd->pmd_error != EINPROGRESS)
			break;
		if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
			pmd->pmd_error = EIO;
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			break;
		}
		if (i != 9)
			pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
		pgt_update_intr(sc, 0);
#endif
	} while (i++ < 10);

	if (pmd->pmd_error == EINPROGRESS) {
		printf("%s: timeout waiting for management "
		    "packet response to %#x\n",
		    sc->sc_dev.dv_xname, pmd->pmd_oid);
		TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			pgt_state_dump(sc);
		pgt_async_reset(sc);
		error = ETIMEDOUT;
	} else
		error = 0;

	return (error);
}

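/*
 * Hand a loaded descriptor to the device: move it onto the dirty
 * queue, publish the new fragment count in the control block and,
 * unless more fragments of the same frame follow, trigger an update.
 */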
void
pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
    uint16_t len, int morecoming)
{
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
		    pd->pd_fragnum, pq));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	if (morecoming)
		pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
	pd->pd_fragp->pf_size = htole16(len);
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	sc->sc_cb->pcb_driver_curfrag[pq] =
	    htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
	if (!morecoming)
		pgt_maybe_trigger(sc, pq);
}

void
pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
{
	unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
	uint32_t reg;

	if (sc->sc_debug & SC_DEBUG_TRIGGER)
		DPRINTF(("%s: triggered by queue [%u]\n",
		    sc->sc_dev.dv_xname, pq));
	pgt_debug_events(sc, "trig");
	if (sc->sc_flags & SC_POWERSAVE) {
		/* Magic values ahoy? */
		if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
			do {
				reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
				if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
					DELAY(PGT_WRITEIO_DELAY);
			} while (tries-- != 0);
			if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: timeout triggering from "
					    "sleep mode\n",
					    sc->sc_dev.dv_xname));
				pgt_async_reset(sc);
				return;
			}
		}
		pgt_write_4_flush(sc, PGT_REG_DEV_INT,
		    PGT_DEV_INT_WAKEUP);
		DELAY(PGT_WRITEIO_DELAY);
		/* read the status back in */
		(void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
		DELAY(PGT_WRITEIO_DELAY);
	} else {
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

struct ieee80211_node *
pgt_ieee80211_node_alloc(struct ieee80211com *ic)
{
	struct pgt_ieee80211_node *pin;

	pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pin != NULL) {
		pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
	}
	return (struct ieee80211_node *)pin;
}

void
pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
    int reallynew)
{
	ieee80211_ref_node(ni);
}

void
pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct pgt_ieee80211_node *pin;

	pin = (struct pgt_ieee80211_node *)ni;
	free(pin, M_DEVBUF, 0);
}

void
pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
    const struct ieee80211_node *src)
{
	const struct pgt_ieee80211_node *psrc;
	struct pgt_ieee80211_node *pdst;

	psrc = (const struct pgt_ieee80211_node *)src;
	pdst = (struct pgt_ieee80211_node *)dst;
	bcopy(psrc, pdst, sizeof(*psrc));
}

int
pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
    int type, int arg1, int arg2)
{
	return (EOPNOTSUPP);
}

int
pgt_net_attach(struct pgt_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_rateset *rs;
	uint8_t rates[IEEE80211_RATE_MAXSIZE];
	struct pgt_obj_buffer psbuffer;
	struct pgt_obj_frequencies *freqs;
	uint32_t phymode, country;
	unsigned int chan, i, j, firstchan = -1;
	int error;

	psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
	psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
1860 	error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(psbuffer));
1861 	if (error)
1862 		return (error);
1863 	error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
1864 	if (error)
1865 		return (error);
1866 	error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
1867 	    sizeof(ic->ic_myaddr));
1868 	if (error)
1869 		return (error);
1870 	error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
1871 	if (error)
1872 		return (error);
1873 
1874 	ifp->if_softc = sc;
1875 	ifp->if_ioctl = pgt_ioctl;
1876 	ifp->if_start = pgt_start;
1877 	ifp->if_watchdog = pgt_watchdog;
1878 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
1879 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1880 
1881 	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
1882 
1883 	/*
1884 	 * Set channels
1885 	 *
1886 	 * Prism hardware likes to report supported frequencies that are
1887 	 * not actually available for the country of origin.
1888 	 */
1889 	j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
1890 	freqs = malloc(j, M_DEVBUF, M_WAITOK);
1891 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
1892 	if (error) {
1893 		free(freqs, M_DEVBUF, 0);
1894 		return (error);
1895 	}
1896 
1897 	for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
1898 		chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
1899 		    0);
1900 
1901 		if (chan > IEEE80211_CHAN_MAX) {
1902 			printf("%s: reported bogus channel (%u)\n",
1903 			    sc->sc_dev.dv_xname, chan);
1904 			free(freqs, M_DEVBUF, 0);
1905 			return (EIO);
1906 		}
1907 
1908 		if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
1909 			if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
1910 				continue;
1911 			if (country == letoh32(PGT_COUNTRY_USA)) {
1912 				if (chan >= 12 && chan <= 14)
1913 					continue;
1914 			}
1915 			if (chan <= 14)
1916 				ic->ic_channels[chan].ic_flags |=
1917 				    IEEE80211_CHAN_B;
1918 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
1919 		} else {
1920 			if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
1921 				continue;
1922 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
1923 		}
1924 
1925 		ic->ic_channels[chan].ic_freq =
1926 		    letoh16(freqs->pof_freqlist_mhz[i]);
1927 
1928 		if (firstchan == -1)
1929 			firstchan = chan;
1930 
1931 		DPRINTF(("%s: set channel %d to freq %uMHz\n",
1932 		    sc->sc_dev.dv_xname, chan,
1933 		    letoh16(freqs->pof_freqlist_mhz[i])));
1934 	}
1935 	free(freqs, M_DEVBUF, 0);
1936 	if (firstchan == -1) {
1937 		printf("%s: no channels found\n", sc->sc_dev.dv_xname);
1938 		return (EIO);
1939 	}
1940 
1941 	/*
1942 	 * Set rates
1943 	 */
1944 	bzero(rates, sizeof(rates));
1945 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
1946 	if (error)
1947 		return (error);
1948 	for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
1949 		switch (rates[i]) {
1950 		case 2:
1951 		case 4:
1952 		case 11:
1953 		case 22:
1954 		case 44: /* maybe */
1955 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1956 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
1957 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1958 			}
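			/* FALLTHROUGH: 11b rates also go in the 11g/11a/auto sets */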
1959 		default:
1960 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1961 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
1962 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1963 			}
1964 			if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
1965 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
1966 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1967 			}
1968 			rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
1969 			rs->rs_rates[rs->rs_nrates++] = rates[i];
1970 		}
1971 	}
1972 
1973 	ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT |
1974 	    IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
1975 #ifndef IEEE80211_STA_ONLY
1976 	ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP;
1977 #endif
1978 	ic->ic_opmode = IEEE80211_M_STA;
1979 	ic->ic_state = IEEE80211_S_INIT;
1980 
1981 	if_attach(ifp);
1982 	ieee80211_ifattach(ifp);
1983 
1984 	/* setup post-attach/pre-lateattach vector functions */
1985 	sc->sc_newstate = ic->ic_newstate;
1986 	ic->ic_newstate = pgt_newstate;
1987 	ic->ic_node_alloc = pgt_ieee80211_node_alloc;
1988 	ic->ic_newassoc = pgt_ieee80211_newassoc;
1989 	ic->ic_node_free = pgt_ieee80211_node_free;
1990 	ic->ic_node_copy = pgt_ieee80211_node_copy;
1991 	ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
1992 	ic->ic_max_rssi = 255;	/* rssi is a u_int8_t */
1993 
1994 	/* let net80211 handle switching around the media + resetting */
1995 	ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
1996 
1997 #if NBPFILTER > 0
1998 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
1999 	    sizeof(struct ieee80211_frame) + 64);
2000 
2001 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
2002 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
2003 	sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
2004 
2005 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
2006 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
2007 	sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
2008 #endif
2009 	return (0);
2010 }
2011 
2012 int
2013 pgt_media_change(struct ifnet *ifp)
2014 {
2015 	struct pgt_softc *sc = ifp->if_softc;
2016 	int error;
2017 
2018 	error = ieee80211_media_change(ifp);
2019 	if (error == ENETRESET) {
2020 		pgt_update_hw_from_sw(sc, 0);
2021 		error = 0;
2022 	}
2023 
2024 	return (error);
2025 }
2026 
2027 void
2028 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
2029 {
2030 	struct pgt_softc *sc = ifp->if_softc;
2031 	struct ieee80211com *ic = &sc->sc_ic;
2032 	uint32_t rate;
2033 	int s;
2034 
2035 	imr->ifm_status = 0;
2036 	imr->ifm_active = IFM_IEEE80211 | IFM_NONE;
2037 
2038 	if (!(ifp->if_flags & IFF_UP))
2039 		return;
2040 
2041 	s = splnet();
2042 
2043 	if (ic->ic_fixed_rate != -1) {
2044 		rate = ic->ic_sup_rates[ic->ic_curmode].
2045 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
2046 	} else {
2047 		if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
2048 			goto out;
2049 		rate = letoh32(rate);
2050 		if (sc->sc_debug & SC_DEBUG_LINK) {
2051 			DPRINTF(("%s: %s: link rate %u\n",
2052 			    sc->sc_dev.dv_xname, __func__, rate));
2053 		}
2054 		if (rate == 0)
2055 			goto out;
2056 	}
2057 
2058 	imr->ifm_status = IFM_AVALID;
2059 	imr->ifm_active = IFM_IEEE80211;
2060 	if (ic->ic_state == IEEE80211_S_RUN)
2061 		imr->ifm_status |= IFM_ACTIVE;
2062 
2063 	imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);
2064 
2065 	switch (ic->ic_opmode) {
2066 	case IEEE80211_M_STA:
2067 		break;
2068 #ifndef IEEE80211_STA_ONLY
2069 	case IEEE80211_M_IBSS:
2070 		imr->ifm_active |= IFM_IEEE80211_ADHOC;
2071 		break;
2072 	case IEEE80211_M_AHDEMO:
2073 		imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
2074 		break;
2075 	case IEEE80211_M_HOSTAP:
2076 		imr->ifm_active |= IFM_IEEE80211_HOSTAP;
2077 		break;
2078 #endif
2079 	case IEEE80211_M_MONITOR:
2080 		imr->ifm_active |= IFM_IEEE80211_MONITOR;
2081 		break;
2082 	default:
2083 		break;
2084 	}
2085 
2086 out:
2087 	splx(s);
2088 }
2089 
2090 /*
2091  * Start data frames.  Critical sections surround the boundary of
2092  * management frame transmission / transmission acknowledgement / response
2093  * and data frame transmission / transmission acknowledgement.
2094  */
2095 void
2096 pgt_start(struct ifnet *ifp)
2097 {
2098 	struct pgt_softc *sc;
2099 	struct ieee80211com *ic;
2100 	struct pgt_desc *pd;
2101 	struct mbuf *m;
2102 	int error;
2103 
2104 	sc = ifp->if_softc;
2105 	ic = &sc->sc_ic;
2106 
2107 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
2108 	    !(ifp->if_flags & IFF_RUNNING) ||
2109 	    ic->ic_state != IEEE80211_S_RUN) {
2110 		return;
2111 	}
2112 
2113 	/*
2114 	 * Management packets should probably be MLME frames
2115 	 * (i.e. hostap "managed" mode); we don't touch the
2116 	 * net80211 management queue.
2117 	 */
2118 	while (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
2119 	    PGT_QUEUE_FULL_THRESHOLD && !ifq_empty(&ifp->if_snd)) {
2120 		pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
2121 		m = ifq_deq_begin(&ifp->if_snd);
2122 		if (m == NULL)
2123 			break;
2124 		if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
2125 			error = pgt_load_tx_desc_frag(sc,
2126 			    PGT_QUEUE_DATA_LOW_TX, pd);
2127 			if (error) {
2128 				ifq_deq_rollback(&ifp->if_snd, m);
2129 				break;
2130 			}
2131 			ifq_deq_commit(&ifp->if_snd, m);
2132 			m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
2133 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2134 			    pd, m->m_pkthdr.len, 0);
2135 		} else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
2136 			struct pgt_desc *pd2;
2137 
2138 			/*
2139 			 * Transmit a fragmented frame if there is
2140 			 * not enough room in one fragment; limit
2141 			 * to two fragments (802.11 itself couldn't
2142 			 * even support a full two.)
2143 			 */
2144 			if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
2145 			    PGT_QUEUE_FULL_THRESHOLD) {
2146 				ifq_deq_rollback(&ifp->if_snd, m);
2147 				break;
2148 			}
2149 			pd2 = TAILQ_NEXT(pd, pd_link);
2150 			error = pgt_load_tx_desc_frag(sc,
2151 			    PGT_QUEUE_DATA_LOW_TX, pd);
2152 			if (error == 0) {
2153 				error = pgt_load_tx_desc_frag(sc,
2154 				    PGT_QUEUE_DATA_LOW_TX, pd2);
2155 				if (error) {
2156 					pgt_unload_tx_desc_frag(sc, pd);
2157 					TAILQ_INSERT_HEAD(&sc->sc_freeq[
2158 					    PGT_QUEUE_DATA_LOW_TX], pd,
2159 					    pd_link);
2160 				}
2161 			}
2162 			if (error) {
2163 				ifq_deq_rollback(&ifp->if_snd, m);
2164 				break;
2165 			}
2166 			ifq_deq_commit(&ifp->if_snd, m);
2167 			m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
2168 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2169 			    pd, PGT_FRAG_SIZE, 1);
2170 			m_copydata(m, PGT_FRAG_SIZE,
2171 			    m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
2172 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2173 			    pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
2174 		} else {
2175 			ifq_deq_commit(&ifp->if_snd, m);
2176 			ifp->if_oerrors++;
2177 			m_freem(m);
2178 			m = NULL;
2179 		}
2180 		if (m != NULL) {
2181 			struct ieee80211_node *ni;
2182 #if NBPFILTER > 0
2183 			if (ifp->if_bpf != NULL)
2184 				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2185 #endif
2186 			ifp->if_timer = 1;
2187 			sc->sc_txtimer = 5;
2188 			ni = ieee80211_find_txnode(&sc->sc_ic,
2189 			    mtod(m, struct ether_header *)->ether_dhost);
2190 			if (ni != NULL) {
2191 				ni->ni_inact = 0;
2192 				if (ni != ic->ic_bss)
2193 					ieee80211_release_node(&sc->sc_ic, ni);
2194 			}
2195 #if NBPFILTER > 0
2196 			if (sc->sc_drvbpf != NULL) {
2197 				struct mbuf mb;
2198 				struct ether_header eh;
2199 				struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;
2200 
2201 				bcopy(mtod(m, struct ether_header *), &eh,
2202 				    sizeof(eh));
2203 				m_adj(m, sizeof(eh));
2204 				m = pgt_ieee80211_encap(sc, &eh, m, NULL);
2205 
2206 				tap->wt_flags = 0;
2207 				/* XXX the actual tx rate is not known here */
2208 				tap->wt_rate = 0;
2209 				tap->wt_chan_freq =
2210 				    htole16(ic->ic_bss->ni_chan->ic_freq);
2211 				tap->wt_chan_flags =
2212 				    htole16(ic->ic_bss->ni_chan->ic_flags);
2213 
2214 				if (m != NULL) {
2215 					mb.m_data = (caddr_t)tap;
2216 					mb.m_len = sc->sc_txtap_len;
2217 					mb.m_next = m;
2218 					mb.m_nextpkt = NULL;
2219 					mb.m_type = 0;
2220 					mb.m_flags = 0;
2221 
2222 					bpf_mtap(sc->sc_drvbpf, &mb,
2223 					    BPF_DIRECTION_OUT);
2224 				}
2225 			}
2226 #endif
2227 			m_freem(m);
2228 		}
2229 	}
2230 }
2231 
2232 int
2233 pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
2234 {
2235 	struct pgt_softc *sc = ifp->if_softc;
2236 	struct ifreq *ifr;
2237 	struct wi_req *wreq;
2238 	struct ieee80211_nodereq_all *na;
2239 	struct ieee80211com *ic;
2240 	struct pgt_obj_bsslist *pob;
2241 	struct wi_scan_p2_hdr *p2hdr;
2242 	struct wi_scan_res *res;
2243 	uint32_t noise;
2244 	int maxscan, i, j, s, error = 0;
2245 
2246 	ic = &sc->sc_ic;
2247 	ifr = (struct ifreq *)req;
2248 
2249 	s = splnet();
2250 	switch (cmd) {
2251 	case SIOCS80211SCAN:
2252 		/*
2253 		 * This chip always scans as soon as it is initialized.
2254 		 */
2255 		break;
2256 	case SIOCG80211ALLNODES: {
2257 		struct ieee80211_nodereq *nr = NULL;
2258 		na = (struct ieee80211_nodereq_all *)req;
2259 		wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);
2260 
2261 		maxscan = PGT_OBJ_BSSLIST_NBSS;
2262 		pob = malloc(sizeof(*pob) +
2263 		    sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
2264 		error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
2265 		    sizeof(noise));
2266 
2267 		if (error == 0) {
2268 			noise = letoh32(noise);
2269 			error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
2270 			    sizeof(*pob) +
2271 			    sizeof(struct pgt_obj_bss) * maxscan);
2272 		}
2273 
2274 		if (error == 0) {
2275 			maxscan = min(PGT_OBJ_BSSLIST_NBSS,
2276 			    letoh32(pob->pob_count));
2277 			maxscan = min(maxscan,
2278 			    (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
2279 			    WI_PRISM2_RES_SIZE);
2280 			p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
2281 			p2hdr->wi_rsvd = 0;
2282 			p2hdr->wi_reason = 1;
2283 			wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
2284 			    sizeof(*p2hdr) / 2;
2285 			wreq->wi_type = WI_RID_SCAN_RES;
2286 		}
2287 
2288 		for (na->na_nodes = j = i = 0; i < maxscan &&
2289 		    (na->na_size >= j + sizeof(struct ieee80211_nodereq));
2290 		    i++) {
2291 			/* allocate node space */
2292 			if (nr == NULL)
2293 				nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);
2294 
2295 			/* get next BSS scan result */
2296 			res = (struct wi_scan_res *)
2297 			    ((char *)&wreq->wi_val + sizeof(*p2hdr) +
2298 			    i * WI_PRISM2_RES_SIZE);
2299 			pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
2300 			    res, noise);
2301 
2302 			/* copy it to node structure for ifconfig to read */
2303 			bzero(nr, sizeof(*nr));
2304 			IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
2305 			IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
2306 			nr->nr_channel = letoh16(res->wi_chan);
2307 			nr->nr_chan_flags = IEEE80211_CHAN_B;
2308 			nr->nr_rssi = letoh16(res->wi_signal);
2309 			nr->nr_max_rssi = 0; /* XXX */
2310 			nr->nr_nwid_len = letoh16(res->wi_ssid_len);
2311 			bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
2312 			nr->nr_intval = letoh16(res->wi_interval);
2313 			nr->nr_capinfo = letoh16(res->wi_capinfo);
2314 			nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
2315 			    (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
2316 			    (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
2317 			    (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
2318 			nr->nr_nrates = 0;
2319 			while (res->wi_srates[nr->nr_nrates] != 0) {
2320 				nr->nr_rates[nr->nr_nrates] =
2321 				    res->wi_srates[nr->nr_nrates] &
2322 				    WI_VAR_SRATES_MASK;
2323 				nr->nr_nrates++;
2324 			}
2325 			nr->nr_flags = 0;
2326 			if (bcmp(nr->nr_macaddr, nr->nr_bssid,
2327 			    IEEE80211_ADDR_LEN) == 0)
2328 				nr->nr_flags |= IEEE80211_NODEREQ_AP;
2329 			error = copyout(nr, (caddr_t)na->na_node + j,
2330 			    sizeof(struct ieee80211_nodereq));
2331 			if (error)
2332 				break;
2333 
2334 			/* point to next node entry */
2335 			j += sizeof(struct ieee80211_nodereq);
2336 			na->na_nodes++;
2337 		}
2338 		if (nr)
2339 			free(nr, M_DEVBUF, 0);
2340 		free(pob, M_DEVBUF, 0);
2341 		free(wreq, M_DEVBUF, 0);
2342 		break;
2343 	}
2344 	case SIOCSIFADDR:
2345 		ifp->if_flags |= IFF_UP;
2346 		/* FALLTHROUGH */
2347 	case SIOCSIFFLAGS:
2348 		if (ifp->if_flags & IFF_UP) {
2349 			if ((ifp->if_flags & IFF_RUNNING) == 0) {
2350 				pgt_init(ifp);
2351 				error = ENETRESET;
2352 			}
2353 		} else {
2354 			if (ifp->if_flags & IFF_RUNNING) {
2355 				pgt_stop(sc, SC_NEEDS_RESET);
2356 				error = ENETRESET;
2357 			}
2358 		}
2359 		break;
2360 	case SIOCSIFMTU:
2361 		if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
2362 			error = EINVAL;
2363 			break;
2364 		}
2365 		/* FALLTHROUGH */
2366 	default:
2367 		error = ieee80211_ioctl(ifp, cmd, req);
2368 		break;
2369 	}
2370 
2371 	if (error == ENETRESET) {
2372 		pgt_update_hw_from_sw(sc, 0);
2373 		error = 0;
2374 	}
2375 	splx(s);
2376 
2377 	return (error);
2378 }
2379 
2380 void
2381 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
2382     struct wi_scan_res *scanres, uint32_t noise)
2383 {
2384 	struct ieee80211_rateset *rs;
2385 	struct wi_scan_res ap;
2386 	unsigned int i, n;
2387 
2388 	rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
2389 	bzero(&ap, sizeof(ap));
2390 	ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
2391 	ap.wi_noise = noise;
2392 	ap.wi_signal = letoh16(pob->pob_rssi);
2393 	IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
2394 	ap.wi_interval = letoh16(pob->pob_beacon_period);
2395 	ap.wi_capinfo = letoh16(pob->pob_capinfo);
2396 	ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
2397 	memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
2398 	n = 0;
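	/*
	 * pob_rates is a little-endian bitmask indexing our AUTO-mode
	 * rate set; collect each advertised rate, capped by the size
	 * of the wi_srates array.
	 */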
2399 	for (i = 0; i < 16; i++) {
2400 		if (letoh16(pob->pob_rates) & (1 << i)) {
2401 			if (i >= rs->rs_nrates)
2402 				break;
2403 			ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
2404 			if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
2405 				break;
2406 		}
2407 	}
2408 	memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
2409 }
2410 
2411 void
2412 node_mark_active_ap(void *arg, struct ieee80211_node *ni)
2413 {
2414 	/*
2415 	 * HostAP mode lets all nodes stick around unless
2416 	 * the firmware AP kicks them off.
2417 	 */
2418 	ni->ni_inact = 0;
2419 }
2420 
2421 void
2422 node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
2423 {
2424 	struct pgt_ieee80211_node *pin;
2425 
2426 	/*
2427 	 * As there is no association in ad-hoc mode, we let links time
2428 	 * out naturally as long as they are not holding any private
2429 	 * configuration, such as 802.1x authorization.
2430 	 */
2431 	pin = (struct pgt_ieee80211_node *)ni;
2432 	if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
2433 		pin->pin_node.ni_inact = 0;
2434 }
2435 
2436 void
2437 pgt_watchdog(struct ifnet *ifp)
2438 {
2439 	struct pgt_softc *sc;
2440 
2441 	sc = ifp->if_softc;
2442 	/*
2443 	 * Check for timed out transmissions (and make sure to set
2444 	 * this watchdog to fire again if there is still data in the
2445 	 * output device queue).
2446 	 */
2447 	if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
2448 		int count;
2449 
2450 		ifp->if_timer = 1;
2451 		if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
2452 			count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
2453 			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
2454 				DPRINTF(("%s: timeout %d data transmissions\n",
2455 				    sc->sc_dev.dv_xname, count));
2456 		}
2457 	}
2458 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
2459 		return;
2460 	/*
2461 	 * If we're going to kick the device out of power-save mode
2462 	 * just to update the BSSID and such, we should not do it
2463 	 * very often; it remains to be determined how best to do that.
2464 	 */
2465 	if (ifp->if_flags & IFF_RUNNING &&
2466 	    sc->sc_ic.ic_state != IEEE80211_S_INIT &&
2467 	    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
2468 		pgt_async_update(sc);
2469 
2470 #ifndef IEEE80211_STA_ONLY
2471 	/*
2472 	 * As a firmware-based HostAP, we should not time out
2473 	 * nodes inside the driver in addition to the timeout
2474 	 * that already exists in the firmware.  The only timeouts
2475 	 * we should have to deal with when doing HostAP are the
2476 	 * privacy-related ones.
2477 	 */
2478 	switch (sc->sc_ic.ic_opmode) {
2479 	case IEEE80211_M_HOSTAP:
2480 		ieee80211_iterate_nodes(&sc->sc_ic,
2481 		    node_mark_active_ap, NULL);
2482 		break;
2483 	case IEEE80211_M_IBSS:
2484 		ieee80211_iterate_nodes(&sc->sc_ic,
2485 		    node_mark_active_adhoc, NULL);
2486 		break;
2487 	default:
2488 		break;
2489 	}
2490 #endif
2491 	ieee80211_watchdog(ifp);
2492 	ifp->if_timer = 1;
2493 }
2494 
2495 int
2496 pgt_init(struct ifnet *ifp)
2497 {
2498 	struct pgt_softc *sc = ifp->if_softc;
2499 	struct ieee80211com *ic = &sc->sc_ic;
2500 
2501 	/* set default channel */
2502 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
2503 
2504 	if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
2505 		pgt_update_hw_from_sw(sc,
2506 		    ic->ic_state != IEEE80211_S_INIT);
2507 
2508 	ifp->if_flags |= IFF_RUNNING;
2509 	ifq_clr_oactive(&ifp->if_snd);
2510 
2511 	/* Begin background scanning */
2512 	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
2513 
2514 	return (0);
2515 }
2516 
2517 /*
2518  * After almost every configuration change, everything needs to be fully
2519  * reinitialized.  For some operations (currently, WEP settings in
2520  * ad-hoc+802.1x mode), the change is "soft" and doesn't remove
2521  * "associations," which allows EAP authorization to occur again.
2522  * If keepassoc is specified, the reset operation should try to go
2523  * back to the BSS we had before.
2524  */
2525 void
2526 pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc)
2527 {
2528 	struct ieee80211com *ic = &sc->sc_ic;
2529 	struct arpcom *ac = &ic->ic_ac;
2530 	struct ifnet *ifp = &ac->ac_if;
2531 	struct pgt_obj_key keyobj;
2532 	struct pgt_obj_ssid essid;
2533 	uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1];
2534 	uint32_t mode, bsstype, config, profile, channel, slot, preamble;
2535 	uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
2536 	unsigned int i;
2537 	int success, shouldbeup, s;
2538 
2539 	config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX;
2540 
2541 	/*
2542 	 * Promiscuous mode is currently a no-op: packets transmitted while
2543 	 * in promiscuous mode don't ever seem to go anywhere.
2544 	 */
2545 	shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP;
2546 
2547 	if (shouldbeup) {
2548 		switch (ic->ic_opmode) {
2549 		case IEEE80211_M_STA:
2550 			if (ifp->if_flags & IFF_PROMISC)
2551 				mode = PGT_MODE_CLIENT;	/* what to do? */
2552 			else
2553 				mode = PGT_MODE_CLIENT;
2554 			bsstype = PGT_BSS_TYPE_STA;
2555 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2556 			break;
2557 #ifndef IEEE80211_STA_ONLY
2558 		case IEEE80211_M_IBSS:
2559 			if (ifp->if_flags & IFF_PROMISC)
2560 				mode = PGT_MODE_CLIENT;	/* what to do? */
2561 			else
2562 				mode = PGT_MODE_CLIENT;
2563 			bsstype = PGT_BSS_TYPE_IBSS;
2564 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2565 			break;
2566 		case IEEE80211_M_HOSTAP:
2567 			mode = PGT_MODE_AP;
2568 			bsstype = PGT_BSS_TYPE_STA;
2569 			/*
2570 			 * For IEEE 802.1x, we need to authenticate and
2571 			 * authorize hosts from here on or they remain
2572 			 * associated but without the ability to send or
2573 			 * receive normal traffic to us (courtesy the
2574 			 * firmware AP implementation).
2575 			 */
2576 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2577 			/*
2578 			 * WDS mode needs several things to work:
2579 			 * discovery of exactly how creating the WDS
2580 			 * links is meant to function, an interface
2581 			 * for this, and ability to encode or decode
2582 			 * the WDS frames.
2583 			 */
2584 			if (sc->sc_wds)
2585 				config |= PGT_CONFIG_WDS;
2586 			break;
2587 #endif
2588 		case IEEE80211_M_MONITOR:
2589 			mode = PGT_MODE_PROMISCUOUS;
2590 			bsstype = PGT_BSS_TYPE_ANY;
2591 			dot1x = PGT_DOT1X_AUTH_NONE;
2592 			break;
2593 		default:
2594 			goto badopmode;
2595 		}
2596 	} else {
2597 badopmode:
2598 		mode = PGT_MODE_CLIENT;
2599 		bsstype = PGT_BSS_TYPE_NONE;
2600 	}
2601 
2602 	DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
2603 	switch (ic->ic_curmode) {
2604 	case IEEE80211_MODE_11A:
2605 		profile = PGT_PROFILE_A_ONLY;
2606 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2607 		DPRINTF(("IEEE80211_MODE_11A\n"));
2608 		break;
2609 	case IEEE80211_MODE_11B:
2610 		profile = PGT_PROFILE_B_ONLY;
2611 		preamble = PGT_OID_PREAMBLE_MODE_LONG;
2612 		DPRINTF(("IEEE80211_MODE_11B\n"));
2613 		break;
2614 	case IEEE80211_MODE_11G:
2615 		profile = PGT_PROFILE_G_ONLY;
2616 		preamble = PGT_OID_PREAMBLE_MODE_SHORT;
2617 		DPRINTF(("IEEE80211_MODE_11G\n"));
2618 		break;
2619 	case IEEE80211_MODE_AUTO:
2620 		profile = PGT_PROFILE_MIXED_G_WIFI;
2621 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2622 		DPRINTF(("IEEE80211_MODE_AUTO\n"));
2623 		break;
2624 	default:
2625 		panic("unknown mode %d", ic->ic_curmode);
2626 	}
2627 
2628 	switch (sc->sc_80211_ioc_auth) {
2629 	case IEEE80211_AUTH_NONE:
2630 		auth = PGT_AUTH_MODE_NONE;
2631 		break;
2632 	case IEEE80211_AUTH_OPEN:
2633 		auth = PGT_AUTH_MODE_OPEN;
2634 		break;
2635 	default:
2636 		auth = PGT_AUTH_MODE_SHARED;
2637 		break;
2638 	}
2639 
2640 	if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) {
2641 		wep = 1;
2642 		exunencrypted = 1;
2643 	} else {
2644 		wep = 0;
2645 		exunencrypted = 0;
2646 	}
2647 
2648 	mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
2649 	wep = htole32(wep);
2650 	exunencrypted = htole32(exunencrypted);
2651 	profile = htole32(profile);
2652 	preamble = htole32(preamble);
2653 	bsstype = htole32(bsstype);
2654 	config = htole32(config);
2655 	mode = htole32(mode);
2656 
2657 	if (!wep || !sc->sc_dot1x)
2658 		dot1x = PGT_DOT1X_AUTH_NONE;
2659 	dot1x = htole32(dot1x);
2660 	auth = htole32(auth);
2661 
2662 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
2663 		slot = htole32(PGT_OID_SLOT_MODE_SHORT);
2664 	else
2665 		slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC);
2666 
2667 	if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
2668 		if (keepassoc)
2669 			channel = 0;
2670 		else
2671 			channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
2672 	} else
2673 		channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);
2674 
2675 	DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
2676 	for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
2677 		availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
2678 		DPRINTF((" %d", availrates[i]));
2679 	}
2680 	DPRINTF(("\n"));
2681 	availrates[i++] = 0;
2682 
2683 	essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
2684 	memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length);
2685 
2686 	s = splnet();
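	/*
	 * Single-pass "loop": the SETOID macro is expected to bail out
	 * of it on error, leaving success at 0; reaching the end of the
	 * body sets success to 1.
	 */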
2687 	for (success = 0; success == 0; success = 1) {
2688 		SETOID(PGT_OID_PROFILE, &profile, sizeof(profile));
2689 		SETOID(PGT_OID_CONFIG, &config, sizeof(config));
2690 		SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));
2691 
2692 		if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) {
2693 			SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,
2694 			    sizeof(ac->ac_enaddr));
2695 			IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr);
2696 		}
2697 
2698 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2699 		SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype));
2700 
2701 		if (channel != 0 && channel != IEEE80211_CHAN_ANY)
2702 			SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel));
2703 
2704 		if (ic->ic_flags & IEEE80211_F_DESBSSID) {
2705 			SETOID(PGT_OID_BSSID, ic->ic_des_bssid,
2706 			    sizeof(ic->ic_des_bssid));
2707 		} else if (keepassoc) {
2708 			SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2709 			    sizeof(ic->ic_bss->ni_bssid));
2710 		}
2711 
2712 		SETOID(PGT_OID_SSID, &essid, sizeof(essid));
2713 
2714 		if (ic->ic_des_esslen > 0)
2715 			SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid));
2716 
2717 		SETOID(PGT_OID_RATES, &availrates, i);
2718 		SETOID(PGT_OID_EXTENDED_RATES, &availrates, i);
2719 		SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble));
2720 		SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot));
2721 		SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth));
2722 		SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,
2723 		    sizeof(exunencrypted));
2724 		SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x));
2725 		SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep));
2726 		/*
2727 		 * Setting WEP key(s)
2728 		 */
2729 		if (letoh32(wep) != 0) {
2730 			keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
2731 			/* key 1 */
2732 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2733 			    IEEE80211_KEYBUF_SIZE);
2734 			keyobj.pok_length = min(keyobj.pok_length,
2735 			    ic->ic_nw_keys[0].k_len);
2736 			bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
2737 			    keyobj.pok_length);
2738 			SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
2739 			/* key 2 */
2740 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2741 			    IEEE80211_KEYBUF_SIZE);
2742 			keyobj.pok_length = min(keyobj.pok_length,
2743 			    ic->ic_nw_keys[1].k_len);
2744 			bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
2745 			    keyobj.pok_length);
2746 			SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
2747 			/* key 3 */
2748 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2749 			    IEEE80211_KEYBUF_SIZE);
2750 			keyobj.pok_length = min(keyobj.pok_length,
2751 			    ic->ic_nw_keys[2].k_len);
2752 			bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
2753 			    keyobj.pok_length);
2754 			SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
2755 			/* key 4 */
2756 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2757 			    IEEE80211_KEYBUF_SIZE);
2758 			keyobj.pok_length = min(keyobj.pok_length,
2759 			    ic->ic_nw_keys[3].k_len);
2760 			bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
2761 			    keyobj.pok_length);
2762 			SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));
2763 
2764 			wepkey = htole32(ic->ic_wep_txkey);
2765 			SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
2766 		}
2767 		/* set mode again to commit */
2768 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2769 	}
2770 	splx(s);
2771 
2772 	if (success) {
2773 		if (shouldbeup)
2774 			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2775 		else
2776 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2777 	} else {
2778 		printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
2779 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2780 	}
2781 }
2782 
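/*
 * Mirror the firmware AP's (dis)association bookkeeping into
 * net80211's node table.
 */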
2783 void
2784 pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
2785     struct pgt_obj_mlme *mlme)
2786 {
2787 	struct ieee80211com *ic = &sc->sc_ic;
2788 	struct pgt_ieee80211_node *pin;
2789 	struct ieee80211_node *ni;
2790 
2791 	ni = ieee80211_find_node(ic, mlme->pom_address);
2792 	pin = (struct pgt_ieee80211_node *)ni;
2793 	switch (oid) {
2794 	case PGT_OID_DISASSOCIATE:
2795 		if (ni != NULL)
2796 			ieee80211_release_node(&sc->sc_ic, ni);
2797 		break;
2798 	case PGT_OID_ASSOCIATE:
2799 		if (ni == NULL) {
2800 			ni = ieee80211_dup_bss(ic, mlme->pom_address);
2801 			if (ni == NULL)
2802 				break;
2803 			ic->ic_newassoc(ic, ni, 1);
2804 			pin = (struct pgt_ieee80211_node *)ni;
2805 		}
2806 		ni->ni_associd = letoh16(mlme->pom_id);
2807 		pin->pin_mlme_state = letoh16(mlme->pom_state);
2808 		break;
2809 	default:
2810 		if (pin != NULL)
2811 			pin->pin_mlme_state = letoh16(mlme->pom_state);
2812 		break;
2813 	}
2814 }
2815 
2816 /*
2817  * Either in response to an event or after a certain amount of time,
2818  * synchronize our idea of the network we're part of from the hardware.
2819  */
2820 void
2821 pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
2822 	    struct mbuf *args)
2823 {
2824 	struct ieee80211com *ic = &sc->sc_ic;
2825 	struct pgt_obj_ssid ssid;
2826 	struct pgt_obj_bss bss;
2827 	uint32_t channel, noise, ls;
2828 	int error, s;
2829 
2830 	if (pa != NULL) {
2831 		struct pgt_obj_mlme *mlme;
2832 		uint32_t oid;
2833 
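		/* The trap mbuf begins with the 32-bit OID; strip it off. */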
2834 		oid = *mtod(args, uint32_t *);
2835 		m_adj(args, sizeof(uint32_t));
2836 		if (sc->sc_debug & SC_DEBUG_TRAP)
2837 			DPRINTF(("%s: trap: oid %#x len %u\n",
2838 			    sc->sc_dev.dv_xname, oid, args->m_len));
2839 		switch (oid) {
2840 		case PGT_OID_LINK_STATE:
2841 			if (args->m_len < sizeof(uint32_t))
2842 				break;
2843 			ls = letoh32(*mtod(args, uint32_t *));
2844 			if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
2845 				DPRINTF(("%s: %s: link rate %u\n",
2846 				    sc->sc_dev.dv_xname, __func__, ls));
2847 			if (ls)
2848 				ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2849 			else
2850 				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2851 			goto gotlinkstate;
2852 		case PGT_OID_DEAUTHENTICATE:
2853 		case PGT_OID_AUTHENTICATE:
2854 		case PGT_OID_DISASSOCIATE:
2855 		case PGT_OID_ASSOCIATE:
2856 			if (args->m_len < sizeof(struct pgt_obj_mlme))
2857 				break;
2858 			mlme = mtod(args, struct pgt_obj_mlme *);
2859 			if (sc->sc_debug & SC_DEBUG_TRAP)
2860 				DPRINTF(("%s: mlme: address "
2861 				    "%s id 0x%02x state 0x%02x code 0x%02x\n",
2862 				    sc->sc_dev.dv_xname,
2863 				    ether_sprintf(mlme->pom_address),
2864 				    letoh16(mlme->pom_id),
2865 				    letoh16(mlme->pom_state),
2866 				    letoh16(mlme->pom_code)));
2867 #ifndef IEEE80211_STA_ONLY
2868 			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2869 				pgt_hostap_handle_mlme(sc, oid, mlme);
2870 #endif
2871 			break;
2872 		}
2873 		return;
2874 	}
2875 	if (ic->ic_state == IEEE80211_S_SCAN) {
2876 		s = splnet();
2877 		error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
2878 		splx(s);
2879 		if (error)
2880 			return;
2881 		DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
2882 		    htole32(ls)));
2883 		if (ls != 0)
2884 			ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2885 	}
2886 
2887 gotlinkstate:
2888 	s = splnet();
2889 	if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
2890 		goto out;
2891 	sc->sc_noise = letoh32(noise);
2892 	if (ic->ic_state == IEEE80211_S_RUN) {
2893 		if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
2894 		    sizeof(channel)) != 0)
2895 			goto out;
2896 		channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
2897 		ic->ic_bss->ni_chan = &ic->ic_channels[channel];
2898 		if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2899 		    sizeof(ic->ic_bss->ni_bssid)) != 0)
2900 			goto out;
2901 		IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
2902 		error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
2903 		    sizeof(bss));
2904 		if (error == 0)
2905 			ic->ic_bss->ni_rssi = bss.pob_rssi;
2906 		else if (error != EPERM)
2907 			goto out;
2908 		error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
2909 		if (error)
2910 			goto out;
2911 		ic->ic_bss->ni_esslen = min(ssid.pos_length,
2912 		    sizeof(ic->ic_bss->ni_essid));
2913 		memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
2914 		    ssid.pos_length);
2915 	}
2916 
2917 out:
2918 	splx(s);
2919 }
2920 
2921 int
2922 pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2923 {
2924 	struct pgt_softc *sc = ic->ic_if.if_softc;
2925 	enum ieee80211_state ostate;
2926 
2927 	ostate = ic->ic_state;
2928 
2929 	DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
2930 	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
2931 
2932 	switch (nstate) {
2933 	case IEEE80211_S_INIT:
2934 		if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
2935 			ic->ic_if.if_timer = 0;
2936 		ic->ic_mgt_timer = 0;
2937 		ic->ic_flags &= ~IEEE80211_F_SIBSS;
2938 		ieee80211_free_allnodes(ic, 1);
2939 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2940 		break;
2941 	case IEEE80211_S_SCAN:
2942 		ic->ic_if.if_timer = 1;
2943 		ic->ic_mgt_timer = 0;
2944 		ieee80211_node_cleanup(ic, ic->ic_bss);
2945 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2946 #ifndef IEEE80211_STA_ONLY
2947 		/* Just use any old channel; we override it anyway. */
2948 		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2949 			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
2950 #endif
2951 		break;
2952 	case IEEE80211_S_RUN:
2953 		ic->ic_if.if_timer = 1;
2954 		break;
2955 	default:
2956 		break;
2957 	}
2958 
2959 	return (sc->sc_newstate(ic, nstate, arg));
2960 }
2961 
2962 int
2963 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
2964 {
2965 	int wokeup = 0;
2966 
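	/*
	 * Pretend the device has consumed everything outstanding: set
	 * its fragment index equal to ours, then recycle every dirty
	 * descriptor, counting output errors for data queues.
	 */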
2967 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2968 	    sc->sc_cbdmam->dm_mapsize,
2969 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
2970 	sc->sc_cb->pcb_device_curfrag[pq] =
2971 	    sc->sc_cb->pcb_driver_curfrag[pq];
2972 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2973 	    sc->sc_cbdmam->dm_mapsize,
2974 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
2975 	while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
2976 		struct pgt_desc *pd;
2977 
2978 		pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
2979 		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
2980 		sc->sc_dirtyq_count[pq]--;
2981 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
2982 		sc->sc_freeq_count[pq]++;
2983 		pgt_unload_tx_desc_frag(sc, pd);
2984 		if (sc->sc_debug & SC_DEBUG_QUEUES)
2985 			DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
2986 			    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
2987 		wokeup++;
2988 		if (pgt_queue_is_data(pq))
2989 			sc->sc_ic.ic_if.if_oerrors++;
2990 	}
2991 
2992 	return (wokeup);
2993 }
2994 
2995 int
2996 pgt_dma_alloc(struct pgt_softc *sc)
2997 {
2998 	size_t size;
2999 	int i, error, nsegs;
3000 
3001 	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
3002 		TAILQ_INIT(&sc->sc_freeq[i]);
3003 		TAILQ_INIT(&sc->sc_dirtyq[i]);
3004 	}
3005 
3006 	/*
3007 	 * control block
3008 	 */
3009 	size = sizeof(struct pgt_control_block);
3010 
3011 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3012 	    BUS_DMA_NOWAIT, &sc->sc_cbdmam);
3013 	if (error != 0) {
3014 		printf("%s: can not create DMA tag for control block\n",
3015 		    sc->sc_dev.dv_xname);
3016 		goto out;
3017 	}
3018 
3019 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3020 	    0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3021 	if (error != 0) {
3022 		printf("%s: can not allocate DMA memory for control block\n",
3023 		    sc->sc_dev.dv_xname);
3024 		goto out;
3025 	}
3026 
3027 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
3028 	    size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
3029 	if (error != 0) {
3030 		printf("%s: can not map DMA memory for control block\n",
3031 		    sc->sc_dev.dv_xname);
3032 		goto out;
3033 	}
3034 
3035 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
3036 	    sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
3037 	if (error != 0) {
3038 		printf("%s: can not load DMA map for control block\n",
3039 		    sc->sc_dev.dv_xname);
3040 		goto out;
3041 	}
3042 
3043 	/*
3044 	 * powersave
3045 	 */
3046 	size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;
3047 
3048 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3049 	    BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
3050 	if (error != 0) {
3051 		printf("%s: can not create DMA tag for powersave\n",
3052 		    sc->sc_dev.dv_xname);
3053 		goto out;
3054 	}
3055 
3056 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3057 	   0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3058 	if (error != 0) {
3059 		printf("%s: can not allocate DMA memory for powersave\n",
3060 		    sc->sc_dev.dv_xname);
3061 		goto out;
3062 	}
3063 
3064 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
3065 	    size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
3066 	if (error != 0) {
3067 		printf("%s: can not map DMA memory for powersave\n",
3068 		    sc->sc_dev.dv_xname);
3069 		goto out;
3070 	}
3071 
3072 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
3073 	    sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
3074 	if (error != 0) {
3075 		printf("%s: can not load DMA map for powersave\n",
3076 		    sc->sc_dev.dv_xname);
3077 		goto out;
3078 	}
3079 
3080 	/*
3081 	 * fragments
3082 	 */
3083 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3084 	if (error != 0)
3085 		goto out;
3086 
3087 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3088 	if (error != 0)
3089 		goto out;
3090 
3091 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3092 	if (error != 0)
3093 		goto out;
3094 
3095 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3096 	if (error != 0)
3097 		goto out;
3098 
3099 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
3100 	if (error != 0)
3101 		goto out;
3102 
3103 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
3104 	if (error != 0)
3105 		goto out;
3106 
3107 out:
3108 	if (error) {
3109 		printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
3110 		pgt_dma_free(sc);
3111 	}
3112 
3113 	return (error);
3114 }
3115 
3116 int
3117 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
3118 {
3119 	struct pgt_desc *pd;
3120 	size_t i, qsize;
3121 	int error, nsegs;
3122 
3123 	switch (pq) {
3124 	case PGT_QUEUE_DATA_LOW_RX:
3125 		qsize = PGT_QUEUE_DATA_RX_SIZE;
3126 		break;
3127 	case PGT_QUEUE_DATA_LOW_TX:
3128 		qsize = PGT_QUEUE_DATA_TX_SIZE;
3129 		break;
3130 	case PGT_QUEUE_DATA_HIGH_RX:
3131 		qsize = PGT_QUEUE_DATA_RX_SIZE;
3132 		break;
3133 	case PGT_QUEUE_DATA_HIGH_TX:
3134 		qsize = PGT_QUEUE_DATA_TX_SIZE;
3135 		break;
3136 	case PGT_QUEUE_MGMT_RX:
3137 		qsize = PGT_QUEUE_MGMT_SIZE;
3138 		break;
3139 	case PGT_QUEUE_MGMT_TX:
3140 		qsize = PGT_QUEUE_MGMT_SIZE;
3141 		break;
3142 	default:
3143 		return (EINVAL);
3144 	}
3145 
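	/*
	 * Allocate qsize descriptors, each with a PGT_FRAG_SIZE DMA
	 * buffer.  Only receive fragments are loaded here; transmit
	 * fragments are loaded later, as packets are queued.
	 */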
3146 	for (i = 0; i < qsize; i++) {
3147 		pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3148 
3149 		error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
3150 		    PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
3151 		if (error != 0) {
3152 			printf("%s: can not create DMA tag for fragment\n",
3153 			    sc->sc_dev.dv_xname);
3154 			free(pd, M_DEVBUF, 0);
3155 			break;
3156 		}
3157 
3158 		error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
3159 		    0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
3160 		if (error != 0) {
3161 			printf("%s: error alloc frag %zu on queue %u\n",
3162 			    sc->sc_dev.dv_xname, i, pq);
3163 			free(pd, M_DEVBUF, 0);
3164 			break;
3165 		}
3166 
3167 		error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
3168 		    PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
3169 		if (error != 0) {
3170 			printf("%s: error map frag %zu on queue %u\n",
3171 			    sc->sc_dev.dv_xname, i, pq);
3172 			free(pd, M_DEVBUF, 0);
3173 			break;
3174 		}
3175 
3176 		if (pgt_queue_is_rx(pq)) {
3177 			error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
3178 			    pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
3179 			if (error != 0) {
3180 				printf("%s: error load frag %zu on queue %u\n",
3181 				    sc->sc_dev.dv_xname, i, pq);
3182 				bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
3183 				    nsegs);
3184 				free(pd, M_DEVBUF, 0);
3185 				break;
3186 			}
3187 			pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
3188 		}
3189 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3190 	}
3191 
3192 	return (error);
3193 }
3194 
3195 void
3196 pgt_dma_free(struct pgt_softc *sc)
3197 {
3198 	/*
3199 	 * fragments
3200 	 */
3201 	if (sc->sc_dmat != NULL) {
3202 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3203 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3204 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3205 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3206 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
3207 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
3208 	}
3209 
3210 	/*
3211 	 * powersave
3212 	 */
3213 	if (sc->sc_psmbuf != NULL) {
3214 		bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
3215 		bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
3216 		sc->sc_psmbuf = NULL;
3217 		sc->sc_psmdmam = NULL;
3218 	}
3219 
3220 	/*
3221 	 * control block
3222 	 */
3223 	if (sc->sc_cb != NULL) {
3224 		bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
3225 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
3226 		sc->sc_cb = NULL;
3227 		sc->sc_cbdmam = NULL;
3228 	}
3229 }
3230 
3231 void
3232 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
3233 {
3234 	struct pgt_desc	*pd;
3235 
3236 	while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
3237 		pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
3238 		TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
3239 		if (pd->pd_dmam != NULL) {
3240 			bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
3241 			pd->pd_dmam = NULL;
3242 		}
3243 		bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
3244 		free(pd, M_DEVBUF, 0);
3245 	}
3246 }
3247 
3248 int
3249 pgt_activate(struct device *self, int act)
3250 {
3251 	struct pgt_softc *sc = (struct pgt_softc *)self;
3252 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3253 
3254 	DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, act));
3255 
3256 	switch (act) {
3257 	case DVACT_SUSPEND:
3258 		if (ifp->if_flags & IFF_RUNNING) {
3259 			pgt_stop(sc, SC_NEEDS_RESET);
3260 			pgt_update_hw_from_sw(sc, 0);
3261 		}
3262 		if (sc->sc_power != NULL)
3263 			(*sc->sc_power)(sc, act);
3264 		break;
3265 	case DVACT_WAKEUP:
3266 		pgt_wakeup(sc);
3267 		break;
3268 	}
3269 	return (0);
3270 }
3271 
3272 void
3273 pgt_wakeup(struct pgt_softc *sc)
3274 {
3275 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3276 
3277 	if (sc->sc_power != NULL)
3278 		(*sc->sc_power)(sc, DVACT_RESUME);
3279 
3280 	pgt_stop(sc, SC_NEEDS_RESET);
3281 	pgt_update_hw_from_sw(sc, 0);
3282 
3283 	if (ifp->if_flags & IFF_UP) {
3284 		pgt_init(ifp);
3285 		pgt_update_hw_from_sw(sc, 0);
3286 	}
3287 }
3288