/*	$OpenBSD: pgt.c,v 1.87 2016/04/13 10:49:26 mpi Exp $  */

/*
 * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
 * Copyright (c) 2004 Brian Fundakowski Feldman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/kthread.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/ic/pgtreg.h>
#include <dev/ic/pgtvar.h>

#include <dev/ic/if_wireg.h>
#include <dev/ic/if_wi_ieee.h>
#include <dev/ic/if_wivar.h>

#ifdef PGT_DEBUG
#define DPRINTF(x)	do { printf x; } while (0)
#else
#define DPRINTF(x)
#endif

#define	SETOID(oid, var, size) {					\
	if (pgt_oid_set(sc, oid, var, size) != 0)			\
		break;							\
}
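
/*
 * SETOID is only meant for use inside a switch or loop: on failure the
 * embedded "break" abandons whatever OID updates would have followed.
 * A sketch of the intended shape (the case label, OID and variable here
 * are illustrative, not taken from a caller):
 *
 *	switch (what) {
 *	case UPDATE_SSID:
 *		SETOID(PGT_OID_SSID, &essid, sizeof(essid));
 *		break;
 *	}
 *
 * It also assumes a softc pointer named "sc" is in scope.
 */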

/*
 * This is a driver for the Intersil Prism family of 802.11g network cards,
 * based upon version 1.2 of the Linux driver and firmware found at
 * http://www.prism54.org/.
 */

#define SCAN_TIMEOUT			5	/* 5 seconds */

struct cfdriver pgt_cd = {
        NULL, "pgt", DV_IFNET
};

void	 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
int	 pgt_media_change(struct ifnet *ifp);
void	 pgt_write_memory_barrier(struct pgt_softc *);
uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
void	 pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_debug_events(struct pgt_softc *, const char *);
uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
void	 pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *);
void	 pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_firmware(struct pgt_softc *);
void	 pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
	     struct pgt_frag *);
int	 pgt_reset(struct pgt_softc *);
void	 pgt_stop(struct pgt_softc *, unsigned int);
void	 pgt_reboot(struct pgt_softc *);
void	 pgt_init_intr(struct pgt_softc *);
void	 pgt_update_intr(struct pgt_softc *, int);
struct mbuf
	*pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
	     struct mbuf *, struct ieee80211_node **);
void	 pgt_input_frames(struct pgt_softc *, struct mbuf *);
void	 pgt_wakeup_intr(struct pgt_softc *);
void	 pgt_sleep_intr(struct pgt_softc *);
void	 pgt_empty_traps(struct pgt_softc_kthread *);
void	 pgt_per_device_kthread(void *);
void	 pgt_async_reset(struct pgt_softc *);
void	 pgt_async_update(struct pgt_softc *);
void	 pgt_txdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_rxdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
void	 pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
struct mbuf
	*pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
int	 pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
int	 pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
int	 pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
void	 pgt_state_dump(struct pgt_softc *);
int	 pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
void	 pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *, uint16_t, int);
void	 pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
struct ieee80211_node
	*pgt_ieee80211_node_alloc(struct ieee80211com *);
void	 pgt_ieee80211_newassoc(struct ieee80211com *,
	     struct ieee80211_node *, int);
void	 pgt_ieee80211_node_free(struct ieee80211com *,
	    struct ieee80211_node *);
void	 pgt_ieee80211_node_copy(struct ieee80211com *,
	     struct ieee80211_node *,
	     const struct ieee80211_node *);
int	 pgt_ieee80211_send_mgmt(struct ieee80211com *,
	     struct ieee80211_node *, int, int, int);
int	 pgt_net_attach(struct pgt_softc *);
void	 pgt_start(struct ifnet *);
int	 pgt_ioctl(struct ifnet *, u_long, caddr_t);
void	 pgt_obj_bss2scanres(struct pgt_softc *,
	     struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
void	 node_mark_active_ap(void *, struct ieee80211_node *);
void	 node_mark_active_adhoc(void *, struct ieee80211_node *);
void	 pgt_watchdog(struct ifnet *);
int	 pgt_init(struct ifnet *);
void	 pgt_update_hw_from_sw(struct pgt_softc *, int, int);
void	 pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
	     struct pgt_obj_mlme *);
void	 pgt_update_sw_from_hw(struct pgt_softc *,
	     struct pgt_async_trap *, struct mbuf *);
int	 pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
int	 pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
int	 pgt_dma_alloc(struct pgt_softc *);
int	 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_dma_free(struct pgt_softc *);
void	 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_wakeup(struct pgt_softc *);

void
pgt_write_memory_barrier(struct pgt_softc *sc)
{
	bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
	    BUS_SPACE_BARRIER_WRITE);
}

uint32_t
pgt_read_4(struct pgt_softc *sc, uint16_t offset)
{
	return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
}

void
pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
}

/*
 * Write out 4 bytes and cause a PCI flush by reading back in on a
 * harmless register.
 */
void
pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
	(void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
}

/*
 * Print the state of events in the queues from an interrupt or a trigger.
 */
void
pgt_debug_events(struct pgt_softc *sc, const char *when)
{
#define	COUNT(i)							\
	letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -			\
	letoh32(sc->sc_cb->pcb_device_curfrag[i])
	if (sc->sc_debug & SC_DEBUG_EVENTS)
		DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
		    sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
		    COUNT(3), COUNT(4), COUNT(5)));
#undef COUNT
}

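/*
 * pcb_driver_curfrag[] and pcb_device_curfrag[] are free-running
 * little-endian 32-bit counters: the driver advances the former as it
 * queues fragments, the device advances the latter as it consumes
 * them.  Their difference is the number of fragments still pending;
 * the unsigned wrap-around is intentional.
 */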
uint32_t
pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
{
	return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
	    letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
}

void
pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = 0;

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
}

int
pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_desc *pd)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
	    PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("%s: unable to load %s tx DMA: %d\n",
		    sc->sc_dev.dv_xname,
		    pgt_queue_is_data(pq) ? "data" : "mgmt", error));
		return (error);
	}
	pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = htole16(0);

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return (0);
}

void
pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
        bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
	pd->pd_dmaaddr = 0;
}

int
pgt_load_firmware(struct pgt_softc *sc)
{
	int error, reg, dirreg, fwoff, ucodeoff, fwlen;
	uint8_t *ucode;
	uint32_t *uc;
	size_t size;
	char *name;

	if (sc->sc_flags & SC_ISL3877)
		name = "pgt-isl3877";
	else
		name = "pgt-isl3890";	/* includes isl3880 */

	error = loadfirmware(name, &ucode, &size);

	if (error != 0) {
		DPRINTF(("%s: error %d, could not read firmware %s\n",
		    sc->sc_dev.dv_xname, error, name));
		return (EIO);
	}

	if (size & 3) {
		DPRINTF(("%s: bad firmware size %u\n",
		    sc->sc_dev.dv_xname, size));
		free(ucode, M_DEVBUF, 0);
		return (EINVAL);
	}

	pgt_reboot(sc);

	fwoff = 0;
	ucodeoff = 0;
	uc = (uint32_t *)ucode;
	reg = PGT_FIRMWARE_INTERNAL_OFFSET;
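	/*
	 * The device exposes its internal memory through a sliding
	 * window: PGT_REG_DIR_MEM_BASE selects which chunk the window
	 * maps, and the image is written through PGT_DIRECT_MEMORY_OFFSET
	 * one 32-bit word at a time, up to PGT_DIRECT_MEMORY_SIZE per
	 * window, with a flushing write for the last word of each window.
	 */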
	while (fwoff < size) {
		pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);

		if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
			fwlen = PGT_DIRECT_MEMORY_SIZE;
		else
			fwlen = size - fwoff;

		dirreg = PGT_DIRECT_MEMORY_OFFSET;
		while (fwlen > 4) {
			pgt_write_4(sc, dirreg, uc[ucodeoff]);
			fwoff += 4;
			dirreg += 4;
			reg += 4;
			fwlen -= 4;
			ucodeoff++;
		}
		pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
		fwoff += 4;
		dirreg += 4;
		reg += 4;
		fwlen -= 4;
		ucodeoff++;
	}
	DPRINTF(("%s: %d bytes microcode loaded from %s\n",
	    sc->sc_dev.dv_xname, fwoff, name));

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
	reg |= PGT_CTRL_STAT_RAMBOOT;
	pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	free(ucode, M_DEVBUF, 0);

	return (0);
}

void
pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_frag *pqfrags)
{
	struct pgt_desc *pd;
	unsigned int i;

	sc->sc_cb->pcb_device_curfrag[pq] = 0;
	i = 0;
	/* XXX why only freeq ??? */
	TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
		pd->pd_fragnum = i;
		pd->pd_fragp = &pqfrags[i];
		if (pgt_queue_is_rx(pq))
			pgt_reinit_rx_desc_frag(sc, pd);
		i++;
	}
	sc->sc_freeq_count[pq] = i;
	/*
	 * The ring buffer describes how many free buffers are available from
	 * the host (for receive queues) or how many are pending (for
	 * transmit queues).
	 */
	if (pgt_queue_is_rx(pq))
		sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
	else
		sc->sc_cb->pcb_driver_curfrag[pq] = 0;
}

/*
 * Turn off interrupts, reset the device (possibly loading firmware),
 * and put everything in a known state.
 */
int
pgt_reset(struct pgt_softc *sc)
{
	int error;

	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * Set up the management receive queue, assuming there are no
	 * requests in progress.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
	    &sc->sc_cb->pcb_data_low_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
	    &sc->sc_cb->pcb_data_low_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
	    &sc->sc_cb->pcb_data_high_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
	    &sc->sc_cb->pcb_data_high_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
	    &sc->sc_cb->pcb_mgmt_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
	    &sc->sc_cb->pcb_mgmt_tx[0]);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);

	/* load firmware */
	if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
		error = pgt_load_firmware(sc);
		if (error) {
			printf("%s: firmware load failed\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
		sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
		DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
	}

	/* upload the control block's DMA address */
	pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
	    htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
	DELAY(PGT_WRITEIO_DELAY);

	/* send a reset event */
	pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
	DELAY(PGT_WRITEIO_DELAY);

	/* await only the initialization interrupt */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
	DELAY(PGT_WRITEIO_DELAY);

	return (0);
}

/*
 * If we're trying to reset and the device has seemingly not been detached,
 * we'll spend a minute seeing if we can't do the reset.
 */
void
pgt_stop(struct pgt_softc *sc, unsigned int flag)
{
	struct ieee80211com *ic;
	unsigned int wokeup;
	int tryagain = 0;

	ic = &sc->sc_ic;

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	sc->sc_flags |= SC_UNINITIALIZED;
	sc->sc_flags |= flag;

	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);

trying_again:
	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/* reboot card */
	pgt_reboot(sc);

	do {
		wokeup = 0;
		/*
		 * We don't expect to be woken up, just to drop the lock
		 * and time out.  Only tx queues can have anything valid
		 * on them outside of an interrupt.
		 */
		while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
			struct pgt_mgmt_desc *pmd;

			pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			pmd->pmd_error = ENETRESET;
			wakeup_one(pmd);
			if (sc->sc_debug & SC_DEBUG_MGMT)
				DPRINTF(("%s: queue: mgmt %p <- %#x "
				    "(drained)\n", sc->sc_dev.dv_xname,
				    pmd, pmd->pmd_oid));
			wokeup++;
		}
		if (wokeup > 0) {
			if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
				sc->sc_flags &= ~flag;
				return;
			}
		}
	} while (wokeup > 0);

	if (flag == SC_NEEDS_RESET) {
		int error;

		DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
		sc->sc_flags &= ~SC_POWERSAVE;
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		error = pgt_reset(sc);
		if (error == 0) {
			tsleep(&sc->sc_flags, 0, "pgtres", hz);
			if (sc->sc_flags & SC_UNINITIALIZED) {
				printf("%s: not responding\n",
				    sc->sc_dev.dv_xname);
				/* Thud.  It was probably removed. */
				if (tryagain)
					panic("pgt went for lunch"); /* XXX */
				tryagain = 1;
			} else {
				/* await all interrupts */
				pgt_write_4_flush(sc, PGT_REG_INT_EN,
				    PGT_INT_STAT_SOURCES);
				DELAY(PGT_WRITEIO_DELAY);
				ic->ic_if.if_flags |= IFF_RUNNING;
			}
		}

		if (tryagain)
			goto trying_again;

		sc->sc_flags &= ~flag;
		if (ic->ic_if.if_flags & IFF_RUNNING)
			pgt_update_hw_from_sw(sc,
			    ic->ic_state != IEEE80211_S_INIT,
			    ic->ic_opmode != IEEE80211_M_MONITOR);
	}

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ic->ic_if.if_snd);
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

void
pgt_attach(struct device *self)
{
	struct pgt_softc *sc = (struct pgt_softc *)self;
	int error;

	/* debug flags */
	//sc->sc_debug |= SC_DEBUG_QUEUES;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_MGMT;
	sc->sc_debug |= SC_DEBUG_UNEXPECTED;
	//sc->sc_debug |= SC_DEBUG_TRIGGER;	/* verbose */
	//sc->sc_debug |= SC_DEBUG_EVENTS;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_POWER;
	sc->sc_debug |= SC_DEBUG_TRAP;
	sc->sc_debug |= SC_DEBUG_LINK;
	//sc->sc_debug |= SC_DEBUG_RXANNEX;
	//sc->sc_debug |= SC_DEBUG_RXFRAG;
	//sc->sc_debug |= SC_DEBUG_RXETHER;

	/* enable card if possible */
	if (sc->sc_enable != NULL)
		(*sc->sc_enable)(sc);

	error = pgt_dma_alloc(sc);
	if (error)
		return;

	sc->sc_ic.ic_if.if_softc = sc;
	TAILQ_INIT(&sc->sc_mgmtinprog);
	TAILQ_INIT(&sc->sc_kthread.sck_traps);
	sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
	sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;

	error = pgt_reset(sc);
	if (error)
		return;

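	/*
	 * pgt_reset() left only the initialization interrupt enabled;
	 * give the firmware up to a second to come up, at which point
	 * pgt_init_intr() clears SC_UNINITIALIZED and wakes us.
	 */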
	tsleep(&sc->sc_flags, 0, "pgtres", hz);
	if (sc->sc_flags & SC_UNINITIALIZED) {
		printf("%s: not responding\n", sc->sc_dev.dv_xname);
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		return;
	} else {
		/* await all interrupts */
		pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
		DELAY(PGT_WRITEIO_DELAY);
	}

	error = pgt_net_attach(sc);
	if (error)
		return;

	if (kthread_create(pgt_per_device_kthread, sc, NULL,
	    sc->sc_dev.dv_xname) != 0)
		return;

	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

int
pgt_detach(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
		/* device was not initialized correctly, so leave early */
		goto out;

	/* stop card */
	pgt_stop(sc, SC_DYING);
	pgt_reboot(sc);

	ieee80211_ifdetach(&sc->sc_ic.ic_if);
	if_detach(&sc->sc_ic.ic_if);

out:
	/* disable card if possible */
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);

	pgt_dma_free(sc);

	return (0);
}

void
pgt_reboot(struct pgt_softc *sc)
{
	uint32_t reg;

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_RESET_DELAY);
}

void
pgt_init_intr(struct pgt_softc *sc)
{
	if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: spurious initialization\n",
			    sc->sc_dev.dv_xname));
	} else {
		sc->sc_flags &= ~SC_UNINITIALIZED;
		wakeup(&sc->sc_flags);
	}
}

/*
 * Service the queues.  If `hack' is nonzero, the data receive queues
 * are skipped; this only has an effect when PGT_BUGGY_INTERRUPT_RECOVERY
 * is enabled.
 */
void
pgt_update_intr(struct pgt_softc *sc, int hack)
{
	/* priority order */
	enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
	    PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
	    PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
	    PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
	};
	struct mbuf *m;
	uint32_t npend;
	unsigned int dirtycount;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_debug_events(sc, "intr");
	/*
	 * Check for completion of tx in their dirty queues.
	 * Check completion of rx into their dirty queues.
	 */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		size_t qdirty, qfree;

		qdirty = sc->sc_dirtyq_count[pqs[i]];
		qfree = sc->sc_freeq_count[pqs[i]];
		/*
		 * We want the wrap-around here.
		 */
		if (pgt_queue_is_rx(pqs[i])) {
			int data;

			data = pgt_queue_is_data(pqs[i]);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
			if (hack && data)
				continue;
#endif
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			/*
			 * Receive queues clean up below, so qdirty must
			 * always be 0.
			 */
			if (npend > qfree) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: rx queue [%u] "
					    "overflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qfree));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			while (qfree-- > npend)
				pgt_rxdone(sc, pqs[i]);
		} else {
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			if (npend > qdirty) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: tx queue [%u] "
					    "underflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qdirty));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			/*
			 * If the free queue was empty, or the data transmit
			 * queue just became empty, wake up any waiters.
			 */
			if (qdirty > npend) {
				if (pgt_queue_is_data(pqs[i])) {
					sc->sc_ic.ic_if.if_timer = 0;
					ifq_clr_oactive(
					    &sc->sc_ic.ic_if.if_snd);
				}
				while (qdirty-- > npend)
					pgt_txdone(sc, pqs[i]);
			}
		}
	}

	/*
	 * This is the deferred completion for received management frames
	 * and where we queue network frames for stack input.
	 */
	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
		struct pgt_mgmt_desc *pmd;

		pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
		/*
		 * If there is no mgmt request in progress or the operation
		 * returned is explicitly a trap, this pmd will essentially
		 * be ignored.
		 */
		pgt_mgmtrx_completion(sc, pmd);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));

	/*
	 * Write out what we've finished with.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
}

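/*
 * The firmware delivers and accepts data frames as 802.3 Ethernet, so
 * fabricate a plausible 802.11 header (plus LLC/SNAP encapsulation) in
 * front of the payload before handing the frame to net80211.  WDS is
 * not handled, and most 802.11 header fields are not recoverable from
 * what the firmware gives us.
 */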
struct mbuf *
pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
    struct mbuf *m, struct ieee80211_node **ni)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *frame;
	struct llc *snap;

	ic = &sc->sc_ic;
	if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
		*ni = ieee80211_ref_node(ic->ic_bss);
		(*ni)->ni_inact = 0;
		return (m);
	}

	M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
	if (m == NULL)
		return (m);
	if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
		m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
		if (m == NULL)
			return (m);
	}
	frame = mtod(m, struct ieee80211_frame *);
	snap = (struct llc *)&frame[1];
	if (ni != NULL) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			*ni = ieee80211_ref_node(ic->ic_bss);
		}
#ifndef IEEE80211_STA_ONLY
		else {
			*ni = ieee80211_find_node(ic, eh->ether_shost);
			/*
			 * Make up associations for ad-hoc mode.  To support
			 * ad-hoc WPA, we'll need to maintain a bounded
			 * pool of ad-hoc stations.
			 */
			if (*ni == NULL &&
			    ic->ic_opmode != IEEE80211_M_HOSTAP) {
				*ni = ieee80211_dup_bss(ic, eh->ether_shost);
				if (*ni != NULL) {
					(*ni)->ni_associd = 1;
					ic->ic_newassoc(ic, *ni, 1);
				}
			}
			if (*ni == NULL) {
				m_freem(m);
				return (NULL);
			}
		}
#endif
		(*ni)->ni_inact = 0;
	}
	snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
	snap->llc_control = LLC_UI;
	snap->llc_snap.org_code[0] = 0;
	snap->llc_snap.org_code[1] = 0;
	snap->llc_snap.org_code[2] = 0;
	snap->llc_snap.ether_type = eh->ether_type;
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
	/* Doesn't look like much of the 802.11 header is available. */
	*(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
	/*
	 * Translate the addresses; WDS is not handled.
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
		break;
	case IEEE80211_M_HOSTAP:
		/* HostAP forwarding defaults to being done on firmware. */
		frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
		break;
#endif
	default:
		break;
	}
	return (m);
}

void
pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
{
	struct ether_header eh;
	struct ifnet *ifp;
	struct ieee80211_channel *chan;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct ieee80211com *ic;
	struct pgt_rx_annex *pra;
	struct pgt_rx_header *pha;
	struct mbuf *next;
	unsigned int n;
	uint32_t rstamp;
	uint8_t rssi;

	ic = &sc->sc_ic;
	ifp = &ic->ic_if;
	for (next = m; m != NULL; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			if (m->m_len < sizeof(*pha)) {
				m = m_pullup(m, sizeof(*pha));
				if (m == NULL) {
					if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
						DPRINTF(("%s: m_pullup "
						    "failure\n",
						    sc->sc_dev.dv_xname));
					ifp->if_ierrors++;
					continue;
				}
			}
			pha = mtod(m, struct pgt_rx_header *);
			pra = NULL;
			goto input;
		}

		if (m->m_len < sizeof(*pra)) {
			m = m_pullup(m, sizeof(*pra));
			if (m == NULL) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: m_pullup failure\n",
					    sc->sc_dev.dv_xname));
				ifp->if_ierrors++;
				continue;
			}
		}
		pra = mtod(m, struct pgt_rx_annex *);
		pha = &pra->pra_header;
		if (sc->sc_debug & SC_DEBUG_RXANNEX)
			DPRINTF(("%s: rx annex: ? %04x "
			    "len %u clock %u flags %02x ? %02x rate %u ? %02x "
			    "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pha->pra_unknown0),
			    letoh16(pha->pra_length),
			    letoh32(pha->pra_clock), pha->pra_flags,
			    pha->pra_unknown1, pha->pra_rate,
			    pha->pra_unknown2, letoh32(pha->pra_frequency),
			    pha->pra_unknown3, pha->pra_rssi,
			    pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
		if (sc->sc_debug & SC_DEBUG_RXETHER)
			DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    ether_sprintf(pra->pra_ether_dhost),
			    ether_sprintf(pra->pra_ether_shost),
			    ntohs(pra->pra_ether_type)));

		memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
		memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
		eh.ether_type = pra->pra_ether_type;

input:
		/*
		 * This flag is set if e.g. packet could not be decrypted.
		 */
		if (pha->pra_flags & PRA_FLAG_BAD) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		/*
		 * After getting what we want, chop off the annex, then
		 * turn into something that looks like it really was
		 * 802.11.
		 */
		rssi = pha->pra_rssi;
		rstamp = letoh32(pha->pra_clock);
		n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
		if (n <= IEEE80211_CHAN_MAX)
			chan = &ic->ic_channels[n];
		else
			chan = ic->ic_bss->ni_chan;
		/* Send to 802.3 listeners. */
		if (pra) {
			m_adj(m, sizeof(*pra));
		} else
			m_adj(m, sizeof(*pha));

		m = pgt_ieee80211_encap(sc, &eh, m, &ni);
		if (m != NULL) {
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;

				tap->wr_flags = 0;
				tap->wr_chan_freq = htole16(chan->ic_freq);
				tap->wr_chan_flags = htole16(chan->ic_flags);
				tap->wr_rssi = rssi;
				tap->wr_max_rssi = ic->ic_max_rssi;

				mb.m_data = (caddr_t)tap;
				mb.m_len = sc->sc_rxtap_len;
				mb.m_next = m;
				mb.m_nextpkt = NULL;
				mb.m_type = 0;
				mb.m_flags = 0;
				bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
			}
#endif
			rxi.rxi_flags = 0;
			ni->ni_rssi = rxi.rxi_rssi = rssi;
			ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
			ieee80211_input(ifp, m, ni, &rxi);
			/*
			 * The frame may have caused the node to be marked for
			 * reclamation (e.g. in response to a DEAUTH message)
			 * so use free_node here instead of unref_node.
			 */
			if (ni == ic->ic_bss)
				ieee80211_unref_node(&ni);
			else
				ieee80211_release_node(&sc->sc_ic, ni);
		} else {
			ifp->if_ierrors++;
		}
	}
}

void
pgt_wakeup_intr(struct pgt_softc *sc)
{
	int shouldupdate;
	int i;

	shouldupdate = 0;
	/* Check for any queues being empty before updating. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			shouldupdate = pgt_queue_frags_pending(sc, i);
		else
			shouldupdate = pgt_queue_frags_pending(sc, i) <
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		shouldupdate = 1;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: wakeup interrupt (update = %d)\n",
		    sc->sc_dev.dv_xname, shouldupdate));
	sc->sc_flags &= ~SC_POWERSAVE;
	if (shouldupdate) {
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_sleep_intr(struct pgt_softc *sc)
{
	int allowed;
	int i;

	allowed = 1;
	/* Check for any queues not being empty before allowing. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			allowed = pgt_queue_frags_pending(sc, i) == 0;
		else
			allowed = pgt_queue_frags_pending(sc, i) >=
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		allowed = 0;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
		    sc->sc_dev.dv_xname, allowed));
	if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
		sc->sc_flags |= SC_POWERSAVE;
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_empty_traps(struct pgt_softc_kthread *sck)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;

	while (!TAILQ_EMPTY(&sck->sck_traps)) {
		pa = TAILQ_FIRST(&sck->sck_traps);
		TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
		m = pa->pa_mbuf;
		m_freem(m);
	}
}

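/*
 * Per-device worker thread: resets, traps and update requests are
 * deferred here because servicing them involves management requests
 * that sleep, which cannot be done from interrupt context.
 */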
void
pgt_per_device_kthread(void *argp)
{
	struct pgt_softc *sc;
	struct pgt_softc_kthread *sck;
	struct pgt_async_trap *pa;
	struct mbuf *m;
	int s;

	sc = argp;
	sck = &sc->sc_kthread;
	while (!sck->sck_exit) {
		if (!sck->sck_update && !sck->sck_reset &&
		    TAILQ_EMPTY(&sck->sck_traps))
			tsleep(&sc->sc_kthread, 0, "pgtkth", 0);
		if (sck->sck_reset) {
			DPRINTF(("%s: [thread] async reset\n",
			    sc->sc_dev.dv_xname));
			sck->sck_reset = 0;
			sck->sck_update = 0;
			pgt_empty_traps(sck);
			s = splnet();
			pgt_stop(sc, SC_NEEDS_RESET);
			splx(s);
		} else if (!TAILQ_EMPTY(&sck->sck_traps)) {
			DPRINTF(("%s: [thread] got a trap\n",
			    sc->sc_dev.dv_xname));
			pa = TAILQ_FIRST(&sck->sck_traps);
			TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
			m = pa->pa_mbuf;
			m_adj(m, sizeof(*pa));
			pgt_update_sw_from_hw(sc, pa, m);
			m_freem(m);
		} else if (sck->sck_update) {
			sck->sck_update = 0;
			pgt_update_sw_from_hw(sc, NULL, NULL);
		}
	}
	pgt_empty_traps(sck);
	kthread_exit(0);
}

void
pgt_async_reset(struct pgt_softc *sc)
{
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	sc->sc_kthread.sck_reset = 1;
	wakeup(&sc->sc_kthread);
}

void
pgt_async_update(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_DYING)
		return;
	sc->sc_kthread.sck_update = 1;
	wakeup(&sc->sc_kthread);
}

int
pgt_intr(void *arg)
{
	struct pgt_softc *sc;
	struct ifnet *ifp;
	u_int32_t reg;

	sc = arg;
	ifp = &sc->sc_ic.ic_if;

	/*
	 * Here the Linux driver ands in the value of the INT_EN register,
	 * and masks off everything but the documented interrupt bits.  Why?
	 *
	 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
	 * other times.
	 */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
	    sc->sc_flags & SC_POWERSAVE) {
		/*
		 * Don't try handling the interrupt in sleep mode.
		 */
		reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
		if (reg & PGT_CTRL_STAT_SLEEPMODE)
			return (0);
	}
	reg = pgt_read_4(sc, PGT_REG_INT_STAT);
	if (reg == 0)
		return (0); /* This interrupt is not from us */

	pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
	if (reg & PGT_INT_STAT_INIT)
		pgt_init_intr(sc);
	if (reg & PGT_INT_STAT_UPDATE) {
		pgt_update_intr(sc, 0);
		/*
		 * If we got an update, it's not really asleep.
		 */
		sc->sc_flags &= ~SC_POWERSAVE;
		/*
		 * Pretend I have any idea what the documentation
		 * would say, and just give it a shot sending an
		 * "update" after acknowledging the interrupt
		 * bits and writing out the new control block.
		 */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
	if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
		pgt_sleep_intr(sc);
	if (reg & PGT_INT_STAT_WAKEUP)
		pgt_wakeup_intr(sc);

	if (sc->sc_flags & SC_INTR_RESET) {
		sc->sc_flags &= ~SC_INTR_RESET;
		pgt_async_reset(sc);
	}

	if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
		DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
		    sc->sc_dev.dv_xname,
		    reg & ~PGT_INT_STAT_SOURCES,
		    pgt_read_4(sc, PGT_REG_CTRL_STAT)));
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		pgt_start(ifp);

	return (1);
}

void
pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
	TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* Management frames want completion information. */
	if (sc->sc_debug & SC_DEBUG_QUEUES) {
		DPRINTF(("%s: queue: tx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
		if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
			struct pgt_mgmt_frame *pmf;

			pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
			DPRINTF(("%s: queue: txmgmt %p <- "
			    "(ver %u, op %u, flags %#x)\n",
			    sc->sc_dev.dv_xname,
			    pd, pmf->pmf_version, pmf->pmf_operation,
			    pmf->pmf_flags));
		}
	}
	pgt_unload_tx_desc_frag(sc, pd);
}

void
pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: rx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
	if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
	    pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
		DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
		    sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
}

/*
 * Traps are generally used for the firmware to report changes in state
 * back to the host.  Mostly this processes changes in link state, but
 * it needs to also be used to initiate WPA and other authentication
 * schemes in terms of client (station) or server (access point).
 */
void
pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
    size_t size)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;
	char *p;
	size_t total;

	if (sc->sc_flags & SC_DYING)
		return;

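	/*
	 * The trap is queued as a single mbuf laid out as
	 * [struct pgt_async_trap][oid][trap data]; the kthread later
	 * strips the leading pgt_async_trap with m_adj() before
	 * interpreting the rest.
	 */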
	total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
	if (total > MLEN) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m = NULL;
		}
	} else
		m = m_get(M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;
	else
		m->m_len = total;

	pa = mtod(m, struct pgt_async_trap *);
	p = mtod(m, char *) + sizeof(*pa);
	*(uint32_t *)p = oid;
	p += sizeof(uint32_t);
	memcpy(p, trapdata, size);
	pa->pa_mbuf = m;

	TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
	wakeup(&sc->sc_kthread);
}

/*
 * Process a completed management response (all requests should be
 * responded to, quickly) or an event (trap).
 */
void
pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	uint32_t oid, size;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
	TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
	sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
	    pd, pd_link);
	sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
	if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt desc too small: %u\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pd->pd_fragp->pf_size)));
		goto out_nopmd;
	}
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	if (pmf->pmf_version != PMF_VER) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt version %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_version));
		goto out_nopmd;
	}
	if (pmf->pmf_device != PMF_DEV) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt dev %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_device));
		goto out_nopmd;
	}
	if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt flags %x\n",
			    sc->sc_dev.dv_xname,
			    pmf->pmf_flags & ~PMF_FLAG_VALID));
		goto out_nopmd;
	}
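	/*
	 * The firmware flags each frame with the byte order it used;
	 * honor PMF_FLAG_LE rather than assuming a fixed endianness.
	 */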
	if (pmf->pmf_flags & PMF_FLAG_LE) {
		oid = letoh32(pmf->pmf_oid);
		size = letoh32(pmf->pmf_size);
	} else {
		oid = betoh32(pmf->pmf_oid);
		size = betoh32(pmf->pmf_size);
	}
	if (pmf->pmf_operation == PMF_OP_TRAP) {
		pmd = NULL; /* ignored */
		DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmf->pmf_operation, oid, size));
		pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
		    min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
		goto out_nopmd;
	}
	if (pmd == NULL) {
		if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
			DPRINTF(("%s: spurious mgmt received "
			    "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
			    pmf->pmf_operation, oid, size));
		goto out_nopmd;
	}
	switch (pmf->pmf_operation) {
	case PMF_OP_RESPONSE:
		pmd->pmd_error = 0;
		break;
	case PMF_OP_ERROR:
		pmd->pmd_error = EPERM;
		goto out;
	default:
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt op %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_operation));
		pmd->pmd_error = EIO;
		goto out;
	}
	if (oid != pmd->pmd_oid) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
			    sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
		pmd->pmd_oid = oid;
	}
	if (pmd->pmd_recvbuf != NULL) {
		if (size > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
				    sc->sc_dev.dv_xname, oid, size));
			pmd->pmd_error = EIO;
			goto out;
		}
		if (size > pmd->pmd_len)
			pmd->pmd_error = ENOMEM;
		else
			memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
			    size);
		pmd->pmd_len = size;
	}

out:
	TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
	wakeup_one(pmd);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
out_nopmd:
	pgt_reinit_rx_desc_frag(sc, pd);
}

/*
 * Queue packets for reception and defragmentation.  It is unknown
 * whether the rx queue can fill up enough to start, but not finish,
 * queueing a fragmented packet.
 */
struct mbuf *
pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct ifnet *ifp;
	struct pgt_desc *pd;
	struct mbuf *top, **mp, *m;
	size_t datalen;
	uint16_t morefrags, dataoff;
	int tlen = 0;

	ifp = &sc->sc_ic.ic_if;
	m = NULL;
	top = NULL;
	mp = &top;

	while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
		sc->sc_dirtyq_count[pq]--;
		datalen = letoh16(pd->pd_fragp->pf_size);
		dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
		morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);

		if (sc->sc_debug & SC_DEBUG_RXFRAG)
			DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n",
			    sc->sc_dev.dv_xname, datalen, dataoff,
			    pd->pd_fragp->pf_flags));

		/* Add the (two+?) bytes for the header. */
		if (datalen + dataoff > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: data rx too big: %u\n",
				    sc->sc_dev.dv_xname, datalen));
			goto fail;
		}

		if (m == NULL)
			MGETHDR(m, M_DONTWAIT, MT_DATA);
		else
			m = m_get(M_DONTWAIT, MT_DATA);

		if (m == NULL)
			goto fail;
		if (datalen > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_free(m);
				goto fail;
			}
		}
		bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
		m->m_len = datalen;
		tlen += datalen;

		*mp = m;
		mp = &m->m_next;

		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
		sc->sc_freeq_count[pq]++;
		pgt_reinit_rx_desc_frag(sc, pd);

		if (!morefrags)
			break;
	}

	if (top) {
		top->m_pkthdr.len = tlen;
	}
	return (top);

fail:
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	pgt_reinit_rx_desc_frag(sc, pd);

	ifp->if_ierrors++;
	if (top)
		m_freem(top);
	return (NULL);
}

int
pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
    void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_recvbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure getting oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

int
pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
    void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_sendbuf = arg;
	pmd.pmd_recvbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure retrieving oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

int
pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
    const void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_sendbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure setting oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

void
pgt_state_dump(struct pgt_softc *sc)
{
	printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    pgt_read_4(sc, PGT_REG_CTRL_STAT),
	    pgt_read_4(sc, PGT_REG_INT_STAT));

	printf("%s: state dump: driver curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[5]));

	printf("%s: state dump: device curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_device_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[5]));
}

int
pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	int error, i;

	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return (EIO);
	if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
		return (ENOMEM);
	pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
	if (pd == NULL)
		return (ENOMEM);
	error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
	if (error)
		return (error);
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	pmf->pmf_version = PMF_VER;
	/* "get" and "retrieve" operations look the same */
	if (pmd->pmd_recvbuf != NULL)
		pmf->pmf_operation = PMF_OP_GET;
	else
		pmf->pmf_operation = PMF_OP_SET;
	pmf->pmf_oid = htobe32(pmd->pmd_oid);
	pmf->pmf_device = PMF_DEV;
	pmf->pmf_flags = 0;
	pmf->pmf_size = htobe32(pmd->pmd_len);
	/* "set" and "retrieve" operations both send data */
	if (pmd->pmd_sendbuf != NULL)
		memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len);
	else
		bzero(pmf + 1, pmd->pmd_len);
	pmd->pmd_error = EINPROGRESS;
	TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
	pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
	    sizeof(*pmf) + pmd->pmd_len, 0);
	/*
	 * Try for one second, triggering 10 times.
	 *
	 * Do our best to work around seemingly buggy CardBus controllers
	 * on Soekris 4521 that fail to get interrupts with alarming
	 * regularity: run as if an interrupt occurred and service every
	 * queue except for mbuf reception.
	 */
	i = 0;
	do {
		if (tsleep(pmd, 0, "pgtmgm", hz / 10) != EWOULDBLOCK)
			break;
		if (pmd->pmd_error != EINPROGRESS)
			break;
		if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
			pmd->pmd_error = EIO;
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			break;
		}
		if (i != 9)
			pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
		pgt_update_intr(sc, 0);
#endif
	} while (i++ < 10);

	if (pmd->pmd_error == EINPROGRESS) {
		printf("%s: timeout waiting for management "
		    "packet response to %#x\n",
		    sc->sc_dev.dv_xname, pmd->pmd_oid);
		TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			pgt_state_dump(sc);
		pgt_async_reset(sc);
		error = ETIMEDOUT;
	} else
		error = 0;

	return (error);
}

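/*
 * Hand a loaded descriptor to the firmware: move it to the dirty queue,
 * publish it by advancing the driver curfrag counter in the control
 * block, and (unless more fragments of the same frame follow) poke the
 * device so it notices.
 */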
1729 void
1730 pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
1731     uint16_t len, int morecoming)
1732 {
1733 	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
1734 	sc->sc_freeq_count[pq]--;
1735 	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
1736 	sc->sc_dirtyq_count[pq]++;
1737 	if (sc->sc_debug & SC_DEBUG_QUEUES)
1738 		DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
1739 		    pd->pd_fragnum, pq));
1740 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1741 	    sc->sc_cbdmam->dm_mapsize,
1742 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
1743 	if (morecoming)
1744 		pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
1745 	pd->pd_fragp->pf_size = htole16(len);
1746 	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
1747 	    pd->pd_dmam->dm_mapsize,
1748 	    BUS_DMASYNC_POSTWRITE);
1749 	sc->sc_cb->pcb_driver_curfrag[pq] =
1750 	    htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
1751 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1752 	    sc->sc_cbdmam->dm_mapsize,
1753 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
1754 	if (!morecoming)
1755 		pgt_maybe_trigger(sc, pq);
1756 }
1757 
1758 void
1759 pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
1760 {
1761 	unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
1762 	uint32_t reg;
1763 
1764 	if (sc->sc_debug & SC_DEBUG_TRIGGER)
1765 		DPRINTF(("%s: triggered by queue [%u]\n",
1766 		    sc->sc_dev.dv_xname, pq));
1767 	pgt_debug_events(sc, "trig");
1768 	if (sc->sc_flags & SC_POWERSAVE) {
1769 		/* Magic values ahoy? */
1770 		if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
1771 			do {
1772 				reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
1773 				if (reg & PGT_CTRL_STAT_SLEEPMODE)
					break;
1774 				DELAY(PGT_WRITEIO_DELAY);
1775 			} while (tries-- != 0);
1776 			if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
1777 				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1778 					DPRINTF(("%s: timeout triggering from "
1779 					    "sleep mode\n",
1780 					    sc->sc_dev.dv_xname));
1781 				pgt_async_reset(sc);
1782 				return;
1783 			}
1784 		}
1785 		pgt_write_4_flush(sc, PGT_REG_DEV_INT,
1786 		    PGT_DEV_INT_WAKEUP);
1787 		DELAY(PGT_WRITEIO_DELAY);
1788 		/* read the status back in */
1789 		(void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
1790 		DELAY(PGT_WRITEIO_DELAY);
1791 	} else {
1792 		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1793 		DELAY(PGT_WRITEIO_DELAY);
1794 	}
1795 }
1796 
1797 struct ieee80211_node *
1798 pgt_ieee80211_node_alloc(struct ieee80211com *ic)
1799 {
1800 	struct pgt_ieee80211_node *pin;
1801 
1802 	pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
1803 	if (pin != NULL) {
1804 		pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
1805 	}
1806 	return (struct ieee80211_node *)pin;
1807 }
1808 
1809 void
1810 pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
1811     int reallynew)
1812 {
1813 	ieee80211_ref_node(ni);
1814 }
1815 
1816 void
1817 pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1818 {
1819 	struct pgt_ieee80211_node *pin;
1820 
1821 	pin = (struct pgt_ieee80211_node *)ni;
1822 	free(pin, M_DEVBUF, 0);
1823 }
1824 
1825 void
1826 pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
1827     const struct ieee80211_node *src)
1828 {
1829 	const struct pgt_ieee80211_node *psrc;
1830 	struct pgt_ieee80211_node *pdst;
1831 
1832 	psrc = (const struct pgt_ieee80211_node *)src;
1833 	pdst = (struct pgt_ieee80211_node *)dst;
1834 	bcopy(psrc, pdst, sizeof(*psrc));
1835 }
1836 
1837 int
1838 pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
1839     int type, int arg1, int arg2)
1840 {
1841 	return (EOPNOTSUPP);
1842 }
1843 
1844 int
1845 pgt_net_attach(struct pgt_softc *sc)
1846 {
1847 	struct ieee80211com *ic = &sc->sc_ic;
1848 	struct ifnet *ifp = &ic->ic_if;
1849 	struct ieee80211_rateset *rs;
1850 	uint8_t rates[IEEE80211_RATE_MAXSIZE];
1851 	struct pgt_obj_buffer psbuffer;
1852 	struct pgt_obj_frequencies *freqs;
1853 	uint32_t phymode, country;
1854 	unsigned int chan, i, j;
1855 	int error, firstchan = -1;
1856 
1857 	psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
1858 	psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
1859 	error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(psbuffer));
1860 	if (error)
1861 		return (error);
1862 	error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
1863 	if (error)
1864 		return (error);
1865 	error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
1866 	    sizeof(ic->ic_myaddr));
1867 	if (error)
1868 		return (error);
1869 	error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
1870 	if (error)
1871 		return (error);
1872 
1873 	ifp->if_softc = sc;
1874 	ifp->if_ioctl = pgt_ioctl;
1875 	ifp->if_start = pgt_start;
1876 	ifp->if_watchdog = pgt_watchdog;
1877 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
1878 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1879 
1880 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1881 
1882 	/*
1883 	 * Set channels
1884 	 *
1885 	 * Prism hardware likes to report supported frequencies that are
1886 	 * not actually available for the country of origin.
1887 	 */
1888 	j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
1889 	freqs = malloc(j, M_DEVBUF, M_WAITOK);
1890 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
1891 	if (error) {
1892 		free(freqs, M_DEVBUF, 0);
1893 		return (error);
1894 	}
1895 
1896 	for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
1897 		chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
1898 		    0);
1899 
1900 		if (chan > IEEE80211_CHAN_MAX) {
1901 			printf("%s: reported bogus channel (%u)\n",
1902 			    sc->sc_dev.dv_xname, chan);
1903 			free(freqs, M_DEVBUF, 0);
1904 			return (EIO);
1905 		}
1906 
1907 		if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
1908 			if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
1909 				continue;
1910 			if (country == letoh32(PGT_COUNTRY_USA)) {
1911 				if (chan >= 12 && chan <= 14)
1912 					continue;
1913 			}
1914 			if (chan <= 14)
1915 				ic->ic_channels[chan].ic_flags |=
1916 				    IEEE80211_CHAN_B;
1917 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
1918 		} else {
1919 			if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
1920 				continue;
1921 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
1922 		}
1923 
1924 		ic->ic_channels[chan].ic_freq =
1925 		    letoh16(freqs->pof_freqlist_mhz[i]);
1926 
1927 		if (firstchan == -1)
1928 			firstchan = chan;
1929 
1930 		DPRINTF(("%s: set channel %d to freq %uMHz\n",
1931 		    sc->sc_dev.dv_xname, chan,
1932 		    letoh16(freqs->pof_freqlist_mhz[i])));
1933 	}
1934 	free(freqs, M_DEVBUF, 0);
1935 	if (firstchan == -1) {
1936 		printf("%s: no channels found\n", sc->sc_dev.dv_xname);
1937 		return (EIO);
1938 	}
1939 
1940 	/*
1941 	 * Set rates
1942 	 */
1943 	bzero(rates, sizeof(rates));
1944 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
1945 	if (error)
1946 		return (error);
1947 	for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
1948 		switch (rates[i]) {
1949 		case 2:
1950 		case 4:
1951 		case 11:
1952 		case 22:
1953 		case 44: /* maybe */
1954 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1955 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
1956 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1957 			}
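			/* FALLTHROUGH */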
1958 		default:
1959 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1960 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
1961 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1962 			}
1963 			if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
1964 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
1965 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1966 			}
1967 			rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
1968 			rs->rs_rates[rs->rs_nrates++] = rates[i];
1969 		}
1970 	}
1971 
1972 	ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT |
1973 	    IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
1974 #ifndef IEEE80211_STA_ONLY
1975 	ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP;
1976 #endif
1977 	ic->ic_opmode = IEEE80211_M_STA;
1978 	ic->ic_state = IEEE80211_S_INIT;
1979 
1980 	if_attach(ifp);
1981 	ieee80211_ifattach(ifp);
1982 
1983 	/* setup post-attach/pre-lateattach vector functions */
1984 	sc->sc_newstate = ic->ic_newstate;
1985 	ic->ic_newstate = pgt_newstate;
1986 	ic->ic_node_alloc = pgt_ieee80211_node_alloc;
1987 	ic->ic_newassoc = pgt_ieee80211_newassoc;
1988 	ic->ic_node_free = pgt_ieee80211_node_free;
1989 	ic->ic_node_copy = pgt_ieee80211_node_copy;
1990 	ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
1991 	ic->ic_max_rssi = 255;	/* rssi is a u_int8_t */
1992 
1993 	/* let net80211 handle switching around the media + resetting */
1994 	ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
1995 
1996 #if NBPFILTER > 0
1997 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
1998 	    sizeof(struct ieee80211_frame) + 64);
1999 
2000 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
2001 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
2002 	sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
2003 
2004 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
2005 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
2006 	sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
2007 #endif
2008 	return (0);
2009 }
2010 
2011 int
2012 pgt_media_change(struct ifnet *ifp)
2013 {
2014 	struct pgt_softc *sc = ifp->if_softc;
2015 	int error;
2016 
2017 	error = ieee80211_media_change(ifp);
2018 	if (error == ENETRESET) {
2019 		pgt_update_hw_from_sw(sc, 0, 0);
2020 		error = 0;
2021 	}
2022 
2023 	return (error);
2024 }
2025 
2026 void
2027 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
2028 {
2029 	struct pgt_softc *sc = ifp->if_softc;
2030 	struct ieee80211com *ic = &sc->sc_ic;
2031 	uint32_t rate;
2032 	int s;
2033 
2034 	imr->ifm_status = 0;
2035 	imr->ifm_active = IFM_IEEE80211 | IFM_NONE;
2036 
2037 	if (!(ifp->if_flags & IFF_UP))
2038 		return;
2039 
2040 	s = splnet();
2041 
2042 	if (ic->ic_fixed_rate != -1) {
2043 		rate = ic->ic_sup_rates[ic->ic_curmode].
2044 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
2045 	} else {
2046 		if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
2047 			goto out;
2048 		rate = letoh32(rate);
2049 		if (sc->sc_debug & SC_DEBUG_LINK) {
2050 			DPRINTF(("%s: %s: link rate %u\n",
2051 			    sc->sc_dev.dv_xname, __func__, rate));
2052 		}
2053 		if (rate == 0)
2054 			goto out;
2055 	}
2056 
2057 	imr->ifm_status = IFM_AVALID;
2058 	imr->ifm_active = IFM_IEEE80211;
2059 	if (ic->ic_state == IEEE80211_S_RUN)
2060 		imr->ifm_status |= IFM_ACTIVE;
2061 
2062 	imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);
2063 
2064 	switch (ic->ic_opmode) {
2065 	case IEEE80211_M_STA:
2066 		break;
2067 #ifndef IEEE80211_STA_ONLY
2068 	case IEEE80211_M_IBSS:
2069 		imr->ifm_active |= IFM_IEEE80211_ADHOC;
2070 		break;
2071 	case IEEE80211_M_AHDEMO:
2072 		imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
2073 		break;
2074 	case IEEE80211_M_HOSTAP:
2075 		imr->ifm_active |= IFM_IEEE80211_HOSTAP;
2076 		break;
2077 #endif
2078 	case IEEE80211_M_MONITOR:
2079 		imr->ifm_active |= IFM_IEEE80211_MONITOR;
2080 		break;
2081 	default:
2082 		break;
2083 	}
2084 
2085 out:
2086 	splx(s);
2087 }
2088 
2089 /*
2090  * Start data frames.  Critical sections surround the boundary of
2091  * management frame transmission / transmission acknowledgement / response
2092  * and data frame transmission / transmission acknowledgement.
2093  */
2094 void
2095 pgt_start(struct ifnet *ifp)
2096 {
2097 	struct pgt_softc *sc;
2098 	struct ieee80211com *ic;
2099 	struct pgt_desc *pd;
2100 	struct mbuf *m;
2101 	int error;
2102 
2103 	sc = ifp->if_softc;
2104 	ic = &sc->sc_ic;
2105 
2106 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
2107 	    !(ifp->if_flags & IFF_RUNNING) ||
2108 	    ic->ic_state != IEEE80211_S_RUN) {
2109 		return;
2110 	}
2111 
2112 	/*
2113 	 * Management packets should probably be MLME frames
2114 	 * (i.e. hostap "managed" mode); we don't touch the
2115 	 * net80211 management queue.
2116 	 */
2117 	for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
2118 	    PGT_QUEUE_FULL_THRESHOLD && !IFQ_IS_EMPTY(&ifp->if_snd);) {
2119 		pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
2120 		m = ifq_deq_begin(&ifp->if_snd);
2121 		if (m == NULL)
2122 			break;
2123 		if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
2124 			error = pgt_load_tx_desc_frag(sc,
2125 			    PGT_QUEUE_DATA_LOW_TX, pd);
2126 			if (error) {
2127 				ifq_deq_rollback(&ifp->if_snd, m);
2128 				break;
2129 			}
2130 			ifq_deq_commit(&ifp->if_snd, m);
2131 			m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
2132 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2133 			    pd, m->m_pkthdr.len, 0);
2134 		} else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
2135 			struct pgt_desc *pd2;
2136 
2137 			/*
2138 			 * Transmit a fragmented frame if there is
2139 			 * not enough room in one fragment; limit
2140 			 * to two fragments (802.11 itself couldn't
2141 			 * even support a full two.)
2142 			 */
2143 			if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
2144 			    PGT_QUEUE_FULL_THRESHOLD) {
2145 				ifq_deq_rollback(&ifp->if_snd, m);
2146 				break;
2147 			}
2148 			pd2 = TAILQ_NEXT(pd, pd_link);
2149 			error = pgt_load_tx_desc_frag(sc,
2150 			    PGT_QUEUE_DATA_LOW_TX, pd);
2151 			if (error == 0) {
2152 				error = pgt_load_tx_desc_frag(sc,
2153 				    PGT_QUEUE_DATA_LOW_TX, pd2);
2154 				if (error) {
2155 					pgt_unload_tx_desc_frag(sc, pd);
2156 					TAILQ_INSERT_HEAD(&sc->sc_freeq[
2157 					    PGT_QUEUE_DATA_LOW_TX], pd,
2158 					    pd_link);
2159 				}
2160 			}
2161 			if (error) {
2162 				ifq_deq_rollback(&ifp->if_snd, m);
2163 				break;
2164 			}
2165 			ifq_deq_commit(&ifp->if_snd, m);
2166 			m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
2167 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2168 			    pd, PGT_FRAG_SIZE, 1);
2169 			m_copydata(m, PGT_FRAG_SIZE,
2170 			    m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
2171 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2172 			    pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
2173 		} else {
2174 			ifq_deq_commit(&ifp->if_snd, m);
2175 			ifp->if_oerrors++;
2176 			m_freem(m);
2177 			m = NULL;
2178 		}
2179 		if (m != NULL) {
2180 			struct ieee80211_node *ni;
2181 #if NBPFILTER > 0
2182 			if (ifp->if_bpf != NULL)
2183 				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2184 #endif
2185 			ifp->if_opackets++;
2186 			ifp->if_timer = 1;
2187 			sc->sc_txtimer = 5;
2188 			ni = ieee80211_find_txnode(&sc->sc_ic,
2189 			    mtod(m, struct ether_header *)->ether_dhost);
2190 			if (ni != NULL) {
2191 				ni->ni_inact = 0;
2192 				if (ni != ic->ic_bss)
2193 					ieee80211_release_node(&sc->sc_ic, ni);
2194 			}
2195 #if NBPFILTER > 0
2196 			if (sc->sc_drvbpf != NULL) {
2197 				struct mbuf mb;
2198 				struct ether_header eh;
2199 				struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;
2200 
2201 				bcopy(mtod(m, struct ether_header *), &eh,
2202 				    sizeof(eh));
2203 				m_adj(m, sizeof(eh));
2204 				m = pgt_ieee80211_encap(sc, &eh, m, NULL);
2205 
2206 				tap->wt_flags = 0;
2207 				tap->wt_rate = 0;	/* XXX rate not known */
2209 				tap->wt_chan_freq =
2210 				    htole16(ic->ic_bss->ni_chan->ic_freq);
2211 				tap->wt_chan_flags =
2212 				    htole16(ic->ic_bss->ni_chan->ic_flags);
2213 
2214 				if (m != NULL) {
2215 					mb.m_data = (caddr_t)tap;
2216 					mb.m_len = sc->sc_txtap_len;
2217 					mb.m_next = m;
2218 					mb.m_nextpkt = NULL;
2219 					mb.m_type = 0;
2220 					mb.m_flags = 0;
2221 
2222 					bpf_mtap(sc->sc_drvbpf, &mb,
2223 					    BPF_DIRECTION_OUT);
2224 				}
2225 			}
2226 #endif
2227 			if (m != NULL)
2228 				m_freem(m);
2229 		}
2230 	}
2231 }
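
/*
 * Summary of the fragmentation decision in pgt_start() above
 * (illustrative; lengths are mbuf payload bytes):
 *
 *	len <= PGT_FRAG_SIZE	  one fragment, transmitted directly
 *	len <= PGT_FRAG_SIZE * 2  two fragments; the first is passed to
 *				  pgt_desc_transmit() with morecoming=1,
 *				  which sets PF_FLAG_MF and defers the
 *				  device trigger to the second fragment
 *	len >  PGT_FRAG_SIZE * 2  dropped and counted in if_oerrors
 */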
2232 
2233 int
2234 pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
2235 {
2236 	struct pgt_softc *sc = ifp->if_softc;
2237 	struct ifreq *ifr;
2238 	struct wi_req *wreq;
2239 	struct ieee80211_nodereq_all *na;
2240 	struct ieee80211com *ic;
2241 	struct pgt_obj_bsslist *pob;
2242 	struct wi_scan_p2_hdr *p2hdr;
2243 	struct wi_scan_res *res;
2244 	uint32_t noise;
2245 	int maxscan, i, j, s, error = 0;
2246 
2247 	ic = &sc->sc_ic;
2248 	ifr = (struct ifreq *)req;
2249 
2250 	s = splnet();
2251 	switch (cmd) {
2252 	case SIOCS80211SCAN:
2253 		/*
2254 		 * This chip always scans as soon as it is initialized.
2255 		 */
2256 
2257 		/*
2258 		 * Give the chip a bit of time to scan in case it was not
2259 		 * initialized before, and let the userland process wait.
2260 		 */
2261 		tsleep(&sc->sc_flags, 0, "pgtsca", hz * SCAN_TIMEOUT);
2262 
2263 		break;
2264 	case SIOCG80211ALLNODES: {
2265 		struct ieee80211_nodereq *nr = NULL;
2266 		na = (struct ieee80211_nodereq_all *)req;
2267 		wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);
2268 
2269 		maxscan = PGT_OBJ_BSSLIST_NBSS;
2270 		pob = malloc(sizeof(*pob) +
2271 		    sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
2272 		error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
2273 		    sizeof(noise));
2274 
2275 		if (error == 0) {
2276 			noise = letoh32(noise);
2277 			error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
2278 			    sizeof(*pob) +
2279 			    sizeof(struct pgt_obj_bss) * maxscan);
2280 		}
2281 
2282 		if (error == 0) {
2283 			maxscan = min(PGT_OBJ_BSSLIST_NBSS,
2284 			    letoh32(pob->pob_count));
2285 			maxscan = min(maxscan,
2286 			    (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
2287 			    WI_PRISM2_RES_SIZE);
2288 			p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
2289 			p2hdr->wi_rsvd = 0;
2290 			p2hdr->wi_reason = 1;
2291 			wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
2292 			    sizeof(*p2hdr) / 2;
2293 			wreq->wi_type = WI_RID_SCAN_RES;
2294 		}
2295 
2296 		for (na->na_nodes = j = i = 0; i < maxscan &&
2297 		    (na->na_size >= j + sizeof(struct ieee80211_nodereq));
2298 		    i++) {
2299 			/* allocate node space */
2300 			if (nr == NULL)
2301 				nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);
2302 
2303 			/* get next BSS scan result */
2304 			res = (struct wi_scan_res *)
2305 			    ((char *)&wreq->wi_val + sizeof(*p2hdr) +
2306 			    i * WI_PRISM2_RES_SIZE);
2307 			pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
2308 			    res, noise);
2309 
2310 			/* copy it to node structure for ifconfig to read */
2311 			bzero(nr, sizeof(*nr));
2312 			IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
2313 			IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
2314 			nr->nr_channel = letoh16(res->wi_chan);
2315 			nr->nr_chan_flags = IEEE80211_CHAN_B;
2316 			nr->nr_rssi = letoh16(res->wi_signal);
2317 			nr->nr_max_rssi = 0; /* XXX */
2318 			nr->nr_nwid_len = letoh16(res->wi_ssid_len);
2319 			bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
2320 			nr->nr_intval = letoh16(res->wi_interval);
2321 			nr->nr_capinfo = letoh16(res->wi_capinfo);
2322 			nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
2323 			    (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
2324 			    (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
2325 			    (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
2326 			nr->nr_nrates = 0;
2327 			while (res->wi_srates[nr->nr_nrates] != 0) {
2328 				nr->nr_rates[nr->nr_nrates] =
2329 				    res->wi_srates[nr->nr_nrates] &
2330 				    WI_VAR_SRATES_MASK;
2331 				nr->nr_nrates++;
2332 			}
2333 			nr->nr_flags = 0;
2334 			if (bcmp(nr->nr_macaddr, nr->nr_bssid,
2335 			    IEEE80211_ADDR_LEN) == 0)
2336 				nr->nr_flags |= IEEE80211_NODEREQ_AP;
2337 			error = copyout(nr, (caddr_t)na->na_node + j,
2338 			    sizeof(struct ieee80211_nodereq));
2339 			if (error)
2340 				break;
2341 
2342 			/* point to next node entry */
2343 			j += sizeof(struct ieee80211_nodereq);
2344 			na->na_nodes++;
2345 		}
2346 		if (nr)
2347 			free(nr, M_DEVBUF, 0);
2348 		free(pob, M_DEVBUF, 0);
2349 		free(wreq, M_DEVBUF, 0);
2350 		break;
2351 	}
2352 	case SIOCSIFADDR:
2353 		ifp->if_flags |= IFF_UP;
2354 		/* FALLTHROUGH */
2355 	case SIOCSIFFLAGS:
2356 		if (ifp->if_flags & IFF_UP) {
2357 			if ((ifp->if_flags & IFF_RUNNING) == 0) {
2358 				pgt_init(ifp);
2359 				error = ENETRESET;
2360 			}
2361 		} else {
2362 			if (ifp->if_flags & IFF_RUNNING) {
2363 				pgt_stop(sc, SC_NEEDS_RESET);
2364 				error = ENETRESET;
2365 			}
2366 		}
2367 		break;
2368 	case SIOCADDMULTI:
2369 	case SIOCDELMULTI:
2370 		error = (cmd == SIOCADDMULTI) ?
2371 		    ether_addmulti(ifr, &ic->ic_ac) :
2372 		    ether_delmulti(ifr, &ic->ic_ac);
2373 
2374 		if (error == ENETRESET)
2375 			error = 0;
2376 		break;
2377 	case SIOCSIFMTU:
2378 		if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
2379 			error = EINVAL;
2380 			break;
2381 		}
2382 		/* FALLTHROUGH */
2383 	default:
2384 		error = ieee80211_ioctl(ifp, cmd, req);
2385 		break;
2386 	}
2387 
2388 	if (error == ENETRESET) {
2389 		pgt_update_hw_from_sw(sc, 0, 0);
2390 		error = 0;
2391 	}
2392 	splx(s);
2393 
2394 	return (error);
2395 }
2396 
2397 void
2398 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
2399     struct wi_scan_res *scanres, uint32_t noise)
2400 {
2401 	struct ieee80211_rateset *rs;
2402 	struct wi_scan_res ap;
2403 	unsigned int i, n;
2404 
2405 	rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
2406 	bzero(&ap, sizeof(ap));
2407 	ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
2408 	ap.wi_noise = noise;
2409 	ap.wi_signal = letoh16(pob->pob_rssi);
2410 	IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
2411 	ap.wi_interval = letoh16(pob->pob_beacon_period);
2412 	ap.wi_capinfo = letoh16(pob->pob_capinfo);
2413 	ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
2414 	memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
2415 	n = 0;
2416 	for (i = 0; i < 16; i++) {
2417 		if (letoh16(pob->pob_rates) & (1 << i)) {
2418 			if (i >= rs->rs_nrates)
2419 				break;
2420 			ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
2421 			if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
2422 				break;
2423 		}
2424 	}
2425 	memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
2426 }
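
/*
 * Worked example for the rate translation above (illustrative):
 * pob_rates is a little-endian bitmask indexed into the AUTO rateset.
 * With rs_rates = { 2, 4, 11, 22 } (500kb/s units) and
 * letoh16(pob_rates) == 0x0005 (bits 0 and 2 set), the loop yields
 * wi_srates = { 2, 11 } and wi_rate == 11, i.e. 1Mb/s and 5.5Mb/s
 * with a top rate of 5.5Mb/s.
 */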
2427 
2428 void
2429 node_mark_active_ap(void *arg, struct ieee80211_node *ni)
2430 {
2431 	/*
2432 	 * HostAP mode lets all nodes stick around unless
2433 	 * the firmware AP kicks them off.
2434 	 */
2435 	ni->ni_inact = 0;
2436 }
2437 
2438 void
2439 node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
2440 {
2441 	struct pgt_ieee80211_node *pin;
2442 
2443 	/*
2444 	 * As there is no association in ad-hoc, we let links just
2445 	 * time out naturally as long as they are not holding any private
2446 	 * configuration, such as 802.1x authorization.
2447 	 */
2448 	pin = (struct pgt_ieee80211_node *)ni;
2449 	if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
2450 		pin->pin_node.ni_inact = 0;
2451 }
2452 
2453 void
2454 pgt_watchdog(struct ifnet *ifp)
2455 {
2456 	struct pgt_softc *sc;
2457 
2458 	sc = ifp->if_softc;
2459 	/*
2460 	 * Check for timed out transmissions (and make sure to set
2461 	 * this watchdog to fire again if there is still data in the
2462 	 * output device queue).
2463 	 */
2464 	if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
2465 		int count;
2466 
2467 		ifp->if_timer = 1;
2468 		if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
2469 			count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
2470 			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
2471 				DPRINTF(("%s: timeout %d data transmissions\n",
2472 				    sc->sc_dev.dv_xname, count));
2473 		}
2474 	}
2475 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
2476 		return;
2477 	/*
2478 	 * If we're going to kick the device out of power-save mode
2479 	 * just to update the BSSID and such, we should not do it
2480 	 * very often; need to determine in what way to do that.
2481 	 */
2482 	if (ifp->if_flags & IFF_RUNNING &&
2483 	    sc->sc_ic.ic_state != IEEE80211_S_INIT &&
2484 	    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
2485 		pgt_async_update(sc);
2486 
2487 #ifndef IEEE80211_STA_ONLY
2488 	/*
2489 	 * As a firmware-based HostAP, we should not time out
2490 	 * nodes inside the driver in addition to the timeout
2491 	 * that exists in the firmware.  The only things we
2492 	 * should have to time out when doing HostAP are the
2493 	 * privacy-related ones.
2494 	 */
2495 	switch (sc->sc_ic.ic_opmode) {
2496 	case IEEE80211_M_HOSTAP:
2497 		ieee80211_iterate_nodes(&sc->sc_ic,
2498 		    node_mark_active_ap, NULL);
2499 		break;
2500 	case IEEE80211_M_IBSS:
2501 		ieee80211_iterate_nodes(&sc->sc_ic,
2502 		    node_mark_active_adhoc, NULL);
2503 		break;
2504 	default:
2505 		break;
2506 	}
2507 #endif
2508 	ieee80211_watchdog(ifp);
2509 	ifp->if_timer = 1;
2510 }
2511 
2512 int
2513 pgt_init(struct ifnet *ifp)
2514 {
2515 	struct pgt_softc *sc = ifp->if_softc;
2516 	struct ieee80211com *ic = &sc->sc_ic;
2517 
2518 	/* set default channel */
2519 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
2520 
2521 	if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
2522 		pgt_update_hw_from_sw(sc,
2523 		    ic->ic_state != IEEE80211_S_INIT,
2524 		    ic->ic_opmode != IEEE80211_M_MONITOR);
2525 
2526 	ifp->if_flags |= IFF_RUNNING;
2527 	ifq_clr_oactive(&ifp->if_snd);
2528 
2529 	/* Begin background scanning */
2530 	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
2531 
2532 	return (0);
2533 }
2534 
2535 /*
2536  * After almost every configuration change, everything needs to be fully
2537  * reinitialized.  For some operations (currently, WEP settings
2538  * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove
2539  * "associations," and allows EAP authorization to occur again.
2540  * If keepassoc is specified, the reset operation should try to go
2541  * back to the BSS we had before.
2542  */
2543 void
2544 pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc, int keepnodes)
2545 {
2546 	struct ieee80211com *ic = &sc->sc_ic;
2547 	struct arpcom *ac = &ic->ic_ac;
2548 	struct ifnet *ifp = &ac->ac_if;
2549 	struct pgt_obj_key keyobj;
2550 	struct pgt_obj_ssid essid;
2551 	uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1];
2552 	uint32_t mode, bsstype, config, profile, channel, slot, preamble;
2553 	uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
2554 	unsigned int i;
2555 	int success, shouldbeup, s;
2556 
2557 	config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX;
2558 
2559 	/*
2560 	 * Promiscuous mode is currently a no-op: packets transmitted
2561 	 * while in promiscuous mode never seem to go anywhere.
2562 	 */
2563 	shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP;
2564 
2565 	if (shouldbeup) {
2566 		switch (ic->ic_opmode) {
2567 		case IEEE80211_M_STA:
2568 			if (ifp->if_flags & IFF_PROMISC)
2569 				mode = PGT_MODE_CLIENT;	/* what to do? */
2570 			else
2571 				mode = PGT_MODE_CLIENT;
2572 			bsstype = PGT_BSS_TYPE_STA;
2573 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2574 			break;
2575 #ifndef IEEE80211_STA_ONLY
2576 		case IEEE80211_M_IBSS:
2577 			if (ifp->if_flags & IFF_PROMISC)
2578 				mode = PGT_MODE_CLIENT;	/* what to do? */
2579 			else
2580 				mode = PGT_MODE_CLIENT;
2581 			bsstype = PGT_BSS_TYPE_IBSS;
2582 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2583 			break;
2584 		case IEEE80211_M_HOSTAP:
2585 			mode = PGT_MODE_AP;
2586 			bsstype = PGT_BSS_TYPE_STA;
2587 			/*
2588 			 * For IEEE 802.1x, we need to authenticate and
2589 			 * authorize hosts from here on or they remain
2590 			 * associated but without the ability to send or
2591 			 * receive normal traffic to us (courtesy the
2592 			 * firmware AP implementation).
2593 			 */
2594 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2595 			/*
2596 			 * WDS mode needs several things to work:
2597 			 * discovery of exactly how creating the WDS
2598 			 * links is meant to function, an interface
2599 			 * for this, and the ability to encode or decode
2600 			 * the WDS frames.
2601 			 */
2602 			if (sc->sc_wds)
2603 				config |= PGT_CONFIG_WDS;
2604 			break;
2605 #endif
2606 		case IEEE80211_M_MONITOR:
2607 			mode = PGT_MODE_PROMISCUOUS;
2608 			bsstype = PGT_BSS_TYPE_ANY;
2609 			dot1x = PGT_DOT1X_AUTH_NONE;
2610 			break;
2611 		default:
2612 			goto badopmode;
2613 		}
2614 	} else {
2615 badopmode:
2616 		mode = PGT_MODE_CLIENT;
2617 		bsstype = PGT_BSS_TYPE_NONE;
2618 	}
2619 
2620 	DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
2621 	switch (ic->ic_curmode) {
2622 	case IEEE80211_MODE_11A:
2623 		profile = PGT_PROFILE_A_ONLY;
2624 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2625 		DPRINTF(("IEEE80211_MODE_11A\n"));
2626 		break;
2627 	case IEEE80211_MODE_11B:
2628 		profile = PGT_PROFILE_B_ONLY;
2629 		preamble = PGT_OID_PREAMBLE_MODE_LONG;
2630 		DPRINTF(("IEEE80211_MODE_11B\n"));
2631 		break;
2632 	case IEEE80211_MODE_11G:
2633 		profile = PGT_PROFILE_G_ONLY;
2634 		preamble = PGT_OID_PREAMBLE_MODE_SHORT;
2635 		DPRINTF(("IEEE80211_MODE_11G\n"));
2636 		break;
2637 	case IEEE80211_MODE_AUTO:
2638 		profile = PGT_PROFILE_MIXED_G_WIFI;
2639 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2640 		DPRINTF(("IEEE80211_MODE_AUTO\n"));
2641 		break;
2642 	default:
2643 		panic("unknown mode %d", ic->ic_curmode);
2644 	}
2645 
2646 	switch (sc->sc_80211_ioc_auth) {
2647 	case IEEE80211_AUTH_NONE:
2648 		auth = PGT_AUTH_MODE_NONE;
2649 		break;
2650 	case IEEE80211_AUTH_OPEN:
2651 		auth = PGT_AUTH_MODE_OPEN;
2652 		break;
2653 	default:
2654 		auth = PGT_AUTH_MODE_SHARED;
2655 		break;
2656 	}
2657 
2658 	if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) {
2659 		wep = 1;
2660 		exunencrypted = 1;
2661 	} else {
2662 		wep = 0;
2663 		exunencrypted = 0;
2664 	}
2665 
2666 	mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
2667 	wep = htole32(wep);
2668 	exunencrypted = htole32(exunencrypted);
2669 	profile = htole32(profile);
2670 	preamble = htole32(preamble);
2671 	bsstype = htole32(bsstype);
2672 	config = htole32(config);
2673 	mode = htole32(mode);
2674 
2675 	if (!wep || !sc->sc_dot1x)
2676 		dot1x = PGT_DOT1X_AUTH_NONE;
2677 	dot1x = htole32(dot1x);
2678 	auth = htole32(auth);
2679 
2680 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
2681 		slot = htole32(PGT_OID_SLOT_MODE_SHORT);
2682 	else
2683 		slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC);
2684 
2685 	if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
2686 		if (keepassoc)
2687 			channel = 0;
2688 		else
2689 			channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
2690 	} else
2691 		channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);
2692 
2693 	DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
2694 	for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
2695 		availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
2696 		DPRINTF((" %d", availrates[i]));
2697 	}
2698 	DPRINTF(("\n"));
2699 	availrates[i++] = 0;
2700 
2701 	essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
2702 	memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length);
2703 
2704 	s = splnet();
2705 	for (success = 0; success == 0; success = 1) {
2706 		SETOID(PGT_OID_PROFILE, &profile, sizeof(profile));
2707 		SETOID(PGT_OID_CONFIG, &config, sizeof(config));
2708 		SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));
2709 
2710 		if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) {
2711 			SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,
2712 			    sizeof(ac->ac_enaddr));
2713 			IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr);
2714 		}
2715 
2716 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2717 		SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype));
2718 
2719 		if (channel != 0 && channel != IEEE80211_CHAN_ANY)
2720 			SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel));
2721 
2722 		if (ic->ic_flags & IEEE80211_F_DESBSSID) {
2723 			SETOID(PGT_OID_BSSID, ic->ic_des_bssid,
2724 			    sizeof(ic->ic_des_bssid));
2725 		} else if (keepassoc) {
2726 			SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2727 			    sizeof(ic->ic_bss->ni_bssid));
2728 		}
2729 
2730 		SETOID(PGT_OID_SSID, &essid, sizeof(essid));
2731 
2732 		if (ic->ic_des_esslen > 0)
2733 			SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid));
2734 
2735 		SETOID(PGT_OID_RATES, &availrates, i);
2736 		SETOID(PGT_OID_EXTENDED_RATES, &availrates, i);
2737 		SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble));
2738 		SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot));
2739 		SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth));
2740 		SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,
2741 		    sizeof(exunencrypted));
2742 		SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x));
2743 		SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep));
2744 		/*
2745 		 * Setting WEP key(s)
2746 		 */
2747 		if (letoh32(wep) != 0) {
2748 			keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
2749 			/* key 1 */
2750 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2751 			    IEEE80211_KEYBUF_SIZE);
2752 			keyobj.pok_length = min(keyobj.pok_length,
2753 			    ic->ic_nw_keys[0].k_len);
2754 			bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
2755 			    keyobj.pok_length);
2756 			SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
2757 			/* key 2 */
2758 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2759 			    IEEE80211_KEYBUF_SIZE);
2760 			keyobj.pok_length = min(keyobj.pok_length,
2761 			    ic->ic_nw_keys[1].k_len);
2762 			bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
2763 			    keyobj.pok_length);
2764 			SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
2765 			/* key 3 */
2766 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2767 			    IEEE80211_KEYBUF_SIZE);
2768 			keyobj.pok_length = min(keyobj.pok_length,
2769 			    ic->ic_nw_keys[2].k_len);
2770 			bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
2771 			    keyobj.pok_length);
2772 			SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
2773 			/* key 4 */
2774 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2775 			    IEEE80211_KEYBUF_SIZE);
2776 			keyobj.pok_length = min(keyobj.pok_length,
2777 			    ic->ic_nw_keys[3].k_len);
2778 			bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
2779 			    keyobj.pok_length);
2780 			SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));
2781 
2782 			wepkey = htole32(ic->ic_wep_txkey);
2783 			SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
2784 		}
2785 		/* set mode again to commit */
2786 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2787 	}
2788 	splx(s);
2789 
2790 	if (success) {
2791 		if (shouldbeup && keepnodes)
2792 			sc->sc_flags |= SC_NOFREE_ALLNODES;
2793 		if (shouldbeup)
2794 			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2795 		else
2796 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2797 	} else {
2798 		printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
2799 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2800 	}
2801 }
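
/*
 * Both flavors of reconfiguration appear elsewhere in this file
 * (illustrative recap):
 *
 *	pgt_update_hw_from_sw(sc, 0, 0);
 *		full reset: drop the BSS and all nodes
 *
 *	pgt_update_hw_from_sw(sc,
 *	    ic->ic_state != IEEE80211_S_INIT,
 *	    ic->ic_opmode != IEEE80211_M_MONITOR);
 *		keep the association and nodes when already running,
 *		as pgt_init() does
 */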
2802 
2803 void
2804 pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
2805     struct pgt_obj_mlme *mlme)
2806 {
2807 	struct ieee80211com *ic = &sc->sc_ic;
2808 	struct pgt_ieee80211_node *pin;
2809 	struct ieee80211_node *ni;
2810 
2811 	ni = ieee80211_find_node(ic, mlme->pom_address);
2812 	pin = (struct pgt_ieee80211_node *)ni;
2813 	switch (oid) {
2814 	case PGT_OID_DISASSOCIATE:
2815 		if (ni != NULL)
2816 			ieee80211_release_node(&sc->sc_ic, ni);
2817 		break;
2818 	case PGT_OID_ASSOCIATE:
2819 		if (ni == NULL) {
2820 			ni = ieee80211_dup_bss(ic, mlme->pom_address);
2821 			if (ni == NULL)
2822 				break;
2823 			ic->ic_newassoc(ic, ni, 1);
2824 			pin = (struct pgt_ieee80211_node *)ni;
2825 		}
2826 		ni->ni_associd = letoh16(mlme->pom_id);
2827 		pin->pin_mlme_state = letoh16(mlme->pom_state);
2828 		break;
2829 	default:
2830 		if (pin != NULL)
2831 			pin->pin_mlme_state = letoh16(mlme->pom_state);
2832 		break;
2833 	}
2834 }
2835 
2836 /*
2837  * Either in response to an event or after a certain amount of time,
2838  * synchronize our idea of the network we're part of from the hardware.
2839  */
2840 void
2841 pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
2842 	    struct mbuf *args)
2843 {
2844 	struct ieee80211com *ic = &sc->sc_ic;
2845 	struct pgt_obj_ssid ssid;
2846 	struct pgt_obj_bss bss;
2847 	uint32_t channel, noise, ls;
2848 	int error, s;
2849 
2850 	if (pa != NULL) {
2851 		struct pgt_obj_mlme *mlme;
2852 		uint32_t oid;
2853 
2854 		oid = *mtod(args, uint32_t *);
2855 		m_adj(args, sizeof(uint32_t));
2856 		if (sc->sc_debug & SC_DEBUG_TRAP)
2857 			DPRINTF(("%s: trap: oid %#x len %u\n",
2858 			    sc->sc_dev.dv_xname, oid, args->m_len));
2859 		switch (oid) {
2860 		case PGT_OID_LINK_STATE:
2861 			if (args->m_len < sizeof(uint32_t))
2862 				break;
2863 			ls = letoh32(*mtod(args, uint32_t *));
2864 			if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
2865 				DPRINTF(("%s: %s: link rate %u\n",
2866 				    sc->sc_dev.dv_xname, __func__, ls));
2867 			if (ls)
2868 				ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2869 			else
2870 				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2871 			goto gotlinkstate;
2872 		case PGT_OID_DEAUTHENTICATE:
2873 		case PGT_OID_AUTHENTICATE:
2874 		case PGT_OID_DISASSOCIATE:
2875 		case PGT_OID_ASSOCIATE:
2876 			if (args->m_len < sizeof(struct pgt_obj_mlme))
2877 				break;
2878 			mlme = mtod(args, struct pgt_obj_mlme *);
2879 			if (sc->sc_debug & SC_DEBUG_TRAP)
2880 				DPRINTF(("%s: mlme: address "
2881 				    "%s id 0x%02x state 0x%02x code 0x%02x\n",
2882 				    sc->sc_dev.dv_xname,
2883 				    ether_sprintf(mlme->pom_address),
2884 				    letoh16(mlme->pom_id),
2885 				    letoh16(mlme->pom_state),
2886 				    letoh16(mlme->pom_code)));
2887 #ifndef IEEE80211_STA_ONLY
2888 			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2889 				pgt_hostap_handle_mlme(sc, oid, mlme);
2890 #endif
2891 			break;
2892 		}
2893 		return;
2894 	}
2895 	if (ic->ic_state == IEEE80211_S_SCAN) {
2896 		s = splnet();
2897 		error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
2898 		splx(s);
2899 		if (error)
2900 			return;
2901 		DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
2902 		    letoh32(ls)));
2903 		if (ls != 0)
2904 			ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2905 	}
2906 
2907 gotlinkstate:
2908 	s = splnet();
2909 	if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
2910 		goto out;
2911 	sc->sc_noise = letoh32(noise);
2912 	if (ic->ic_state == IEEE80211_S_RUN) {
2913 		if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
2914 		    sizeof(channel)) != 0)
2915 			goto out;
2916 		channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
2917 		ic->ic_bss->ni_chan = &ic->ic_channels[channel];
2918 		if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2919 		    sizeof(ic->ic_bss->ni_bssid)) != 0)
2920 			goto out;
2921 		IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
2922 		error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
2923 		    sizeof(bss));
2924 		if (error == 0)
2925 			ic->ic_bss->ni_rssi = bss.pob_rssi;
2926 		else if (error != EPERM)
2927 			goto out;
2928 		error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
2929 		if (error)
2930 			goto out;
2931 		ic->ic_bss->ni_esslen = min(ssid.pos_length,
2932 		    sizeof(ic->ic_bss->ni_essid));
2933 		memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
2934 		    ssid.pos_length);
2935 	}
2936 
2937 out:
2938 	splx(s);
2939 }
2940 
2941 int
2942 pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2943 {
2944 	struct pgt_softc *sc = ic->ic_if.if_softc;
2945 	enum ieee80211_state ostate;
2946 
2947 	ostate = ic->ic_state;
2948 
2949 	DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
2950 	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
2951 
2952 	switch (nstate) {
2953 	case IEEE80211_S_INIT:
2954 		if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
2955 			ic->ic_if.if_timer = 0;
2956 		ic->ic_mgt_timer = 0;
2957 		ic->ic_flags &= ~IEEE80211_F_SIBSS;
2958 		ieee80211_free_allnodes(ic);
2959 		break;
2960 	case IEEE80211_S_SCAN:
2961 		ic->ic_if.if_timer = 1;
2962 		ic->ic_mgt_timer = 0;
2963 		if (sc->sc_flags & SC_NOFREE_ALLNODES)
2964 			sc->sc_flags &= ~SC_NOFREE_ALLNODES;
2965 		else
2966 			ieee80211_free_allnodes(ic);
2967 
2968 #ifndef IEEE80211_STA_ONLY
2969 		/* Just use any old channel; we override it anyway. */
2970 		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2971 			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
2972 #endif
2973 		break;
2974 	case IEEE80211_S_RUN:
2975 		ic->ic_if.if_timer = 1;
2976 		break;
2977 	default:
2978 		break;
2979 	}
2980 
2981 	return (sc->sc_newstate(ic, nstate, arg));
2982 }
2983 
2984 int
2985 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
2986 {
2987 	int wokeup = 0;
2988 
2989 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2990 	    sc->sc_cbdmam->dm_mapsize,
2991 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
2992 	sc->sc_cb->pcb_device_curfrag[pq] =
2993 	    sc->sc_cb->pcb_driver_curfrag[pq];
2994 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2995 	    sc->sc_cbdmam->dm_mapsize,
2996 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
2997 	while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
2998 		struct pgt_desc *pd;
2999 
3000 		pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
3001 		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
3002 		sc->sc_dirtyq_count[pq]--;
3003 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3004 		sc->sc_freeq_count[pq]++;
3005 		pgt_unload_tx_desc_frag(sc, pd);
3006 		if (sc->sc_debug & SC_DEBUG_QUEUES)
3007 			DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
3008 			    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
3009 		wokeup++;
3010 		if (pgt_queue_is_data(pq))
3011 			sc->sc_ic.ic_if.if_oerrors++;
3012 	}
3013 
3014 	return (wokeup);
3015 }
3016 
3017 int
3018 pgt_dma_alloc(struct pgt_softc *sc)
3019 {
3020 	size_t size;
3021 	int i, error, nsegs;
3022 
3023 	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
3024 		TAILQ_INIT(&sc->sc_freeq[i]);
3025 		TAILQ_INIT(&sc->sc_dirtyq[i]);
3026 	}
3027 
3028 	/*
3029 	 * control block
3030 	 */
3031 	size = sizeof(struct pgt_control_block);
3032 
3033 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3034 	    BUS_DMA_NOWAIT, &sc->sc_cbdmam);
3035 	if (error != 0) {
3036 		printf("%s: can not create DMA tag for control block\n",
3037 		    sc->sc_dev.dv_xname);
3038 		goto out;
3039 	}
3040 
3041 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3042 	    0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3043 	if (error != 0) {
3044 		printf("%s: can not allocate DMA memory for control block\n",
3045 		    sc->sc_dev.dv_xname);
3046 		goto out;
3047 	}
3048 
3049 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
3050 	    size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
3051 	if (error != 0) {
3052 		printf("%s: can not map DMA memory for control block\n",
3053 		    sc->sc_dev.dv_xname);
3054 		goto out;
3055 	}
3056 
3057 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
3058 	    sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
3059 	if (error != 0) {
3060 		printf("%s: can not load DMA map for control block\n",
3061 		    sc->sc_dev.dv_xname);
3062 		goto out;
3063 	}
3064 
3065 	/*
3066 	 * powersave
3067 	 */
3068 	size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;
3069 
3070 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3071 	    BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
3072 	if (error != 0) {
3073 		printf("%s: can not create DMA tag for powersave\n",
3074 		    sc->sc_dev.dv_xname);
3075 		goto out;
3076 	}
3077 
3078 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3079 	   0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3080 	if (error != 0) {
3081 		printf("%s: can not allocate DMA memory for powersave\n",
3082 		    sc->sc_dev.dv_xname);
3083 		goto out;
3084 	}
3085 
3086 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
3087 	    size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
3088 	if (error != 0) {
3089 		printf("%s: can not map DMA memory for powersave\n",
3090 		    sc->sc_dev.dv_xname);
3091 		goto out;
3092 	}
3093 
3094 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
3095 	    sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
3096 	if (error != 0) {
3097 		printf("%s: can not load DMA map for powersave\n",
3098 		    sc->sc_dev.dv_xname);
3099 		goto out;
3100 	}
3101 
3102 	/*
3103 	 * fragments
3104 	 */
3105 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3106 	if (error != 0)
3107 		goto out;
3108 
3109 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3110 	if (error != 0)
3111 		goto out;
3112 
3113 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3114 	if (error != 0)
3115 		goto out;
3116 
3117 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3118 	if (error != 0)
3119 		goto out;
3120 
3121 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
3122 	if (error != 0)
3123 		goto out;
3124 
3125 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
3126 	if (error != 0)
3127 		goto out;
3128 
3129 out:
3130 	if (error) {
3131 		printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
3132 		pgt_dma_free(sc);
3133 	}
3134 
3135 	return (error);
3136 }
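
/*
 * Each DMA region above follows the canonical four-step bus_dma(9)
 * sequence; condensed sketch (error handling omitted):
 *
 *	bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *	    &seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
 *	bus_dmamem_map(sc->sc_dmat, &seg, nsegs, size,
 *	    (caddr_t *)&kva, BUS_DMA_NOWAIT);
 *	bus_dmamap_load(sc->sc_dmat, map, kva, size, NULL,
 *	    BUS_DMA_NOWAIT);
 */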
3137 
3138 int
3139 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
3140 {
3141 	struct pgt_desc *pd;
3142 	size_t i, qsize;
3143 	int error, nsegs;
3144 
3145 	switch (pq) {
3146 		case PGT_QUEUE_DATA_LOW_RX:
3147 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3148 			break;
3149 		case PGT_QUEUE_DATA_LOW_TX:
3150 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3151 			break;
3152 		case PGT_QUEUE_DATA_HIGH_RX:
3153 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3154 			break;
3155 		case PGT_QUEUE_DATA_HIGH_TX:
3156 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3157 			break;
3158 		case PGT_QUEUE_MGMT_RX:
3159 			qsize = PGT_QUEUE_MGMT_SIZE;
3160 			break;
3161 		case PGT_QUEUE_MGMT_TX:
3162 			qsize = PGT_QUEUE_MGMT_SIZE;
3163 			break;
3164 		default:
3165 			return (EINVAL);
3166 	}
3167 
3168 	for (i = 0; i < qsize; i++) {
3169 		pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3170 
3171 		error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
3172 		    PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
3173 		if (error != 0) {
3174 			printf("%s: can not create DMA tag for fragment\n",
3175 			    sc->sc_dev.dv_xname);
3176 			free(pd, M_DEVBUF, 0);
3177 			break;
3178 		}
3179 
3180 		error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
3181 		    0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
3182 		if (error != 0) {
3183 			printf("%s: error alloc frag %zu on queue %u\n",
3184 			    sc->sc_dev.dv_xname, i, pq);
3185 			free(pd, M_DEVBUF, 0);
3186 			break;
3187 		}
3188 
3189 		error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
3190 		    PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
3191 		if (error != 0) {
3192 			printf("%s: error map frag %zu on queue %u\n",
3193 			    sc->sc_dev.dv_xname, i, pq);
3194 			free(pd, M_DEVBUF, 0);
3195 			break;
3196 		}
3197 
3198 		if (pgt_queue_is_rx(pq)) {
3199 			error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
3200 			    pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
3201 			if (error != 0) {
3202 				printf("%s: error load frag %zu on queue %u\n",
3203 				    sc->sc_dev.dv_xname, i, pq);
3204 				bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
3205 				    nsegs);
3206 				free(pd, M_DEVBUF, 0);
3207 				break;
3208 			}
3209 			pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
3210 		}
3211 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3212 	}
3213 
3214 	return (error);
3215 }
3216 
3217 void
3218 pgt_dma_free(struct pgt_softc *sc)
3219 {
3220 	/*
3221 	 * fragments
3222 	 */
3223 	if (sc->sc_dmat != NULL) {
3224 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3225 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3226 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3227 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3228 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
3229 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
3230 	}
3231 
3232 	/*
3233 	 * powersave
3234 	 */
3235 	if (sc->sc_psmbuf != NULL) {
3236 		bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
3237 		bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
3238 		sc->sc_psmbuf = NULL;
3239 		sc->sc_psmdmam = NULL;
3240 	}
3241 
3242 	/*
3243 	 * control block
3244 	 */
3245 	if (sc->sc_cb != NULL) {
3246 		bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
3247 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
3248 		sc->sc_cb = NULL;
3249 		sc->sc_cbdmam = NULL;
3250 	}
3251 }
3252 
3253 void
3254 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
3255 {
3256 	struct pgt_desc	*pd;
3257 
3258 	while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
3259 		pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
3260 		TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
3261 		if (pd->pd_dmam != NULL) {
3262 			bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
3263 			pd->pd_dmam = NULL;
3264 		}
3265 		bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
3266 		free(pd, M_DEVBUF, 0);
3267 	}
3268 }
3269 
3270 int
3271 pgt_activate(struct device *self, int act)
3272 {
3273 	struct pgt_softc *sc = (struct pgt_softc *)self;
3274 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3275 
3276 	DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, act));
3277 
3278 	switch (act) {
3279 	case DVACT_SUSPEND:
3280 		if (ifp->if_flags & IFF_RUNNING) {
3281 			pgt_stop(sc, SC_NEEDS_RESET);
3282 			pgt_update_hw_from_sw(sc, 0, 0);
3283 		}
3284 		if (sc->sc_power != NULL)
3285 			(*sc->sc_power)(sc, act);
3286 		break;
3287 	case DVACT_WAKEUP:
3288 		pgt_wakeup(sc);
3289 		break;
3290 	}
3291 	return (0);
3292 }
3293 
3294 void
3295 pgt_wakeup(struct pgt_softc *sc)
3296 {
3297 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3298 
3299 	if (sc->sc_power != NULL)
3300 		(*sc->sc_power)(sc, DVACT_RESUME);
3301 
3302 	pgt_stop(sc, SC_NEEDS_RESET);
3303 	pgt_update_hw_from_sw(sc, 0, 0);
3304 
3305 	if (ifp->if_flags & IFF_UP) {
3306 		pgt_init(ifp);
3307 		pgt_update_hw_from_sw(sc, 0, 0);
3308 	}
3309 }
3310