1 /* $OpenBSD: pgt.c,v 1.67 2011/06/21 16:52:45 tedu Exp $ */ 2 3 /* 4 * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org> 5 * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * Copyright (c) 2004 Fujitsu Laboratories of America, Inc. 22 * Copyright (c) 2004 Brian Fundakowski Feldman 23 * All rights reserved. 24 * 25 * Redistribution and use in source and binary forms, with or without 26 * modification, are permitted provided that the following conditions 27 * are met: 28 * 1. Redistributions of source code must retain the above copyright 29 * notice, this list of conditions and the following disclaimer. 30 * 2. Redistributions in binary form must reproduce the above copyright 31 * notice, this list of conditions and the following disclaimer in the 32 * documentation and/or other materials provided with the distribution. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 35 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 36 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 37 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 39 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 40 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 41 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 42 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 43 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 44 * SUCH DAMAGE. 45 */ 46 47 #include <sys/cdefs.h> 48 #include "bpfilter.h" 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/kernel.h> 53 #include <sys/malloc.h> 54 #include <sys/socket.h> 55 #include <sys/mbuf.h> 56 #include <sys/endian.h> 57 #include <sys/sockio.h> 58 #include <sys/kthread.h> 59 #include <sys/time.h> 60 #include <sys/ioctl.h> 61 #include <sys/device.h> 62 #include <sys/workq.h> 63 64 #include <machine/bus.h> 65 #include <machine/endian.h> 66 #include <machine/intr.h> 67 68 #include <net/if.h> 69 #include <net/if_arp.h> 70 #include <net/if_dl.h> 71 #include <net/if_llc.h> 72 #include <net/if_media.h> 73 #include <net/if_types.h> 74 75 #if NBPFILTER > 0 76 #include <net/bpf.h> 77 #endif 78 79 #ifdef INET 80 #include <netinet/in.h> 81 #include <netinet/in_systm.h> 82 #include <netinet/in_var.h> 83 #include <netinet/if_ether.h> 84 #include <netinet/ip.h> 85 #endif 86 87 #include <net80211/ieee80211_var.h> 88 #include <net80211/ieee80211_radiotap.h> 89 90 #include <dev/ic/pgtreg.h> 91 #include <dev/ic/pgtvar.h> 92 93 #include <dev/ic/if_wireg.h> 94 #include <dev/ic/if_wi_ieee.h> 95 #include <dev/ic/if_wivar.h> 96 97 #ifdef PGT_DEBUG 98 #define DPRINTF(x) do { printf x; } while (0) 99 #else 100 #define DPRINTF(x) 101 #endif 102 103 #define SETOID(oid, var, size) { \ 104 if (pgt_oid_set(sc, oid, var, size) != 0) \ 105 break; \ 106 } 107 108 /* 109 * This is a driver for the Intersil Prism family of 802.11g network cards, 110 * based upon version 1.2 of the Linux driver and firmware found at 111 * http://www.prism54.org/. 112 */ 113 114 #define SCAN_TIMEOUT 5 /* 5 seconds */ 115 116 struct cfdriver pgt_cd = { 117 NULL, "pgt", DV_IFNET 118 }; 119 120 void pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr); 121 int pgt_media_change(struct ifnet *ifp); 122 void pgt_write_memory_barrier(struct pgt_softc *); 123 uint32_t pgt_read_4(struct pgt_softc *, uint16_t); 124 void pgt_write_4(struct pgt_softc *, uint16_t, uint32_t); 125 void pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t); 126 void pgt_debug_events(struct pgt_softc *, const char *); 127 uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue); 128 void pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *); 129 int pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue, 130 struct pgt_desc *); 131 void pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *); 132 int pgt_load_firmware(struct pgt_softc *); 133 void pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue, 134 struct pgt_frag *); 135 int pgt_reset(struct pgt_softc *); 136 void pgt_stop(struct pgt_softc *, unsigned int); 137 void pgt_reboot(struct pgt_softc *); 138 void pgt_init_intr(struct pgt_softc *); 139 void pgt_update_intr(struct pgt_softc *, int); 140 struct mbuf 141 *pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *, 142 struct mbuf *, struct ieee80211_node **); 143 void pgt_input_frames(struct pgt_softc *, struct mbuf *); 144 void pgt_wakeup_intr(struct pgt_softc *); 145 void pgt_sleep_intr(struct pgt_softc *); 146 void pgt_empty_traps(struct pgt_softc_kthread *); 147 void pgt_per_device_kthread(void *); 148 void pgt_async_reset(struct pgt_softc *); 149 void pgt_async_update(struct pgt_softc *); 150 void pgt_txdone(struct pgt_softc *, enum pgt_queue); 151 void pgt_rxdone(struct pgt_softc *, enum pgt_queue); 152 void pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t); 153 void pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *); 154 struct mbuf 155 *pgt_datarx_completion(struct pgt_softc *, enum pgt_queue); 156 int pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t); 157 int pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t); 158 int pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t); 159 void pgt_state_dump(struct pgt_softc *); 160 int pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *); 161 void pgt_desc_transmit(struct pgt_softc *, enum pgt_queue, 162 struct pgt_desc *, uint16_t, int); 163 void pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue); 164 struct ieee80211_node 165 *pgt_ieee80211_node_alloc(struct ieee80211com *); 166 void pgt_ieee80211_newassoc(struct ieee80211com *, 167 struct ieee80211_node *, int); 168 void pgt_ieee80211_node_free(struct ieee80211com *, 169 struct ieee80211_node *); 170 void pgt_ieee80211_node_copy(struct ieee80211com *, 171 struct ieee80211_node *, 172 const struct ieee80211_node *); 173 int pgt_ieee80211_send_mgmt(struct ieee80211com *, 174 struct ieee80211_node *, int, int, int); 175 int pgt_net_attach(struct pgt_softc *); 176 void pgt_start(struct ifnet *); 177 int pgt_ioctl(struct ifnet *, u_long, caddr_t); 178 void pgt_obj_bss2scanres(struct pgt_softc *, 179 struct pgt_obj_bss *, struct wi_scan_res *, uint32_t); 180 void node_mark_active_ap(void *, struct ieee80211_node *); 181 void node_mark_active_adhoc(void *, struct ieee80211_node *); 182 void pgt_watchdog(struct ifnet *); 183 int pgt_init(struct ifnet *); 184 void pgt_update_hw_from_sw(struct pgt_softc *, int, int); 185 void pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t, 186 struct pgt_obj_mlme *); 187 void pgt_update_sw_from_hw(struct pgt_softc *, 188 struct pgt_async_trap *, struct mbuf *); 189 int pgt_newstate(struct ieee80211com *, enum ieee80211_state, int); 190 int pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue); 191 int pgt_dma_alloc(struct pgt_softc *); 192 int pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq); 193 void pgt_dma_free(struct pgt_softc *); 194 void pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq); 195 void pgt_resume(void *, void *); 196 197 void 198 pgt_write_memory_barrier(struct pgt_softc *sc) 199 { 200 bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0, 201 BUS_SPACE_BARRIER_WRITE); 202 } 203 204 u_int32_t 205 pgt_read_4(struct pgt_softc *sc, uint16_t offset) 206 { 207 return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset)); 208 } 209 210 void 211 pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value) 212 { 213 bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value); 214 } 215 216 /* 217 * Write out 4 bytes and cause a PCI flush by reading back in on a 218 * harmless register. 219 */ 220 void 221 pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value) 222 { 223 bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value); 224 (void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN); 225 } 226 227 /* 228 * Print the state of events in the queues from an interrupt or a trigger. 229 */ 230 void 231 pgt_debug_events(struct pgt_softc *sc, const char *when) 232 { 233 #define COUNT(i) \ 234 letoh32(sc->sc_cb->pcb_driver_curfrag[i]) - \ 235 letoh32(sc->sc_cb->pcb_device_curfrag[i]) 236 if (sc->sc_debug & SC_DEBUG_EVENTS) 237 DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n", 238 sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2), 239 COUNT(3), COUNT(4), COUNT(5))); 240 #undef COUNT 241 } 242 243 uint32_t 244 pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq) 245 { 246 return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) - 247 letoh32(sc->sc_cb->pcb_device_curfrag[pq])); 248 } 249 250 void 251 pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd) 252 { 253 pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr); 254 pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE); 255 pd->pd_fragp->pf_flags = 0; 256 257 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize, 258 BUS_DMASYNC_POSTWRITE); 259 } 260 261 int 262 pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq, 263 struct pgt_desc *pd) 264 { 265 int error; 266 267 error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem, 268 PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT); 269 if (error) { 270 DPRINTF(("%s: unable to load %s tx DMA: %d\n", 271 sc->sc_dev.dv_xname, 272 pgt_queue_is_data(pq) ? "data" : "mgmt", error)); 273 return (error); 274 } 275 pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr; 276 pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr); 277 pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE); 278 pd->pd_fragp->pf_flags = htole16(0); 279 280 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize, 281 BUS_DMASYNC_POSTWRITE); 282 283 return (0); 284 } 285 286 void 287 pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd) 288 { 289 bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam); 290 pd->pd_dmaaddr = 0; 291 } 292 293 int 294 pgt_load_firmware(struct pgt_softc *sc) 295 { 296 int error, reg, dirreg, fwoff, ucodeoff, fwlen; 297 uint8_t *ucode; 298 uint32_t *uc; 299 size_t size; 300 char *name; 301 302 if (sc->sc_flags & SC_ISL3877) 303 name = "pgt-isl3877"; 304 else 305 name = "pgt-isl3890"; /* includes isl3880 */ 306 307 error = loadfirmware(name, &ucode, &size); 308 309 if (error != 0) { 310 DPRINTF(("%s: error %d, could not read firmware %s\n", 311 sc->sc_dev.dv_xname, error, name)); 312 return (EIO); 313 } 314 315 if (size & 3) { 316 DPRINTF(("%s: bad firmware size %u\n", 317 sc->sc_dev.dv_xname, size)); 318 free(ucode, M_DEVBUF); 319 return (EINVAL); 320 } 321 322 pgt_reboot(sc); 323 324 fwoff = 0; 325 ucodeoff = 0; 326 uc = (uint32_t *)ucode; 327 reg = PGT_FIRMWARE_INTERNAL_OFFSET; 328 while (fwoff < size) { 329 pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg); 330 331 if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE) 332 fwlen = PGT_DIRECT_MEMORY_SIZE; 333 else 334 fwlen = size - fwoff; 335 336 dirreg = PGT_DIRECT_MEMORY_OFFSET; 337 while (fwlen > 4) { 338 pgt_write_4(sc, dirreg, uc[ucodeoff]); 339 fwoff += 4; 340 dirreg += 4; 341 reg += 4; 342 fwlen -= 4; 343 ucodeoff++; 344 } 345 pgt_write_4_flush(sc, dirreg, uc[ucodeoff]); 346 fwoff += 4; 347 dirreg += 4; 348 reg += 4; 349 fwlen -= 4; 350 ucodeoff++; 351 } 352 DPRINTF(("%s: %d bytes microcode loaded from %s\n", 353 sc->sc_dev.dv_xname, fwoff, name)); 354 355 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT); 356 reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN); 357 reg |= PGT_CTRL_STAT_RAMBOOT; 358 pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg); 359 pgt_write_memory_barrier(sc); 360 DELAY(PGT_WRITEIO_DELAY); 361 362 reg |= PGT_CTRL_STAT_RESET; 363 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg); 364 pgt_write_memory_barrier(sc); 365 DELAY(PGT_WRITEIO_DELAY); 366 367 reg &= ~PGT_CTRL_STAT_RESET; 368 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg); 369 pgt_write_memory_barrier(sc); 370 DELAY(PGT_WRITEIO_DELAY); 371 372 free(ucode, M_DEVBUF); 373 374 return (0); 375 } 376 377 void 378 pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq, 379 struct pgt_frag *pqfrags) 380 { 381 struct pgt_desc *pd; 382 unsigned int i; 383 384 sc->sc_cb->pcb_device_curfrag[pq] = 0; 385 i = 0; 386 /* XXX why only freeq ??? */ 387 TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) { 388 pd->pd_fragnum = i; 389 pd->pd_fragp = &pqfrags[i]; 390 if (pgt_queue_is_rx(pq)) 391 pgt_reinit_rx_desc_frag(sc, pd); 392 i++; 393 } 394 sc->sc_freeq_count[pq] = i; 395 /* 396 * The ring buffer describes how many free buffers are available from 397 * the host (for receive queues) or how many are pending (for 398 * transmit queues). 399 */ 400 if (pgt_queue_is_rx(pq)) 401 sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i); 402 else 403 sc->sc_cb->pcb_driver_curfrag[pq] = 0; 404 } 405 406 /* 407 * Turn off interrupts, reset the device (possibly loading firmware), 408 * and put everything in a known state. 409 */ 410 int 411 pgt_reset(struct pgt_softc *sc) 412 { 413 int error; 414 415 /* disable all interrupts */ 416 pgt_write_4_flush(sc, PGT_REG_INT_EN, 0); 417 DELAY(PGT_WRITEIO_DELAY); 418 419 /* 420 * Set up the management receive queue, assuming there are no 421 * requests in progress. 422 */ 423 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 424 sc->sc_cbdmam->dm_mapsize, 425 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE); 426 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX, 427 &sc->sc_cb->pcb_data_low_rx[0]); 428 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX, 429 &sc->sc_cb->pcb_data_low_tx[0]); 430 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX, 431 &sc->sc_cb->pcb_data_high_rx[0]); 432 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX, 433 &sc->sc_cb->pcb_data_high_tx[0]); 434 pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX, 435 &sc->sc_cb->pcb_mgmt_rx[0]); 436 pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX, 437 &sc->sc_cb->pcb_mgmt_tx[0]); 438 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 439 sc->sc_cbdmam->dm_mapsize, 440 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD); 441 442 /* load firmware */ 443 if (sc->sc_flags & SC_NEEDS_FIRMWARE) { 444 error = pgt_load_firmware(sc); 445 if (error) { 446 printf("%s: firmware load failed\n", 447 sc->sc_dev.dv_xname); 448 return (error); 449 } 450 sc->sc_flags &= ~SC_NEEDS_FIRMWARE; 451 DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname)); 452 } 453 454 /* upload the control block's DMA address */ 455 pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE, 456 htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr)); 457 DELAY(PGT_WRITEIO_DELAY); 458 459 /* send a reset event */ 460 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET); 461 DELAY(PGT_WRITEIO_DELAY); 462 463 /* await only the initialization interrupt */ 464 pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT); 465 DELAY(PGT_WRITEIO_DELAY); 466 467 return (0); 468 } 469 470 /* 471 * If we're trying to reset and the device has seemingly not been detached, 472 * we'll spend a minute seeing if we can't do the reset. 473 */ 474 void 475 pgt_stop(struct pgt_softc *sc, unsigned int flag) 476 { 477 struct ieee80211com *ic; 478 unsigned int wokeup; 479 int tryagain = 0; 480 481 ic = &sc->sc_ic; 482 483 ic->ic_if.if_flags &= ~IFF_RUNNING; 484 sc->sc_flags |= SC_UNINITIALIZED; 485 sc->sc_flags |= flag; 486 487 pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX); 488 pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX); 489 pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX); 490 491 trying_again: 492 /* disable all interrupts */ 493 pgt_write_4_flush(sc, PGT_REG_INT_EN, 0); 494 DELAY(PGT_WRITEIO_DELAY); 495 496 /* reboot card */ 497 pgt_reboot(sc); 498 499 do { 500 wokeup = 0; 501 /* 502 * We don't expect to be woken up, just to drop the lock 503 * and time out. Only tx queues can have anything valid 504 * on them outside of an interrupt. 505 */ 506 while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) { 507 struct pgt_mgmt_desc *pmd; 508 509 pmd = TAILQ_FIRST(&sc->sc_mgmtinprog); 510 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link); 511 pmd->pmd_error = ENETRESET; 512 wakeup_one(pmd); 513 if (sc->sc_debug & SC_DEBUG_MGMT) 514 DPRINTF(("%s: queue: mgmt %p <- %#x " 515 "(drained)\n", sc->sc_dev.dv_xname, 516 pmd, pmd->pmd_oid)); 517 wokeup++; 518 } 519 if (wokeup > 0) { 520 if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) { 521 sc->sc_flags &= ~flag; 522 return; 523 } 524 } 525 } while (wokeup > 0); 526 527 if (flag == SC_NEEDS_RESET) { 528 int error; 529 530 DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname)); 531 sc->sc_flags &= ~SC_POWERSAVE; 532 sc->sc_flags |= SC_NEEDS_FIRMWARE; 533 error = pgt_reset(sc); 534 if (error == 0) { 535 tsleep(&sc->sc_flags, 0, "pgtres", hz); 536 if (sc->sc_flags & SC_UNINITIALIZED) { 537 printf("%s: not responding\n", 538 sc->sc_dev.dv_xname); 539 /* Thud. It was probably removed. */ 540 if (tryagain) 541 panic("pgt went for lunch"); /* XXX */ 542 tryagain = 1; 543 } else { 544 /* await all interrupts */ 545 pgt_write_4_flush(sc, PGT_REG_INT_EN, 546 PGT_INT_STAT_SOURCES); 547 DELAY(PGT_WRITEIO_DELAY); 548 ic->ic_if.if_flags |= IFF_RUNNING; 549 } 550 } 551 552 if (tryagain) 553 goto trying_again; 554 555 sc->sc_flags &= ~flag; 556 if (ic->ic_if.if_flags & IFF_RUNNING) 557 pgt_update_hw_from_sw(sc, 558 ic->ic_state != IEEE80211_S_INIT, 559 ic->ic_opmode != IEEE80211_M_MONITOR); 560 } 561 562 ic->ic_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 563 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); 564 } 565 566 void 567 pgt_attach(void *xsc) 568 { 569 struct pgt_softc *sc = xsc; 570 int error; 571 572 /* debug flags */ 573 //sc->sc_debug |= SC_DEBUG_QUEUES; /* super verbose */ 574 //sc->sc_debug |= SC_DEBUG_MGMT; 575 sc->sc_debug |= SC_DEBUG_UNEXPECTED; 576 //sc->sc_debug |= SC_DEBUG_TRIGGER; /* verbose */ 577 //sc->sc_debug |= SC_DEBUG_EVENTS; /* super verbose */ 578 //sc->sc_debug |= SC_DEBUG_POWER; 579 sc->sc_debug |= SC_DEBUG_TRAP; 580 sc->sc_debug |= SC_DEBUG_LINK; 581 //sc->sc_debug |= SC_DEBUG_RXANNEX; 582 //sc->sc_debug |= SC_DEBUG_RXFRAG; 583 //sc->sc_debug |= SC_DEBUG_RXETHER; 584 585 /* enable card if possible */ 586 if (sc->sc_enable != NULL) 587 (*sc->sc_enable)(sc); 588 589 error = pgt_dma_alloc(sc); 590 if (error) 591 return; 592 593 sc->sc_ic.ic_if.if_softc = sc; 594 TAILQ_INIT(&sc->sc_mgmtinprog); 595 TAILQ_INIT(&sc->sc_kthread.sck_traps); 596 sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED; 597 sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN; 598 599 error = pgt_reset(sc); 600 if (error) 601 return; 602 603 tsleep(&sc->sc_flags, 0, "pgtres", hz); 604 if (sc->sc_flags & SC_UNINITIALIZED) { 605 printf("%s: not responding\n", sc->sc_dev.dv_xname); 606 sc->sc_flags |= SC_NEEDS_FIRMWARE; 607 return; 608 } else { 609 /* await all interrupts */ 610 pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES); 611 DELAY(PGT_WRITEIO_DELAY); 612 } 613 614 error = pgt_net_attach(sc); 615 if (error) 616 return; 617 618 if (kthread_create(pgt_per_device_kthread, sc, NULL, 619 sc->sc_dev.dv_xname) != 0) 620 return; 621 622 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); 623 } 624 625 int 626 pgt_detach(struct pgt_softc *sc) 627 { 628 if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED) 629 /* device was not initialized correctly, so leave early */ 630 goto out; 631 632 /* stop card */ 633 pgt_stop(sc, SC_DYING); 634 pgt_reboot(sc); 635 636 ieee80211_ifdetach(&sc->sc_ic.ic_if); 637 if_detach(&sc->sc_ic.ic_if); 638 639 out: 640 /* disable card if possible */ 641 if (sc->sc_disable != NULL) 642 (*sc->sc_disable)(sc); 643 644 pgt_dma_free(sc); 645 646 return (0); 647 } 648 649 void 650 pgt_reboot(struct pgt_softc *sc) 651 { 652 uint32_t reg; 653 654 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT); 655 reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT); 656 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg); 657 pgt_write_memory_barrier(sc); 658 DELAY(PGT_WRITEIO_DELAY); 659 660 reg |= PGT_CTRL_STAT_RESET; 661 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg); 662 pgt_write_memory_barrier(sc); 663 DELAY(PGT_WRITEIO_DELAY); 664 665 reg &= ~PGT_CTRL_STAT_RESET; 666 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg); 667 pgt_write_memory_barrier(sc); 668 DELAY(PGT_RESET_DELAY); 669 } 670 671 void 672 pgt_init_intr(struct pgt_softc *sc) 673 { 674 if ((sc->sc_flags & SC_UNINITIALIZED) == 0) { 675 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 676 DPRINTF(("%s: spurious initialization\n", 677 sc->sc_dev.dv_xname)); 678 } else { 679 sc->sc_flags &= ~SC_UNINITIALIZED; 680 wakeup(&sc->sc_flags); 681 } 682 } 683 684 /* 685 * If called with a NULL last_nextpkt, only the mgmt queue will be checked 686 * for new packets. 687 */ 688 void 689 pgt_update_intr(struct pgt_softc *sc, int hack) 690 { 691 /* priority order */ 692 enum pgt_queue pqs[PGT_QUEUE_COUNT] = { 693 PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX, 694 PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX, 695 PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX 696 }; 697 struct mbuf *m; 698 uint32_t npend; 699 unsigned int dirtycount; 700 int i; 701 702 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 703 sc->sc_cbdmam->dm_mapsize, 704 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE); 705 pgt_debug_events(sc, "intr"); 706 /* 707 * Check for completion of tx in their dirty queues. 708 * Check completion of rx into their dirty queues. 709 */ 710 for (i = 0; i < PGT_QUEUE_COUNT; i++) { 711 size_t qdirty, qfree; 712 713 qdirty = sc->sc_dirtyq_count[pqs[i]]; 714 qfree = sc->sc_freeq_count[pqs[i]]; 715 /* 716 * We want the wrap-around here. 717 */ 718 if (pgt_queue_is_rx(pqs[i])) { 719 int data; 720 721 data = pgt_queue_is_data(pqs[i]); 722 #ifdef PGT_BUGGY_INTERRUPT_RECOVERY 723 if (hack && data) 724 continue; 725 #endif 726 npend = pgt_queue_frags_pending(sc, pqs[i]); 727 /* 728 * Receive queues clean up below, so qdirty must 729 * always be 0. 730 */ 731 if (npend > qfree) { 732 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 733 DPRINTF(("%s: rx queue [%u] " 734 "overflowed by %u\n", 735 sc->sc_dev.dv_xname, pqs[i], 736 npend - qfree)); 737 sc->sc_flags |= SC_INTR_RESET; 738 break; 739 } 740 while (qfree-- > npend) 741 pgt_rxdone(sc, pqs[i]); 742 } else { 743 npend = pgt_queue_frags_pending(sc, pqs[i]); 744 if (npend > qdirty) { 745 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 746 DPRINTF(("%s: tx queue [%u] " 747 "underflowed by %u\n", 748 sc->sc_dev.dv_xname, pqs[i], 749 npend - qdirty)); 750 sc->sc_flags |= SC_INTR_RESET; 751 break; 752 } 753 /* 754 * If the free queue was empty, or the data transmit 755 * queue just became empty, wake up any waiters. 756 */ 757 if (qdirty > npend) { 758 if (pgt_queue_is_data(pqs[i])) { 759 sc->sc_ic.ic_if.if_timer = 0; 760 sc->sc_ic.ic_if.if_flags &= 761 ~IFF_OACTIVE; 762 } 763 while (qdirty-- > npend) 764 pgt_txdone(sc, pqs[i]); 765 } 766 } 767 } 768 769 /* 770 * This is the deferred completion for received management frames 771 * and where we queue network frames for stack input. 772 */ 773 dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]; 774 while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) { 775 struct pgt_mgmt_desc *pmd; 776 777 pmd = TAILQ_FIRST(&sc->sc_mgmtinprog); 778 /* 779 * If there is no mgmt request in progress or the operation 780 * returned is explicitly a trap, this pmd will essentially 781 * be ignored. 782 */ 783 pgt_mgmtrx_completion(sc, pmd); 784 } 785 sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] = 786 htole32(dirtycount + 787 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX])); 788 789 dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX]; 790 while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) { 791 if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX))) 792 pgt_input_frames(sc, m); 793 } 794 sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] = 795 htole32(dirtycount + 796 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX])); 797 798 dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX]; 799 while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) { 800 if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX))) 801 pgt_input_frames(sc, m); 802 } 803 sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] = 804 htole32(dirtycount + 805 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX])); 806 807 /* 808 * Write out what we've finished with. 809 */ 810 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 811 sc->sc_cbdmam->dm_mapsize, 812 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD); 813 } 814 815 struct mbuf * 816 pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh, 817 struct mbuf *m, struct ieee80211_node **ni) 818 { 819 struct ieee80211com *ic; 820 struct ieee80211_frame *frame; 821 struct llc *snap; 822 823 ic = &sc->sc_ic; 824 if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) { 825 *ni = ieee80211_ref_node(ic->ic_bss); 826 (*ni)->ni_inact = 0; 827 return (m); 828 } 829 830 M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT); 831 if (m == NULL) 832 return (m); 833 if (m->m_len < sizeof(*frame) + sizeof(*snap)) { 834 m = m_pullup(m, sizeof(*frame) + sizeof(*snap)); 835 if (m == NULL) 836 return (m); 837 } 838 frame = mtod(m, struct ieee80211_frame *); 839 snap = (struct llc *)&frame[1]; 840 if (ni != NULL) { 841 if (ic->ic_opmode == IEEE80211_M_STA) { 842 *ni = ieee80211_ref_node(ic->ic_bss); 843 } 844 #ifndef IEEE80211_STA_ONLY 845 else { 846 *ni = ieee80211_find_node(ic, eh->ether_shost); 847 /* 848 * Make up associations for ad-hoc mode. To support 849 * ad-hoc WPA, we'll need to maintain a bounded 850 * pool of ad-hoc stations. 851 */ 852 if (*ni == NULL && 853 ic->ic_opmode != IEEE80211_M_HOSTAP) { 854 *ni = ieee80211_dup_bss(ic, eh->ether_shost); 855 if (*ni != NULL) { 856 (*ni)->ni_associd = 1; 857 ic->ic_newassoc(ic, *ni, 1); 858 } 859 } 860 if (*ni == NULL) { 861 m_freem(m); 862 return (NULL); 863 } 864 } 865 #endif 866 (*ni)->ni_inact = 0; 867 } 868 snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP; 869 snap->llc_control = LLC_UI; 870 snap->llc_snap.org_code[0] = 0; 871 snap->llc_snap.org_code[1] = 0; 872 snap->llc_snap.org_code[2] = 0; 873 snap->llc_snap.ether_type = eh->ether_type; 874 frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA; 875 /* Doesn't look like much of the 802.11 header is available. */ 876 *(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0; 877 /* 878 * Translate the addresses; WDS is not handled. 879 */ 880 switch (ic->ic_opmode) { 881 case IEEE80211_M_STA: 882 frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS; 883 IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost); 884 IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid); 885 IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost); 886 break; 887 #ifndef IEEE80211_STA_ONLY 888 case IEEE80211_M_IBSS: 889 case IEEE80211_M_AHDEMO: 890 frame->i_fc[1] = IEEE80211_FC1_DIR_NODS; 891 IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost); 892 IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost); 893 IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid); 894 break; 895 case IEEE80211_M_HOSTAP: 896 /* HostAP forwarding defaults to being done on firmware. */ 897 frame->i_fc[1] = IEEE80211_FC1_DIR_TODS; 898 IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid); 899 IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost); 900 IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost); 901 break; 902 #endif 903 default: 904 break; 905 } 906 return (m); 907 } 908 909 void 910 pgt_input_frames(struct pgt_softc *sc, struct mbuf *m) 911 { 912 struct ether_header eh; 913 struct ifnet *ifp; 914 struct ieee80211_channel *chan; 915 struct ieee80211_rxinfo rxi; 916 struct ieee80211_node *ni; 917 struct ieee80211com *ic; 918 struct pgt_rx_annex *pra; 919 struct pgt_rx_header *pha; 920 struct mbuf *next; 921 unsigned int n; 922 uint32_t rstamp; 923 uint8_t rssi; 924 925 ic = &sc->sc_ic; 926 ifp = &ic->ic_if; 927 for (next = m; m != NULL; m = next) { 928 next = m->m_nextpkt; 929 m->m_nextpkt = NULL; 930 931 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 932 if (m->m_len < sizeof(*pha)) { 933 m = m_pullup(m, sizeof(*pha)); 934 if (m == NULL) { 935 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 936 DPRINTF(("%s: m_pullup " 937 "failure\n", 938 sc->sc_dev.dv_xname)); 939 ifp->if_ierrors++; 940 continue; 941 } 942 } 943 pha = mtod(m, struct pgt_rx_header *); 944 pra = NULL; 945 goto input; 946 } 947 948 if (m->m_len < sizeof(*pra)) { 949 m = m_pullup(m, sizeof(*pra)); 950 if (m == NULL) { 951 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 952 DPRINTF(("%s: m_pullup failure\n", 953 sc->sc_dev.dv_xname)); 954 ifp->if_ierrors++; 955 continue; 956 } 957 } 958 pra = mtod(m, struct pgt_rx_annex *); 959 pha = &pra->pra_header; 960 if (sc->sc_debug & SC_DEBUG_RXANNEX) 961 DPRINTF(("%s: rx annex: ? %04x " 962 "len %u clock %u flags %02x ? %02x rate %u ? %02x " 963 "freq %u ? %04x rssi %u pad %02x%02x%02x\n", 964 sc->sc_dev.dv_xname, 965 letoh16(pha->pra_unknown0), 966 letoh16(pha->pra_length), 967 letoh32(pha->pra_clock), pha->pra_flags, 968 pha->pra_unknown1, pha->pra_rate, 969 pha->pra_unknown2, letoh32(pha->pra_frequency), 970 pha->pra_unknown3, pha->pra_rssi, 971 pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2])); 972 if (sc->sc_debug & SC_DEBUG_RXETHER) 973 DPRINTF(("%s: rx ether: %s < %s 0x%04x\n", 974 sc->sc_dev.dv_xname, 975 ether_sprintf(pra->pra_ether_dhost), 976 ether_sprintf(pra->pra_ether_shost), 977 ntohs(pra->pra_ether_type))); 978 979 memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN); 980 memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN); 981 eh.ether_type = pra->pra_ether_type; 982 983 input: 984 /* 985 * This flag is set if e.g. packet could not be decrypted. 986 */ 987 if (pha->pra_flags & PRA_FLAG_BAD) { 988 ifp->if_ierrors++; 989 m_freem(m); 990 continue; 991 } 992 993 /* 994 * After getting what we want, chop off the annex, then 995 * turn into something that looks like it really was 996 * 802.11. 997 */ 998 rssi = pha->pra_rssi; 999 rstamp = letoh32(pha->pra_clock); 1000 n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0); 1001 if (n <= IEEE80211_CHAN_MAX) 1002 chan = &ic->ic_channels[n]; 1003 else 1004 chan = ic->ic_bss->ni_chan; 1005 /* Send to 802.3 listeners. */ 1006 if (pra) { 1007 m_adj(m, sizeof(*pra)); 1008 } else 1009 m_adj(m, sizeof(*pha)); 1010 1011 m = pgt_ieee80211_encap(sc, &eh, m, &ni); 1012 if (m != NULL) { 1013 #if NBPFILTER > 0 1014 if (sc->sc_drvbpf != NULL) { 1015 struct mbuf mb; 1016 struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap; 1017 1018 tap->wr_flags = 0; 1019 tap->wr_chan_freq = htole16(chan->ic_freq); 1020 tap->wr_chan_flags = htole16(chan->ic_flags); 1021 tap->wr_rssi = rssi; 1022 tap->wr_max_rssi = ic->ic_max_rssi; 1023 1024 mb.m_data = (caddr_t)tap; 1025 mb.m_len = sc->sc_rxtap_len; 1026 mb.m_next = m; 1027 mb.m_nextpkt = NULL; 1028 mb.m_type = 0; 1029 mb.m_flags = 0; 1030 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 1031 } 1032 #endif 1033 rxi.rxi_flags = 0; 1034 ni->ni_rssi = rxi.rxi_rssi = rssi; 1035 ni->ni_rstamp = rxi.rxi_tstamp = rstamp; 1036 ieee80211_input(ifp, m, ni, &rxi); 1037 /* 1038 * The frame may have caused the node to be marked for 1039 * reclamation (e.g. in response to a DEAUTH message) 1040 * so use free_node here instead of unref_node. 1041 */ 1042 if (ni == ic->ic_bss) 1043 ieee80211_unref_node(&ni); 1044 else 1045 ieee80211_release_node(&sc->sc_ic, ni); 1046 } else { 1047 ifp->if_ierrors++; 1048 } 1049 } 1050 } 1051 1052 void 1053 pgt_wakeup_intr(struct pgt_softc *sc) 1054 { 1055 int shouldupdate; 1056 int i; 1057 1058 shouldupdate = 0; 1059 /* Check for any queues being empty before updating. */ 1060 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 1061 sc->sc_cbdmam->dm_mapsize, 1062 BUS_DMASYNC_POSTREAD); 1063 for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) { 1064 if (pgt_queue_is_tx(i)) 1065 shouldupdate = pgt_queue_frags_pending(sc, i); 1066 else 1067 shouldupdate = pgt_queue_frags_pending(sc, i) < 1068 sc->sc_freeq_count[i]; 1069 } 1070 if (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) 1071 shouldupdate = 1; 1072 if (sc->sc_debug & SC_DEBUG_POWER) 1073 DPRINTF(("%s: wakeup interrupt (update = %d)\n", 1074 sc->sc_dev.dv_xname, shouldupdate)); 1075 sc->sc_flags &= ~SC_POWERSAVE; 1076 if (shouldupdate) { 1077 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE); 1078 DELAY(PGT_WRITEIO_DELAY); 1079 } 1080 } 1081 1082 void 1083 pgt_sleep_intr(struct pgt_softc *sc) 1084 { 1085 int allowed; 1086 int i; 1087 1088 allowed = 1; 1089 /* Check for any queues not being empty before allowing. */ 1090 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 1091 sc->sc_cbdmam->dm_mapsize, 1092 BUS_DMASYNC_POSTREAD); 1093 for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) { 1094 if (pgt_queue_is_tx(i)) 1095 allowed = pgt_queue_frags_pending(sc, i) == 0; 1096 else 1097 allowed = pgt_queue_frags_pending(sc, i) >= 1098 sc->sc_freeq_count[i]; 1099 } 1100 if (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) 1101 allowed = 0; 1102 if (sc->sc_debug & SC_DEBUG_POWER) 1103 DPRINTF(("%s: sleep interrupt (allowed = %d)\n", 1104 sc->sc_dev.dv_xname, allowed)); 1105 if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) { 1106 sc->sc_flags |= SC_POWERSAVE; 1107 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP); 1108 DELAY(PGT_WRITEIO_DELAY); 1109 } 1110 } 1111 1112 void 1113 pgt_empty_traps(struct pgt_softc_kthread *sck) 1114 { 1115 struct pgt_async_trap *pa; 1116 struct mbuf *m; 1117 1118 while (!TAILQ_EMPTY(&sck->sck_traps)) { 1119 pa = TAILQ_FIRST(&sck->sck_traps); 1120 TAILQ_REMOVE(&sck->sck_traps, pa, pa_link); 1121 m = pa->pa_mbuf; 1122 m_freem(m); 1123 } 1124 } 1125 1126 void 1127 pgt_per_device_kthread(void *argp) 1128 { 1129 struct pgt_softc *sc; 1130 struct pgt_softc_kthread *sck; 1131 struct pgt_async_trap *pa; 1132 struct mbuf *m; 1133 int s; 1134 1135 sc = argp; 1136 sck = &sc->sc_kthread; 1137 while (!sck->sck_exit) { 1138 if (!sck->sck_update && !sck->sck_reset && 1139 TAILQ_EMPTY(&sck->sck_traps)) 1140 tsleep(&sc->sc_kthread, 0, "pgtkth", 0); 1141 if (sck->sck_reset) { 1142 DPRINTF(("%s: [thread] async reset\n", 1143 sc->sc_dev.dv_xname)); 1144 sck->sck_reset = 0; 1145 sck->sck_update = 0; 1146 pgt_empty_traps(sck); 1147 s = splnet(); 1148 pgt_stop(sc, SC_NEEDS_RESET); 1149 splx(s); 1150 } else if (!TAILQ_EMPTY(&sck->sck_traps)) { 1151 DPRINTF(("%s: [thread] got a trap\n", 1152 sc->sc_dev.dv_xname)); 1153 pa = TAILQ_FIRST(&sck->sck_traps); 1154 TAILQ_REMOVE(&sck->sck_traps, pa, pa_link); 1155 m = pa->pa_mbuf; 1156 m_adj(m, sizeof(*pa)); 1157 pgt_update_sw_from_hw(sc, pa, m); 1158 m_freem(m); 1159 } else if (sck->sck_update) { 1160 sck->sck_update = 0; 1161 pgt_update_sw_from_hw(sc, NULL, NULL); 1162 } 1163 } 1164 pgt_empty_traps(sck); 1165 kthread_exit(0); 1166 } 1167 1168 void 1169 pgt_async_reset(struct pgt_softc *sc) 1170 { 1171 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) 1172 return; 1173 sc->sc_kthread.sck_reset = 1; 1174 wakeup(&sc->sc_kthread); 1175 } 1176 1177 void 1178 pgt_async_update(struct pgt_softc *sc) 1179 { 1180 if (sc->sc_flags & SC_DYING) 1181 return; 1182 sc->sc_kthread.sck_update = 1; 1183 wakeup(&sc->sc_kthread); 1184 } 1185 1186 int 1187 pgt_intr(void *arg) 1188 { 1189 struct pgt_softc *sc; 1190 struct ifnet *ifp; 1191 u_int32_t reg; 1192 1193 sc = arg; 1194 ifp = &sc->sc_ic.ic_if; 1195 1196 /* 1197 * Here the Linux driver ands in the value of the INT_EN register, 1198 * and masks off everything but the documented interrupt bits. Why? 1199 * 1200 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some 1201 * other times. 1202 */ 1203 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON && 1204 sc->sc_flags & SC_POWERSAVE) { 1205 /* 1206 * Don't try handling the interrupt in sleep mode. 1207 */ 1208 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT); 1209 if (reg & PGT_CTRL_STAT_SLEEPMODE) 1210 return (0); 1211 } 1212 reg = pgt_read_4(sc, PGT_REG_INT_STAT); 1213 if (reg == 0) 1214 return (0); /* This interrupt is not from us */ 1215 1216 pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg); 1217 if (reg & PGT_INT_STAT_INIT) 1218 pgt_init_intr(sc); 1219 if (reg & PGT_INT_STAT_UPDATE) { 1220 pgt_update_intr(sc, 0); 1221 /* 1222 * If we got an update, it's not really asleep. 1223 */ 1224 sc->sc_flags &= ~SC_POWERSAVE; 1225 /* 1226 * Pretend I have any idea what the documentation 1227 * would say, and just give it a shot sending an 1228 * "update" after acknowledging the interrupt 1229 * bits and writing out the new control block. 1230 */ 1231 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE); 1232 DELAY(PGT_WRITEIO_DELAY); 1233 } 1234 if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP)) 1235 pgt_sleep_intr(sc); 1236 if (reg & PGT_INT_STAT_WAKEUP) 1237 pgt_wakeup_intr(sc); 1238 1239 if (sc->sc_flags & SC_INTR_RESET) { 1240 sc->sc_flags &= ~SC_INTR_RESET; 1241 pgt_async_reset(sc); 1242 } 1243 1244 if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) { 1245 DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n", 1246 sc->sc_dev.dv_xname, 1247 reg & ~PGT_INT_STAT_SOURCES, 1248 pgt_read_4(sc, PGT_REG_CTRL_STAT))); 1249 } 1250 1251 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1252 pgt_start(ifp); 1253 1254 return (1); 1255 } 1256 1257 void 1258 pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq) 1259 { 1260 struct pgt_desc *pd; 1261 1262 pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]); 1263 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link); 1264 sc->sc_dirtyq_count[pq]--; 1265 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link); 1266 sc->sc_freeq_count[pq]++; 1267 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, 1268 pd->pd_dmam->dm_mapsize, 1269 BUS_DMASYNC_POSTREAD); 1270 /* Management frames want completion information. */ 1271 if (sc->sc_debug & SC_DEBUG_QUEUES) { 1272 DPRINTF(("%s: queue: tx %u <- [%u]\n", 1273 sc->sc_dev.dv_xname, pd->pd_fragnum, pq)); 1274 if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) { 1275 struct pgt_mgmt_frame *pmf; 1276 1277 pmf = (struct pgt_mgmt_frame *)pd->pd_mem; 1278 DPRINTF(("%s: queue: txmgmt %p <- " 1279 "(ver %u, op %u, flags %#x)\n", 1280 sc->sc_dev.dv_xname, 1281 pd, pmf->pmf_version, pmf->pmf_operation, 1282 pmf->pmf_flags)); 1283 } 1284 } 1285 pgt_unload_tx_desc_frag(sc, pd); 1286 } 1287 1288 void 1289 pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq) 1290 { 1291 struct pgt_desc *pd; 1292 1293 pd = TAILQ_FIRST(&sc->sc_freeq[pq]); 1294 TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link); 1295 sc->sc_freeq_count[pq]--; 1296 TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link); 1297 sc->sc_dirtyq_count[pq]++; 1298 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, 1299 pd->pd_dmam->dm_mapsize, 1300 BUS_DMASYNC_POSTREAD); 1301 if (sc->sc_debug & SC_DEBUG_QUEUES) 1302 DPRINTF(("%s: queue: rx %u <- [%u]\n", 1303 sc->sc_dev.dv_xname, pd->pd_fragnum, pq)); 1304 if (sc->sc_debug & SC_DEBUG_UNEXPECTED && 1305 pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF)) 1306 DPRINTF(("%s: unknown flags on rx [%u]: %#x\n", 1307 sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags))); 1308 } 1309 1310 /* 1311 * Traps are generally used for the firmware to report changes in state 1312 * back to the host. Mostly this processes changes in link state, but 1313 * it needs to also be used to initiate WPA and other authentication 1314 * schemes in terms of client (station) or server (access point). 1315 */ 1316 void 1317 pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata, 1318 size_t size) 1319 { 1320 struct pgt_async_trap *pa; 1321 struct mbuf *m; 1322 char *p; 1323 size_t total; 1324 1325 if (sc->sc_flags & SC_DYING) 1326 return; 1327 1328 total = sizeof(oid) + size + sizeof(struct pgt_async_trap); 1329 if (total > MLEN) { 1330 MGETHDR(m, M_DONTWAIT, MT_DATA); 1331 if (m == NULL) 1332 return; 1333 MCLGET(m, M_DONTWAIT); 1334 if (!(m->m_flags & M_EXT)) { 1335 m_freem(m); 1336 m = NULL; 1337 } 1338 } else 1339 m = m_get(M_DONTWAIT, MT_DATA); 1340 1341 if (m == NULL) 1342 return; 1343 else 1344 m->m_len = total; 1345 1346 pa = mtod(m, struct pgt_async_trap *); 1347 p = mtod(m, char *) + sizeof(*pa); 1348 *(uint32_t *)p = oid; 1349 p += sizeof(uint32_t); 1350 memcpy(p, trapdata, size); 1351 pa->pa_mbuf = m; 1352 1353 TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link); 1354 wakeup(&sc->sc_kthread); 1355 } 1356 1357 /* 1358 * Process a completed management response (all requests should be 1359 * responded to, quickly) or an event (trap). 1360 */ 1361 void 1362 pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd) 1363 { 1364 struct pgt_desc *pd; 1365 struct pgt_mgmt_frame *pmf; 1366 uint32_t oid, size; 1367 1368 pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]); 1369 TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link); 1370 sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--; 1371 TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX], 1372 pd, pd_link); 1373 sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++; 1374 if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) { 1375 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1376 DPRINTF(("%s: mgmt desc too small: %u\n", 1377 sc->sc_dev.dv_xname, 1378 letoh16(pd->pd_fragp->pf_size))); 1379 goto out_nopmd; 1380 } 1381 pmf = (struct pgt_mgmt_frame *)pd->pd_mem; 1382 if (pmf->pmf_version != PMF_VER) { 1383 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1384 DPRINTF(("%s: unknown mgmt version %u\n", 1385 sc->sc_dev.dv_xname, pmf->pmf_version)); 1386 goto out_nopmd; 1387 } 1388 if (pmf->pmf_device != PMF_DEV) { 1389 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1390 DPRINTF(("%s: unknown mgmt dev %u\n", 1391 sc->sc_dev.dv_xname, pmf->pmf_device)); 1392 goto out; 1393 } 1394 if (pmf->pmf_flags & ~PMF_FLAG_VALID) { 1395 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1396 DPRINTF(("%s: unknown mgmt flags %x\n", 1397 sc->sc_dev.dv_xname, 1398 pmf->pmf_flags & ~PMF_FLAG_VALID)); 1399 goto out; 1400 } 1401 if (pmf->pmf_flags & PMF_FLAG_LE) { 1402 oid = letoh32(pmf->pmf_oid); 1403 size = letoh32(pmf->pmf_size); 1404 } else { 1405 oid = betoh32(pmf->pmf_oid); 1406 size = betoh32(pmf->pmf_size); 1407 } 1408 if (pmf->pmf_operation == PMF_OP_TRAP) { 1409 pmd = NULL; /* ignored */ 1410 DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n", 1411 sc->sc_dev.dv_xname, 1412 pmf->pmf_operation, oid, size)); 1413 pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf), 1414 min(size, PGT_FRAG_SIZE - sizeof(*pmf))); 1415 goto out_nopmd; 1416 } 1417 if (pmd == NULL) { 1418 if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT)) 1419 DPRINTF(("%s: spurious mgmt received " 1420 "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname, 1421 pmf->pmf_operation, oid, size)); 1422 goto out_nopmd; 1423 } 1424 switch (pmf->pmf_operation) { 1425 case PMF_OP_RESPONSE: 1426 pmd->pmd_error = 0; 1427 break; 1428 case PMF_OP_ERROR: 1429 pmd->pmd_error = EPERM; 1430 goto out; 1431 default: 1432 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1433 DPRINTF(("%s: unknown mgmt op %u\n", 1434 sc->sc_dev.dv_xname, pmf->pmf_operation)); 1435 pmd->pmd_error = EIO; 1436 goto out; 1437 } 1438 if (oid != pmd->pmd_oid) { 1439 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1440 DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n", 1441 sc->sc_dev.dv_xname, pmd->pmd_oid, oid)); 1442 pmd->pmd_oid = oid; 1443 } 1444 if (pmd->pmd_recvbuf != NULL) { 1445 if (size > PGT_FRAG_SIZE) { 1446 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1447 DPRINTF(("%s: mgmt oid %#x has bad size %u\n", 1448 sc->sc_dev.dv_xname, oid, size)); 1449 pmd->pmd_error = EIO; 1450 goto out; 1451 } 1452 if (size > pmd->pmd_len) 1453 pmd->pmd_error = ENOMEM; 1454 else 1455 memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf), 1456 size); 1457 pmd->pmd_len = size; 1458 } 1459 1460 out: 1461 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link); 1462 wakeup_one(pmd); 1463 if (sc->sc_debug & SC_DEBUG_MGMT) 1464 DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n", 1465 sc->sc_dev.dv_xname, pmd, pmf->pmf_operation, 1466 pmd->pmd_oid, pmd->pmd_len)); 1467 out_nopmd: 1468 pgt_reinit_rx_desc_frag(sc, pd); 1469 } 1470 1471 /* 1472 * Queue packets for reception and defragmentation. I don't know now 1473 * whether the rx queue being full enough to start, but not finish, 1474 * queueing a fragmented packet, can happen. 1475 */ 1476 struct mbuf * 1477 pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq) 1478 { 1479 struct ifnet *ifp; 1480 struct pgt_desc *pd; 1481 struct mbuf *top, **mp, *m; 1482 size_t datalen; 1483 uint16_t morefrags, dataoff; 1484 int tlen = 0; 1485 1486 ifp = &sc->sc_ic.ic_if; 1487 m = NULL; 1488 top = NULL; 1489 mp = ⊤ 1490 1491 while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) { 1492 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link); 1493 sc->sc_dirtyq_count[pq]--; 1494 datalen = letoh16(pd->pd_fragp->pf_size); 1495 dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr; 1496 morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF); 1497 1498 if (sc->sc_debug & SC_DEBUG_RXFRAG) 1499 DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n", 1500 sc->sc_dev.dv_xname, datalen, dataoff, 1501 pd->pd_fragp->pf_flags)); 1502 1503 /* Add the (two+?) bytes for the header. */ 1504 if (datalen + dataoff > PGT_FRAG_SIZE) { 1505 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1506 DPRINTF(("%s data rx too big: %u\n", 1507 sc->sc_dev.dv_xname, datalen)); 1508 goto fail; 1509 } 1510 1511 if (m == NULL) 1512 MGETHDR(m, M_DONTWAIT, MT_DATA); 1513 else 1514 m = m_get(M_DONTWAIT, MT_DATA); 1515 1516 if (m == NULL) 1517 goto fail; 1518 if (datalen > MHLEN) { 1519 MCLGET(m, M_DONTWAIT); 1520 if (!(m->m_flags & M_EXT)) { 1521 m_free(m); 1522 goto fail; 1523 } 1524 } 1525 bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen); 1526 m->m_len = datalen; 1527 tlen += datalen; 1528 1529 *mp = m; 1530 mp = &m->m_next; 1531 1532 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link); 1533 sc->sc_freeq_count[pq]++; 1534 pgt_reinit_rx_desc_frag(sc, pd); 1535 1536 if (!morefrags) 1537 break; 1538 } 1539 1540 if (top) { 1541 top->m_pkthdr.len = tlen; 1542 top->m_pkthdr.rcvif = ifp; 1543 } 1544 return (top); 1545 1546 fail: 1547 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link); 1548 sc->sc_freeq_count[pq]++; 1549 pgt_reinit_rx_desc_frag(sc, pd); 1550 1551 ifp->if_ierrors++; 1552 if (top) 1553 m_freem(top); 1554 return (NULL); 1555 } 1556 1557 int 1558 pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid, 1559 void *arg, size_t arglen) 1560 { 1561 struct pgt_mgmt_desc pmd; 1562 int error; 1563 1564 bzero(&pmd, sizeof(pmd)); 1565 pmd.pmd_recvbuf = arg; 1566 pmd.pmd_len = arglen; 1567 pmd.pmd_oid = oid; 1568 1569 error = pgt_mgmt_request(sc, &pmd); 1570 if (error == 0) 1571 error = pmd.pmd_error; 1572 if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED) 1573 DPRINTF(("%s: failure getting oid %#x: %d\n", 1574 sc->sc_dev.dv_xname, oid, error)); 1575 1576 return (error); 1577 } 1578 1579 int 1580 pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid, 1581 void *arg, size_t arglen) 1582 { 1583 struct pgt_mgmt_desc pmd; 1584 int error; 1585 1586 bzero(&pmd, sizeof(pmd)); 1587 pmd.pmd_sendbuf = arg; 1588 pmd.pmd_recvbuf = arg; 1589 pmd.pmd_len = arglen; 1590 pmd.pmd_oid = oid; 1591 1592 error = pgt_mgmt_request(sc, &pmd); 1593 if (error == 0) 1594 error = pmd.pmd_error; 1595 if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED) 1596 DPRINTF(("%s: failure retrieving oid %#x: %d\n", 1597 sc->sc_dev.dv_xname, oid, error)); 1598 1599 return (error); 1600 } 1601 1602 int 1603 pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid, 1604 const void *arg, size_t arglen) 1605 { 1606 struct pgt_mgmt_desc pmd; 1607 int error; 1608 1609 bzero(&pmd, sizeof(pmd)); 1610 pmd.pmd_sendbuf = arg; 1611 pmd.pmd_len = arglen; 1612 pmd.pmd_oid = oid; 1613 1614 error = pgt_mgmt_request(sc, &pmd); 1615 if (error == 0) 1616 error = pmd.pmd_error; 1617 if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED) 1618 DPRINTF(("%s: failure setting oid %#x: %d\n", 1619 sc->sc_dev.dv_xname, oid, error)); 1620 1621 return (error); 1622 } 1623 1624 void 1625 pgt_state_dump(struct pgt_softc *sc) 1626 { 1627 printf("%s: state dump: control 0x%08x interrupt 0x%08x\n", 1628 sc->sc_dev.dv_xname, 1629 pgt_read_4(sc, PGT_REG_CTRL_STAT), 1630 pgt_read_4(sc, PGT_REG_INT_STAT)); 1631 1632 printf("%s: state dump: driver curfrag[]\n", 1633 sc->sc_dev.dv_xname); 1634 1635 printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 1636 sc->sc_dev.dv_xname, 1637 letoh32(sc->sc_cb->pcb_driver_curfrag[0]), 1638 letoh32(sc->sc_cb->pcb_driver_curfrag[1]), 1639 letoh32(sc->sc_cb->pcb_driver_curfrag[2]), 1640 letoh32(sc->sc_cb->pcb_driver_curfrag[3]), 1641 letoh32(sc->sc_cb->pcb_driver_curfrag[4]), 1642 letoh32(sc->sc_cb->pcb_driver_curfrag[5])); 1643 1644 printf("%s: state dump: device curfrag[]\n", 1645 sc->sc_dev.dv_xname); 1646 1647 printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 1648 sc->sc_dev.dv_xname, 1649 letoh32(sc->sc_cb->pcb_device_curfrag[0]), 1650 letoh32(sc->sc_cb->pcb_device_curfrag[1]), 1651 letoh32(sc->sc_cb->pcb_device_curfrag[2]), 1652 letoh32(sc->sc_cb->pcb_device_curfrag[3]), 1653 letoh32(sc->sc_cb->pcb_device_curfrag[4]), 1654 letoh32(sc->sc_cb->pcb_device_curfrag[5])); 1655 } 1656 1657 int 1658 pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd) 1659 { 1660 struct pgt_desc *pd; 1661 struct pgt_mgmt_frame *pmf; 1662 int error, i; 1663 1664 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) 1665 return (EIO); 1666 if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf)) 1667 return (ENOMEM); 1668 pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]); 1669 if (pd == NULL) 1670 return (ENOMEM); 1671 error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd); 1672 if (error) 1673 return (error); 1674 pmf = (struct pgt_mgmt_frame *)pd->pd_mem; 1675 pmf->pmf_version = PMF_VER; 1676 /* "get" and "retrieve" operations look the same */ 1677 if (pmd->pmd_recvbuf != NULL) 1678 pmf->pmf_operation = PMF_OP_GET; 1679 else 1680 pmf->pmf_operation = PMF_OP_SET; 1681 pmf->pmf_oid = htobe32(pmd->pmd_oid); 1682 pmf->pmf_device = PMF_DEV; 1683 pmf->pmf_flags = 0; 1684 pmf->pmf_size = htobe32(pmd->pmd_len); 1685 /* "set" and "retrieve" operations both send data */ 1686 if (pmd->pmd_sendbuf != NULL) 1687 memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len); 1688 else 1689 bzero(pmf + 1, pmd->pmd_len); 1690 pmd->pmd_error = EINPROGRESS; 1691 TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link); 1692 if (sc->sc_debug & SC_DEBUG_MGMT) 1693 DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n", 1694 sc->sc_dev.dv_xname, 1695 pmd, pmf->pmf_operation, 1696 pmd->pmd_oid, pmd->pmd_len)); 1697 pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd, 1698 sizeof(*pmf) + pmd->pmd_len, 0); 1699 /* 1700 * Try for one second, triggering 10 times. 1701 * 1702 * Do our best to work around seemingly buggy CardBus controllers 1703 * on Soekris 4521 that fail to get interrupts with alarming 1704 * regularity: run as if an interrupt occurred and service every 1705 * queue except for mbuf reception. 1706 */ 1707 i = 0; 1708 do { 1709 if (tsleep(pmd, 0, "pgtmgm", hz / 10) != EWOULDBLOCK) 1710 break; 1711 if (pmd->pmd_error != EINPROGRESS) 1712 break; 1713 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) { 1714 pmd->pmd_error = EIO; 1715 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link); 1716 break; 1717 } 1718 if (i != 9) 1719 pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX); 1720 #ifdef PGT_BUGGY_INTERRUPT_RECOVERY 1721 pgt_update_intr(sc, 0); 1722 #endif 1723 } while (i++ < 10); 1724 1725 if (pmd->pmd_error == EINPROGRESS) { 1726 printf("%s: timeout waiting for management " 1727 "packet response to %#x\n", 1728 sc->sc_dev.dv_xname, pmd->pmd_oid); 1729 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link); 1730 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1731 pgt_state_dump(sc); 1732 pgt_async_reset(sc); 1733 error = ETIMEDOUT; 1734 } else 1735 error = 0; 1736 1737 return (error); 1738 } 1739 1740 void 1741 pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd, 1742 uint16_t len, int morecoming) 1743 { 1744 TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link); 1745 sc->sc_freeq_count[pq]--; 1746 TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link); 1747 sc->sc_dirtyq_count[pq]++; 1748 if (sc->sc_debug & SC_DEBUG_QUEUES) 1749 DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname, 1750 pd->pd_fragnum, pq)); 1751 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 1752 sc->sc_cbdmam->dm_mapsize, 1753 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE); 1754 if (morecoming) 1755 pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF); 1756 pd->pd_fragp->pf_size = htole16(len); 1757 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, 1758 pd->pd_dmam->dm_mapsize, 1759 BUS_DMASYNC_POSTWRITE); 1760 sc->sc_cb->pcb_driver_curfrag[pq] = 1761 htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1); 1762 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 1763 sc->sc_cbdmam->dm_mapsize, 1764 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD); 1765 if (!morecoming) 1766 pgt_maybe_trigger(sc, pq); 1767 } 1768 1769 void 1770 pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq) 1771 { 1772 unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */ 1773 uint32_t reg; 1774 1775 if (sc->sc_debug & SC_DEBUG_TRIGGER) 1776 DPRINTF(("%s: triggered by queue [%u]\n", 1777 sc->sc_dev.dv_xname, pq)); 1778 pgt_debug_events(sc, "trig"); 1779 if (sc->sc_flags & SC_POWERSAVE) { 1780 /* Magic values ahoy? */ 1781 if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) { 1782 do { 1783 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT); 1784 if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) 1785 DELAY(PGT_WRITEIO_DELAY); 1786 } while (tries-- != 0); 1787 if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) { 1788 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1789 DPRINTF(("%s: timeout triggering from " 1790 "sleep mode\n", 1791 sc->sc_dev.dv_xname)); 1792 pgt_async_reset(sc); 1793 return; 1794 } 1795 } 1796 pgt_write_4_flush(sc, PGT_REG_DEV_INT, 1797 PGT_DEV_INT_WAKEUP); 1798 DELAY(PGT_WRITEIO_DELAY); 1799 /* read the status back in */ 1800 (void)pgt_read_4(sc, PGT_REG_CTRL_STAT); 1801 DELAY(PGT_WRITEIO_DELAY); 1802 } else { 1803 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE); 1804 DELAY(PGT_WRITEIO_DELAY); 1805 } 1806 } 1807 1808 struct ieee80211_node * 1809 pgt_ieee80211_node_alloc(struct ieee80211com *ic) 1810 { 1811 struct pgt_ieee80211_node *pin; 1812 1813 pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO); 1814 if (pin != NULL) { 1815 pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED; 1816 } 1817 return (struct ieee80211_node *)pin; 1818 } 1819 1820 void 1821 pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, 1822 int reallynew) 1823 { 1824 ieee80211_ref_node(ni); 1825 } 1826 1827 void 1828 pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni) 1829 { 1830 struct pgt_ieee80211_node *pin; 1831 1832 pin = (struct pgt_ieee80211_node *)ni; 1833 free(pin, M_DEVBUF); 1834 } 1835 1836 void 1837 pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst, 1838 const struct ieee80211_node *src) 1839 { 1840 const struct pgt_ieee80211_node *psrc; 1841 struct pgt_ieee80211_node *pdst; 1842 1843 psrc = (const struct pgt_ieee80211_node *)src; 1844 pdst = (struct pgt_ieee80211_node *)dst; 1845 bcopy(psrc, pdst, sizeof(*psrc)); 1846 } 1847 1848 int 1849 pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni, 1850 int type, int arg1, int arg2) 1851 { 1852 return (EOPNOTSUPP); 1853 } 1854 1855 int 1856 pgt_net_attach(struct pgt_softc *sc) 1857 { 1858 struct ieee80211com *ic = &sc->sc_ic; 1859 struct ifnet *ifp = &ic->ic_if; 1860 struct ieee80211_rateset *rs; 1861 uint8_t rates[IEEE80211_RATE_MAXSIZE]; 1862 struct pgt_obj_buffer psbuffer; 1863 struct pgt_obj_frequencies *freqs; 1864 uint32_t phymode, country; 1865 unsigned int chan, i, j, firstchan = -1; 1866 int error; 1867 1868 psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT); 1869 psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr); 1870 error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(country)); 1871 if (error) 1872 return (error); 1873 error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode)); 1874 if (error) 1875 return (error); 1876 error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr, 1877 sizeof(ic->ic_myaddr)); 1878 if (error) 1879 return (error); 1880 error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country)); 1881 if (error) 1882 return (error); 1883 1884 ifp->if_softc = sc; 1885 ifp->if_ioctl = pgt_ioctl; 1886 ifp->if_start = pgt_start; 1887 ifp->if_watchdog = pgt_watchdog; 1888 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 1889 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 1890 1891 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 1892 IFQ_SET_READY(&ifp->if_snd); 1893 1894 /* 1895 * Set channels 1896 * 1897 * Prism hardware likes to report supported frequencies that are 1898 * not actually available for the country of origin. 1899 */ 1900 j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t); 1901 freqs = malloc(j, M_DEVBUF, M_WAITOK); 1902 error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j); 1903 if (error) { 1904 free(freqs, M_DEVBUF); 1905 return (error); 1906 } 1907 1908 for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) { 1909 chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]), 1910 0); 1911 1912 if (chan > IEEE80211_CHAN_MAX) { 1913 printf("%s: reported bogus channel (%uMHz)\n", 1914 sc->sc_dev.dv_xname, chan); 1915 free(freqs, M_DEVBUF); 1916 return (EIO); 1917 } 1918 1919 if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) { 1920 if (!(phymode & htole32(PGT_OID_PHY_2400MHZ))) 1921 continue; 1922 if (country == letoh32(PGT_COUNTRY_USA)) { 1923 if (chan >= 12 && chan <= 14) 1924 continue; 1925 } 1926 if (chan <= 14) 1927 ic->ic_channels[chan].ic_flags |= 1928 IEEE80211_CHAN_B; 1929 ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG; 1930 } else { 1931 if (!(phymode & htole32(PGT_OID_PHY_5000MHZ))) 1932 continue; 1933 ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A; 1934 } 1935 1936 ic->ic_channels[chan].ic_freq = 1937 letoh16(freqs->pof_freqlist_mhz[i]); 1938 1939 if (firstchan == -1) 1940 firstchan = chan; 1941 1942 DPRINTF(("%s: set channel %d to freq %uMHz\n", 1943 sc->sc_dev.dv_xname, chan, 1944 letoh16(freqs->pof_freqlist_mhz[i]))); 1945 } 1946 free(freqs, M_DEVBUF); 1947 if (firstchan == -1) { 1948 printf("%s: no channels found\n", sc->sc_dev.dv_xname); 1949 return (EIO); 1950 } 1951 1952 /* 1953 * Set rates 1954 */ 1955 bzero(rates, sizeof(rates)); 1956 error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates)); 1957 if (error) 1958 return (error); 1959 for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) { 1960 switch (rates[i]) { 1961 case 2: 1962 case 4: 1963 case 11: 1964 case 22: 1965 case 44: /* maybe */ 1966 if (phymode & htole32(PGT_OID_PHY_2400MHZ)) { 1967 rs = &ic->ic_sup_rates[IEEE80211_MODE_11B]; 1968 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1969 } 1970 default: 1971 if (phymode & htole32(PGT_OID_PHY_2400MHZ)) { 1972 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 1973 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1974 } 1975 if (phymode & htole32(PGT_OID_PHY_5000MHZ)) { 1976 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 1977 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1978 } 1979 rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO]; 1980 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1981 } 1982 } 1983 1984 ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT | 1985 IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR; 1986 #ifndef IEEE80211_STA_ONLY 1987 ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP; 1988 #endif 1989 ic->ic_opmode = IEEE80211_M_STA; 1990 ic->ic_state = IEEE80211_S_INIT; 1991 1992 if_attach(ifp); 1993 ieee80211_ifattach(ifp); 1994 1995 /* setup post-attach/pre-lateattach vector functions */ 1996 sc->sc_newstate = ic->ic_newstate; 1997 ic->ic_newstate = pgt_newstate; 1998 ic->ic_node_alloc = pgt_ieee80211_node_alloc; 1999 ic->ic_newassoc = pgt_ieee80211_newassoc; 2000 ic->ic_node_free = pgt_ieee80211_node_free; 2001 ic->ic_node_copy = pgt_ieee80211_node_copy; 2002 ic->ic_send_mgmt = pgt_ieee80211_send_mgmt; 2003 ic->ic_max_rssi = 255; /* rssi is a u_int8_t */ 2004 2005 /* let net80211 handle switching around the media + resetting */ 2006 ieee80211_media_init(ifp, pgt_media_change, pgt_media_status); 2007 2008 #if NBPFILTER > 0 2009 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 2010 sizeof(struct ieee80211_frame) + 64); 2011 2012 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 2013 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 2014 sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT); 2015 2016 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 2017 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 2018 sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT); 2019 #endif 2020 return (0); 2021 } 2022 2023 int 2024 pgt_media_change(struct ifnet *ifp) 2025 { 2026 struct pgt_softc *sc = ifp->if_softc; 2027 int error; 2028 2029 error = ieee80211_media_change(ifp); 2030 if (error == ENETRESET) { 2031 pgt_update_hw_from_sw(sc, 0, 0); 2032 error = 0; 2033 } 2034 2035 return (error); 2036 } 2037 2038 void 2039 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr) 2040 { 2041 struct pgt_softc *sc = ifp->if_softc; 2042 struct ieee80211com *ic = &sc->sc_ic; 2043 uint32_t rate; 2044 int s; 2045 2046 imr->ifm_status = 0; 2047 imr->ifm_active = IFM_IEEE80211 | IFM_NONE; 2048 2049 if (!(ifp->if_flags & IFF_UP)) 2050 return; 2051 2052 s = splnet(); 2053 2054 if (ic->ic_fixed_rate != -1) { 2055 rate = ic->ic_sup_rates[ic->ic_curmode]. 2056 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 2057 } else { 2058 if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate))) 2059 return; 2060 rate = letoh32(rate); 2061 if (sc->sc_debug & SC_DEBUG_LINK) { 2062 DPRINTF(("%s: %s: link rate %u\n", 2063 sc->sc_dev.dv_xname, __func__, rate)); 2064 } 2065 if (rate == 0) 2066 return; 2067 } 2068 2069 imr->ifm_status = IFM_AVALID; 2070 imr->ifm_active = IFM_IEEE80211; 2071 if (ic->ic_state == IEEE80211_S_RUN) 2072 imr->ifm_status |= IFM_ACTIVE; 2073 2074 imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode); 2075 2076 switch (ic->ic_opmode) { 2077 case IEEE80211_M_STA: 2078 break; 2079 #ifndef IEEE80211_STA_ONLY 2080 case IEEE80211_M_IBSS: 2081 imr->ifm_active |= IFM_IEEE80211_ADHOC; 2082 break; 2083 case IEEE80211_M_AHDEMO: 2084 imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0; 2085 break; 2086 case IEEE80211_M_HOSTAP: 2087 imr->ifm_active |= IFM_IEEE80211_HOSTAP; 2088 break; 2089 #endif 2090 case IEEE80211_M_MONITOR: 2091 imr->ifm_active |= IFM_IEEE80211_MONITOR; 2092 break; 2093 default: 2094 break; 2095 } 2096 2097 splx(s); 2098 } 2099 2100 /* 2101 * Start data frames. Critical sections surround the boundary of 2102 * management frame transmission / transmission acknowledgement / response 2103 * and data frame transmission / transmission acknowledgement. 2104 */ 2105 void 2106 pgt_start(struct ifnet *ifp) 2107 { 2108 struct pgt_softc *sc; 2109 struct ieee80211com *ic; 2110 struct pgt_desc *pd; 2111 struct mbuf *m; 2112 int error; 2113 2114 sc = ifp->if_softc; 2115 ic = &sc->sc_ic; 2116 2117 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) || 2118 !(ifp->if_flags & IFF_RUNNING) || 2119 ic->ic_state != IEEE80211_S_RUN) { 2120 return; 2121 } 2122 2123 /* 2124 * Management packets should probably be MLME frames 2125 * (i.e. hostap "managed" mode); we don't touch the 2126 * net80211 management queue. 2127 */ 2128 for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] < 2129 PGT_QUEUE_FULL_THRESHOLD && !IFQ_IS_EMPTY(&ifp->if_snd);) { 2130 pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]); 2131 IFQ_POLL(&ifp->if_snd, m); 2132 if (m == NULL) 2133 break; 2134 if (m->m_pkthdr.len <= PGT_FRAG_SIZE) { 2135 error = pgt_load_tx_desc_frag(sc, 2136 PGT_QUEUE_DATA_LOW_TX, pd); 2137 if (error) 2138 break; 2139 IFQ_DEQUEUE(&ifp->if_snd, m); 2140 m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem); 2141 pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX, 2142 pd, m->m_pkthdr.len, 0); 2143 } else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) { 2144 struct pgt_desc *pd2; 2145 2146 /* 2147 * Transmit a fragmented frame if there is 2148 * not enough room in one fragment; limit 2149 * to two fragments (802.11 itself couldn't 2150 * even support a full two.) 2151 */ 2152 if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 > 2153 PGT_QUEUE_FULL_THRESHOLD) 2154 break; 2155 pd2 = TAILQ_NEXT(pd, pd_link); 2156 error = pgt_load_tx_desc_frag(sc, 2157 PGT_QUEUE_DATA_LOW_TX, pd); 2158 if (error == 0) { 2159 error = pgt_load_tx_desc_frag(sc, 2160 PGT_QUEUE_DATA_LOW_TX, pd2); 2161 if (error) { 2162 pgt_unload_tx_desc_frag(sc, pd); 2163 TAILQ_INSERT_HEAD(&sc->sc_freeq[ 2164 PGT_QUEUE_DATA_LOW_TX], pd, 2165 pd_link); 2166 } 2167 } 2168 if (error) 2169 break; 2170 IFQ_DEQUEUE(&ifp->if_snd, m); 2171 m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem); 2172 pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX, 2173 pd, PGT_FRAG_SIZE, 1); 2174 m_copydata(m, PGT_FRAG_SIZE, 2175 m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem); 2176 pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX, 2177 pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0); 2178 } else { 2179 IFQ_DEQUEUE(&ifp->if_snd, m); 2180 ifp->if_oerrors++; 2181 m_freem(m); 2182 m = NULL; 2183 } 2184 if (m != NULL) { 2185 struct ieee80211_node *ni; 2186 #if NBPFILTER > 0 2187 if (ifp->if_bpf != NULL) 2188 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 2189 #endif 2190 ifp->if_opackets++; 2191 ifp->if_timer = 1; 2192 sc->sc_txtimer = 5; 2193 ni = ieee80211_find_txnode(&sc->sc_ic, 2194 mtod(m, struct ether_header *)->ether_dhost); 2195 if (ni != NULL) { 2196 ni->ni_inact = 0; 2197 if (ni != ic->ic_bss) 2198 ieee80211_release_node(&sc->sc_ic, ni); 2199 } 2200 #if NBPFILTER > 0 2201 if (sc->sc_drvbpf != NULL) { 2202 struct mbuf mb; 2203 struct ether_header eh; 2204 struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap; 2205 2206 bcopy(mtod(m, struct ether_header *), &eh, 2207 sizeof(eh)); 2208 m_adj(m, sizeof(eh)); 2209 m = pgt_ieee80211_encap(sc, &eh, m, NULL); 2210 2211 tap->wt_flags = 0; 2212 //tap->wt_rate = rate; 2213 tap->wt_rate = 0; 2214 tap->wt_chan_freq = 2215 htole16(ic->ic_bss->ni_chan->ic_freq); 2216 tap->wt_chan_flags = 2217 htole16(ic->ic_bss->ni_chan->ic_flags); 2218 2219 if (m != NULL) { 2220 mb.m_data = (caddr_t)tap; 2221 mb.m_len = sc->sc_txtap_len; 2222 mb.m_next = m; 2223 mb.m_nextpkt = NULL; 2224 mb.m_type = 0; 2225 mb.m_flags = 0; 2226 2227 bpf_mtap(sc->sc_drvbpf, &mb, 2228 BPF_DIRECTION_OUT); 2229 } 2230 } 2231 #endif 2232 if (m != NULL) 2233 m_freem(m); 2234 } 2235 } 2236 } 2237 2238 int 2239 pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req) 2240 { 2241 struct pgt_softc *sc = ifp->if_softc; 2242 struct ifaddr *ifa; 2243 struct ifreq *ifr; 2244 struct wi_req *wreq; 2245 struct ieee80211_nodereq_all *na; 2246 struct ieee80211com *ic; 2247 struct pgt_obj_bsslist *pob; 2248 struct wi_scan_p2_hdr *p2hdr; 2249 struct wi_scan_res *res; 2250 uint32_t noise; 2251 int maxscan, i, j, s, error = 0; 2252 2253 ic = &sc->sc_ic; 2254 ifr = (struct ifreq *)req; 2255 2256 s = splnet(); 2257 switch (cmd) { 2258 case SIOCS80211SCAN: 2259 /* 2260 * This chip scans always as soon as it gets initialized. 2261 */ 2262 2263 /* 2264 * Give us a bit time to scan in case we were not 2265 * initialized before and let the userland process wait. 2266 */ 2267 tsleep(&sc->sc_flags, 0, "pgtsca", hz * SCAN_TIMEOUT); 2268 2269 break; 2270 case SIOCG80211ALLNODES: { 2271 struct ieee80211_nodereq *nr = NULL; 2272 na = (struct ieee80211_nodereq_all *)req; 2273 wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO); 2274 2275 maxscan = PGT_OBJ_BSSLIST_NBSS; 2276 pob = malloc(sizeof(*pob) + 2277 sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK); 2278 error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, 2279 sizeof(noise)); 2280 2281 if (error == 0) { 2282 noise = letoh32(noise); 2283 error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob, 2284 sizeof(*pob) + 2285 sizeof(struct pgt_obj_bss) * maxscan); 2286 } 2287 2288 if (error == 0) { 2289 maxscan = min(PGT_OBJ_BSSLIST_NBSS, 2290 letoh32(pob->pob_count)); 2291 maxscan = min(maxscan, 2292 (sizeof(wreq->wi_val) - sizeof(*p2hdr)) / 2293 WI_PRISM2_RES_SIZE); 2294 p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val; 2295 p2hdr->wi_rsvd = 0; 2296 p2hdr->wi_reason = 1; 2297 wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 + 2298 sizeof(*p2hdr) / 2; 2299 wreq->wi_type = WI_RID_SCAN_RES; 2300 } 2301 2302 for (na->na_nodes = j = i = 0; i < maxscan && 2303 (na->na_size >= j + sizeof(struct ieee80211_nodereq)); 2304 i++) { 2305 /* allocate node space */ 2306 if (nr == NULL) 2307 nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK); 2308 2309 /* get next BSS scan result */ 2310 res = (struct wi_scan_res *) 2311 ((char *)&wreq->wi_val + sizeof(*p2hdr) + 2312 i * WI_PRISM2_RES_SIZE); 2313 pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i], 2314 res, noise); 2315 2316 /* copy it to node structure for ifconfig to read */ 2317 bzero(nr, sizeof(*nr)); 2318 IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid); 2319 IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid); 2320 nr->nr_channel = letoh16(res->wi_chan); 2321 nr->nr_chan_flags = IEEE80211_CHAN_B; 2322 nr->nr_rssi = letoh16(res->wi_signal); 2323 nr->nr_max_rssi = 0; /* XXX */ 2324 nr->nr_nwid_len = letoh16(res->wi_ssid_len); 2325 bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len); 2326 nr->nr_intval = letoh16(res->wi_interval); 2327 nr->nr_capinfo = letoh16(res->wi_capinfo); 2328 nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 : 2329 (res->wi_rate == WI_WAVELAN_RES_2M ? 4 : 2330 (res->wi_rate == WI_WAVELAN_RES_5M ? 11 : 2331 (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0))); 2332 nr->nr_nrates = 0; 2333 while (res->wi_srates[nr->nr_nrates] != 0) { 2334 nr->nr_rates[nr->nr_nrates] = 2335 res->wi_srates[nr->nr_nrates] & 2336 WI_VAR_SRATES_MASK; 2337 nr->nr_nrates++; 2338 } 2339 nr->nr_flags = 0; 2340 if (bcmp(nr->nr_macaddr, nr->nr_bssid, 2341 IEEE80211_ADDR_LEN) == 0) 2342 nr->nr_flags |= IEEE80211_NODEREQ_AP; 2343 error = copyout(nr, (caddr_t)na->na_node + j, 2344 sizeof(struct ieee80211_nodereq)); 2345 if (error) 2346 break; 2347 2348 /* point to next node entry */ 2349 j += sizeof(struct ieee80211_nodereq); 2350 na->na_nodes++; 2351 } 2352 if (nr) 2353 free(nr, M_DEVBUF); 2354 free(pob, M_DEVBUF); 2355 free(wreq, M_DEVBUF); 2356 break; 2357 } 2358 case SIOCSIFADDR: 2359 ifa = (struct ifaddr *)req; 2360 ifp->if_flags |= IFF_UP; 2361 #ifdef INET 2362 if (ifa->ifa_addr->sa_family == AF_INET) 2363 arp_ifinit(&sc->sc_ic.ic_ac, ifa); 2364 #endif 2365 /* FALLTHROUGH */ 2366 case SIOCSIFFLAGS: 2367 if (ifp->if_flags & IFF_UP) { 2368 if ((ifp->if_flags & IFF_RUNNING) == 0) { 2369 pgt_init(ifp); 2370 error = ENETRESET; 2371 } 2372 } else { 2373 if (ifp->if_flags & IFF_RUNNING) { 2374 pgt_stop(sc, SC_NEEDS_RESET); 2375 error = ENETRESET; 2376 } 2377 } 2378 break; 2379 case SIOCADDMULTI: 2380 case SIOCDELMULTI: 2381 error = (cmd == SIOCADDMULTI) ? 2382 ether_addmulti(ifr, &ic->ic_ac) : 2383 ether_delmulti(ifr, &ic->ic_ac); 2384 2385 if (error == ENETRESET) 2386 error = 0; 2387 break; 2388 case SIOCSIFMTU: 2389 if (ifr->ifr_mtu > PGT_FRAG_SIZE) { 2390 error = EINVAL; 2391 break; 2392 } 2393 /* FALLTHROUGH */ 2394 default: 2395 error = ieee80211_ioctl(ifp, cmd, req); 2396 break; 2397 } 2398 2399 if (error == ENETRESET) { 2400 pgt_update_hw_from_sw(sc, 0, 0); 2401 error = 0; 2402 } 2403 splx(s); 2404 2405 return (error); 2406 } 2407 2408 void 2409 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob, 2410 struct wi_scan_res *scanres, uint32_t noise) 2411 { 2412 struct ieee80211_rateset *rs; 2413 struct wi_scan_res ap; 2414 unsigned int i, n; 2415 2416 rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO]; 2417 bzero(&ap, sizeof(ap)); 2418 ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0); 2419 ap.wi_noise = noise; 2420 ap.wi_signal = letoh16(pob->pob_rssi); 2421 IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address); 2422 ap.wi_interval = letoh16(pob->pob_beacon_period); 2423 ap.wi_capinfo = letoh16(pob->pob_capinfo); 2424 ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length); 2425 memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len); 2426 n = 0; 2427 for (i = 0; i < 16; i++) { 2428 if (letoh16(pob->pob_rates) & (1 << i)) { 2429 if (i > rs->rs_nrates) 2430 break; 2431 ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i]; 2432 if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0])) 2433 break; 2434 } 2435 } 2436 memcpy(scanres, &ap, WI_PRISM2_RES_SIZE); 2437 } 2438 2439 void 2440 node_mark_active_ap(void *arg, struct ieee80211_node *ni) 2441 { 2442 /* 2443 * HostAP mode lets all nodes stick around unless 2444 * the firmware AP kicks them off. 2445 */ 2446 ni->ni_inact = 0; 2447 } 2448 2449 void 2450 node_mark_active_adhoc(void *arg, struct ieee80211_node *ni) 2451 { 2452 struct pgt_ieee80211_node *pin; 2453 2454 /* 2455 * As there is no association in ad-hoc, we let links just 2456 * time out naturally as long they are not holding any private 2457 * configuration, such as 802.1x authorization. 2458 */ 2459 pin = (struct pgt_ieee80211_node *)ni; 2460 if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED) 2461 pin->pin_node.ni_inact = 0; 2462 } 2463 2464 void 2465 pgt_watchdog(struct ifnet *ifp) 2466 { 2467 struct pgt_softc *sc; 2468 2469 sc = ifp->if_softc; 2470 /* 2471 * Check for timed out transmissions (and make sure to set 2472 * this watchdog to fire again if there is still data in the 2473 * output device queue). 2474 */ 2475 if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) { 2476 int count; 2477 2478 ifp->if_timer = 1; 2479 if (sc->sc_txtimer && --sc->sc_txtimer == 0) { 2480 count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX); 2481 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 2482 DPRINTF(("%s: timeout %d data transmissions\n", 2483 sc->sc_dev.dv_xname, count)); 2484 } 2485 } 2486 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) 2487 return; 2488 /* 2489 * If we're goign to kick the device out of power-save mode 2490 * just to update the BSSID and such, we should not do it 2491 * very often; need to determine in what way to do that. 2492 */ 2493 if (ifp->if_flags & IFF_RUNNING && 2494 sc->sc_ic.ic_state != IEEE80211_S_INIT && 2495 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) 2496 pgt_async_update(sc); 2497 2498 #ifndef IEEE80211_STA_ONLY 2499 /* 2500 * As a firmware-based HostAP, we should not time out 2501 * nodes inside the driver additionally to the timeout 2502 * that exists in the firmware. The only things we 2503 * should have to deal with timing out when doing HostAP 2504 * are the privacy-related. 2505 */ 2506 switch (sc->sc_ic.ic_opmode) { 2507 case IEEE80211_M_HOSTAP: 2508 ieee80211_iterate_nodes(&sc->sc_ic, 2509 node_mark_active_ap, NULL); 2510 break; 2511 case IEEE80211_M_IBSS: 2512 ieee80211_iterate_nodes(&sc->sc_ic, 2513 node_mark_active_adhoc, NULL); 2514 break; 2515 default: 2516 break; 2517 } 2518 #endif 2519 ieee80211_watchdog(ifp); 2520 ifp->if_timer = 1; 2521 } 2522 2523 int 2524 pgt_init(struct ifnet *ifp) 2525 { 2526 struct pgt_softc *sc = ifp->if_softc; 2527 struct ieee80211com *ic = &sc->sc_ic; 2528 2529 /* set default channel */ 2530 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 2531 2532 if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED))) 2533 pgt_update_hw_from_sw(sc, 2534 ic->ic_state != IEEE80211_S_INIT, 2535 ic->ic_opmode != IEEE80211_M_MONITOR); 2536 2537 ifp->if_flags |= IFF_RUNNING; 2538 ifp->if_flags &= ~IFF_OACTIVE; 2539 2540 /* Begin background scanning */ 2541 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1); 2542 2543 return (0); 2544 } 2545 2546 /* 2547 * After most every configuration change, everything needs to be fully 2548 * reinitialized. For some operations (currently, WEP settings 2549 * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove 2550 * "associations," and allows EAP authorization to occur again. 2551 * If keepassoc is specified, the reset operation should try to go 2552 * back to the BSS had before. 2553 */ 2554 void 2555 pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc, int keepnodes) 2556 { 2557 struct ieee80211com *ic = &sc->sc_ic; 2558 struct arpcom *ac = &ic->ic_ac; 2559 struct ifnet *ifp = &ac->ac_if; 2560 struct pgt_obj_key keyobj; 2561 struct pgt_obj_ssid essid; 2562 uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1]; 2563 uint32_t mode, bsstype, config, profile, channel, slot, preamble; 2564 uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme; 2565 unsigned int i; 2566 int success, shouldbeup, s; 2567 2568 config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX; 2569 2570 /* 2571 * Promiscuous mode is currently a no-op since packets transmitted, 2572 * while in promiscuous mode, don't ever seem to go anywhere. 2573 */ 2574 shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP; 2575 2576 if (shouldbeup) { 2577 switch (ic->ic_opmode) { 2578 case IEEE80211_M_STA: 2579 if (ifp->if_flags & IFF_PROMISC) 2580 mode = PGT_MODE_CLIENT; /* what to do? */ 2581 else 2582 mode = PGT_MODE_CLIENT; 2583 bsstype = PGT_BSS_TYPE_STA; 2584 dot1x = PGT_DOT1X_AUTH_ENABLED; 2585 break; 2586 #ifndef IEEE80211_STA_ONLY 2587 case IEEE80211_M_IBSS: 2588 if (ifp->if_flags & IFF_PROMISC) 2589 mode = PGT_MODE_CLIENT; /* what to do? */ 2590 else 2591 mode = PGT_MODE_CLIENT; 2592 bsstype = PGT_BSS_TYPE_IBSS; 2593 dot1x = PGT_DOT1X_AUTH_ENABLED; 2594 break; 2595 case IEEE80211_M_HOSTAP: 2596 mode = PGT_MODE_AP; 2597 bsstype = PGT_BSS_TYPE_STA; 2598 /* 2599 * For IEEE 802.1x, we need to authenticate and 2600 * authorize hosts from here on or they remain 2601 * associated but without the ability to send or 2602 * receive normal traffic to us (courtesy the 2603 * firmware AP implementation). 2604 */ 2605 dot1x = PGT_DOT1X_AUTH_ENABLED; 2606 /* 2607 * WDS mode needs several things to work: 2608 * discovery of exactly how creating the WDS 2609 * links is meant to function, an interface 2610 * for this, and ability to encode or decode 2611 * the WDS frames. 2612 */ 2613 if (sc->sc_wds) 2614 config |= PGT_CONFIG_WDS; 2615 break; 2616 #endif 2617 case IEEE80211_M_MONITOR: 2618 mode = PGT_MODE_PROMISCUOUS; 2619 bsstype = PGT_BSS_TYPE_ANY; 2620 dot1x = PGT_DOT1X_AUTH_NONE; 2621 break; 2622 default: 2623 goto badopmode; 2624 } 2625 } else { 2626 badopmode: 2627 mode = PGT_MODE_CLIENT; 2628 bsstype = PGT_BSS_TYPE_NONE; 2629 } 2630 2631 DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname)); 2632 switch (ic->ic_curmode) { 2633 case IEEE80211_MODE_11A: 2634 profile = PGT_PROFILE_A_ONLY; 2635 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC; 2636 DPRINTF(("IEEE80211_MODE_11A\n")); 2637 break; 2638 case IEEE80211_MODE_11B: 2639 profile = PGT_PROFILE_B_ONLY; 2640 preamble = PGT_OID_PREAMBLE_MODE_LONG; 2641 DPRINTF(("IEEE80211_MODE_11B\n")); 2642 break; 2643 case IEEE80211_MODE_11G: 2644 profile = PGT_PROFILE_G_ONLY; 2645 preamble = PGT_OID_PREAMBLE_MODE_SHORT; 2646 DPRINTF(("IEEE80211_MODE_11G\n")); 2647 break; 2648 case IEEE80211_MODE_TURBO: /* not handled */ 2649 /* FALLTHROUGH */ 2650 case IEEE80211_MODE_AUTO: 2651 profile = PGT_PROFILE_MIXED_G_WIFI; 2652 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC; 2653 DPRINTF(("IEEE80211_MODE_AUTO\n")); 2654 break; 2655 default: 2656 panic("unknown mode %d", ic->ic_curmode); 2657 } 2658 2659 switch (sc->sc_80211_ioc_auth) { 2660 case IEEE80211_AUTH_NONE: 2661 auth = PGT_AUTH_MODE_NONE; 2662 break; 2663 case IEEE80211_AUTH_OPEN: 2664 auth = PGT_AUTH_MODE_OPEN; 2665 break; 2666 default: 2667 auth = PGT_AUTH_MODE_SHARED; 2668 break; 2669 } 2670 2671 if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) { 2672 wep = 1; 2673 exunencrypted = 1; 2674 } else { 2675 wep = 0; 2676 exunencrypted = 0; 2677 } 2678 2679 mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO); 2680 wep = htole32(wep); 2681 exunencrypted = htole32(exunencrypted); 2682 profile = htole32(profile); 2683 preamble = htole32(preamble); 2684 bsstype = htole32(bsstype); 2685 config = htole32(config); 2686 mode = htole32(mode); 2687 2688 if (!wep || !sc->sc_dot1x) 2689 dot1x = PGT_DOT1X_AUTH_NONE; 2690 dot1x = htole32(dot1x); 2691 auth = htole32(auth); 2692 2693 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2694 slot = htole32(PGT_OID_SLOT_MODE_SHORT); 2695 else 2696 slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC); 2697 2698 if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) { 2699 if (keepassoc) 2700 channel = 0; 2701 else 2702 channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan); 2703 } else 2704 channel = ieee80211_chan2ieee(ic, ic->ic_des_chan); 2705 2706 DPRINTF(("%s: set rates", sc->sc_dev.dv_xname)); 2707 for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) { 2708 availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i]; 2709 DPRINTF((" %d", availrates[i])); 2710 } 2711 DPRINTF(("\n")); 2712 availrates[i++] = 0; 2713 2714 essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid)); 2715 memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length); 2716 2717 s = splnet(); 2718 for (success = 0; success == 0; success = 1) { 2719 SETOID(PGT_OID_PROFILE, &profile, sizeof(profile)); 2720 SETOID(PGT_OID_CONFIG, &config, sizeof(config)); 2721 SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme)); 2722 2723 if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) { 2724 SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr, 2725 sizeof(ac->ac_enaddr)); 2726 IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr); 2727 } 2728 2729 SETOID(PGT_OID_MODE, &mode, sizeof(mode)); 2730 SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype)); 2731 2732 if (channel != 0 && channel != IEEE80211_CHAN_ANY) 2733 SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel)); 2734 2735 if (ic->ic_flags & IEEE80211_F_DESBSSID) { 2736 SETOID(PGT_OID_BSSID, ic->ic_des_bssid, 2737 sizeof(ic->ic_des_bssid)); 2738 } else if (keepassoc) { 2739 SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid, 2740 sizeof(ic->ic_bss->ni_bssid)); 2741 } 2742 2743 SETOID(PGT_OID_SSID, &essid, sizeof(essid)); 2744 2745 if (ic->ic_des_esslen > 0) 2746 SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid)); 2747 2748 SETOID(PGT_OID_RATES, &availrates, i); 2749 SETOID(PGT_OID_EXTENDED_RATES, &availrates, i); 2750 SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble)); 2751 SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot)); 2752 SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth)); 2753 SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted, 2754 sizeof(exunencrypted)); 2755 SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x)); 2756 SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep)); 2757 /* 2758 * Setting WEP key(s) 2759 */ 2760 if (letoh32(wep) != 0) { 2761 keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP; 2762 /* key 1 */ 2763 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2764 IEEE80211_KEYBUF_SIZE); 2765 keyobj.pok_length = min(keyobj.pok_length, 2766 ic->ic_nw_keys[0].k_len); 2767 bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key, 2768 keyobj.pok_length); 2769 SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj)); 2770 /* key 2 */ 2771 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2772 IEEE80211_KEYBUF_SIZE); 2773 keyobj.pok_length = min(keyobj.pok_length, 2774 ic->ic_nw_keys[1].k_len); 2775 bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key, 2776 keyobj.pok_length); 2777 SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj)); 2778 /* key 3 */ 2779 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2780 IEEE80211_KEYBUF_SIZE); 2781 keyobj.pok_length = min(keyobj.pok_length, 2782 ic->ic_nw_keys[2].k_len); 2783 bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key, 2784 keyobj.pok_length); 2785 SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj)); 2786 /* key 4 */ 2787 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2788 IEEE80211_KEYBUF_SIZE); 2789 keyobj.pok_length = min(keyobj.pok_length, 2790 ic->ic_nw_keys[3].k_len); 2791 bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key, 2792 keyobj.pok_length); 2793 SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj)); 2794 2795 wepkey = htole32(ic->ic_wep_txkey); 2796 SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey)); 2797 } 2798 /* set mode again to commit */ 2799 SETOID(PGT_OID_MODE, &mode, sizeof(mode)); 2800 } 2801 splx(s); 2802 2803 if (success) { 2804 if (shouldbeup && keepnodes) 2805 sc->sc_flags |= SC_NOFREE_ALLNODES; 2806 if (shouldbeup) 2807 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 2808 else 2809 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2810 } else { 2811 printf("%s: problem setting modes\n", sc->sc_dev.dv_xname); 2812 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2813 } 2814 } 2815 2816 void 2817 pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid, 2818 struct pgt_obj_mlme *mlme) 2819 { 2820 struct ieee80211com *ic = &sc->sc_ic; 2821 struct pgt_ieee80211_node *pin; 2822 struct ieee80211_node *ni; 2823 2824 ni = ieee80211_find_node(ic, mlme->pom_address); 2825 pin = (struct pgt_ieee80211_node *)ni; 2826 switch (oid) { 2827 case PGT_OID_DISASSOCIATE: 2828 if (ni != NULL) 2829 ieee80211_release_node(&sc->sc_ic, ni); 2830 break; 2831 case PGT_OID_ASSOCIATE: 2832 if (ni == NULL) { 2833 ni = ieee80211_dup_bss(ic, mlme->pom_address); 2834 if (ni == NULL) 2835 break; 2836 ic->ic_newassoc(ic, ni, 1); 2837 pin = (struct pgt_ieee80211_node *)ni; 2838 } 2839 ni->ni_associd = letoh16(mlme->pom_id); 2840 pin->pin_mlme_state = letoh16(mlme->pom_state); 2841 break; 2842 default: 2843 if (pin != NULL) 2844 pin->pin_mlme_state = letoh16(mlme->pom_state); 2845 break; 2846 } 2847 } 2848 2849 /* 2850 * Either in response to an event or after a certain amount of time, 2851 * synchronize our idea of the network we're part of from the hardware. 2852 */ 2853 void 2854 pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa, 2855 struct mbuf *args) 2856 { 2857 struct ieee80211com *ic = &sc->sc_ic; 2858 struct pgt_obj_ssid ssid; 2859 struct pgt_obj_bss bss; 2860 uint32_t channel, noise, ls; 2861 int error, s; 2862 2863 if (pa != NULL) { 2864 struct pgt_obj_mlme *mlme; 2865 uint32_t oid; 2866 2867 oid = *mtod(args, uint32_t *); 2868 m_adj(args, sizeof(uint32_t)); 2869 if (sc->sc_debug & SC_DEBUG_TRAP) 2870 DPRINTF(("%s: trap: oid %#x len %u\n", 2871 sc->sc_dev.dv_xname, oid, args->m_len)); 2872 switch (oid) { 2873 case PGT_OID_LINK_STATE: 2874 if (args->m_len < sizeof(uint32_t)) 2875 break; 2876 ls = letoh32(*mtod(args, uint32_t *)); 2877 if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK)) 2878 DPRINTF(("%s: %s: link rate %u\n", 2879 sc->sc_dev.dv_xname, __func__, ls)); 2880 if (ls) 2881 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 2882 else 2883 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 2884 goto gotlinkstate; 2885 case PGT_OID_DEAUTHENTICATE: 2886 case PGT_OID_AUTHENTICATE: 2887 case PGT_OID_DISASSOCIATE: 2888 case PGT_OID_ASSOCIATE: 2889 if (args->m_len < sizeof(struct pgt_obj_mlme)) 2890 break; 2891 mlme = mtod(args, struct pgt_obj_mlme *); 2892 if (sc->sc_debug & SC_DEBUG_TRAP) 2893 DPRINTF(("%s: mlme: address " 2894 "%s id 0x%02x state 0x%02x code 0x%02x\n", 2895 sc->sc_dev.dv_xname, 2896 ether_sprintf(mlme->pom_address), 2897 letoh16(mlme->pom_id), 2898 letoh16(mlme->pom_state), 2899 letoh16(mlme->pom_code))); 2900 #ifndef IEEE80211_STA_ONLY 2901 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 2902 pgt_hostap_handle_mlme(sc, oid, mlme); 2903 #endif 2904 break; 2905 } 2906 return; 2907 } 2908 if (ic->ic_state == IEEE80211_S_SCAN) { 2909 s = splnet(); 2910 error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls)); 2911 splx(s); 2912 if (error) 2913 return; 2914 DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname, 2915 htole32(ls))); 2916 if (ls != 0) 2917 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 2918 } 2919 2920 gotlinkstate: 2921 s = splnet(); 2922 if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0) 2923 goto out; 2924 sc->sc_noise = letoh32(noise); 2925 if (ic->ic_state == IEEE80211_S_RUN) { 2926 if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel, 2927 sizeof(channel)) != 0) 2928 goto out; 2929 channel = min(letoh32(channel), IEEE80211_CHAN_MAX); 2930 ic->ic_bss->ni_chan = &ic->ic_channels[channel]; 2931 if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid, 2932 sizeof(ic->ic_bss->ni_bssid)) != 0) 2933 goto out; 2934 IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid); 2935 error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss, 2936 sizeof(bss)); 2937 if (error == 0) 2938 ic->ic_bss->ni_rssi = bss.pob_rssi; 2939 else if (error != EPERM) 2940 goto out; 2941 error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid)); 2942 if (error) 2943 goto out; 2944 ic->ic_bss->ni_esslen = min(ssid.pos_length, 2945 sizeof(ic->ic_bss->ni_essid)); 2946 memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid, 2947 ssid.pos_length); 2948 } 2949 2950 out: 2951 splx(s); 2952 } 2953 2954 int 2955 pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 2956 { 2957 struct pgt_softc *sc = ic->ic_if.if_softc; 2958 enum ieee80211_state ostate; 2959 2960 ostate = ic->ic_state; 2961 2962 DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname, 2963 ieee80211_state_name[ostate], ieee80211_state_name[nstate])); 2964 2965 switch (nstate) { 2966 case IEEE80211_S_INIT: 2967 if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0) 2968 ic->ic_if.if_timer = 0; 2969 ic->ic_mgt_timer = 0; 2970 ic->ic_flags &= ~IEEE80211_F_SIBSS; 2971 ieee80211_free_allnodes(ic); 2972 break; 2973 case IEEE80211_S_SCAN: 2974 ic->ic_if.if_timer = 1; 2975 ic->ic_mgt_timer = 0; 2976 if (sc->sc_flags & SC_NOFREE_ALLNODES) 2977 sc->sc_flags &= ~SC_NOFREE_ALLNODES; 2978 else 2979 ieee80211_free_allnodes(ic); 2980 2981 #ifndef IEEE80211_STA_ONLY 2982 /* Just use any old channel; we override it anyway. */ 2983 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 2984 ieee80211_create_ibss(ic, ic->ic_ibss_chan); 2985 #endif 2986 break; 2987 case IEEE80211_S_RUN: 2988 ic->ic_if.if_timer = 1; 2989 break; 2990 default: 2991 break; 2992 } 2993 2994 return (sc->sc_newstate(ic, nstate, arg)); 2995 } 2996 2997 int 2998 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq) 2999 { 3000 int wokeup = 0; 3001 3002 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 3003 sc->sc_cbdmam->dm_mapsize, 3004 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE); 3005 sc->sc_cb->pcb_device_curfrag[pq] = 3006 sc->sc_cb->pcb_driver_curfrag[pq]; 3007 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 3008 sc->sc_cbdmam->dm_mapsize, 3009 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD); 3010 while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) { 3011 struct pgt_desc *pd; 3012 3013 pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]); 3014 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link); 3015 sc->sc_dirtyq_count[pq]--; 3016 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link); 3017 sc->sc_freeq_count[pq]++; 3018 pgt_unload_tx_desc_frag(sc, pd); 3019 if (sc->sc_debug & SC_DEBUG_QUEUES) 3020 DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n", 3021 sc->sc_dev.dv_xname, pd->pd_fragnum, pq)); 3022 wokeup++; 3023 if (pgt_queue_is_data(pq)) 3024 sc->sc_ic.ic_if.if_oerrors++; 3025 } 3026 3027 return (wokeup); 3028 } 3029 3030 int 3031 pgt_dma_alloc(struct pgt_softc *sc) 3032 { 3033 size_t size; 3034 int i, error, nsegs; 3035 3036 for (i = 0; i < PGT_QUEUE_COUNT; i++) { 3037 TAILQ_INIT(&sc->sc_freeq[i]); 3038 TAILQ_INIT(&sc->sc_dirtyq[i]); 3039 } 3040 3041 /* 3042 * control block 3043 */ 3044 size = sizeof(struct pgt_control_block); 3045 3046 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 3047 BUS_DMA_NOWAIT, &sc->sc_cbdmam); 3048 if (error != 0) { 3049 printf("%s: can not create DMA tag for control block\n", 3050 sc->sc_dev.dv_xname); 3051 goto out; 3052 } 3053 3054 error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 3055 0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 3056 if (error != 0) { 3057 printf("%s: can not allocate DMA memory for control block\n", 3058 sc->sc_dev.dv_xname); 3059 goto out; 3060 } 3061 3062 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs, 3063 size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT); 3064 if (error != 0) { 3065 printf("%s: can not map DMA memory for control block\n", 3066 sc->sc_dev.dv_xname); 3067 goto out; 3068 } 3069 3070 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam, 3071 sc->sc_cb, size, NULL, BUS_DMA_NOWAIT); 3072 if (error != 0) { 3073 printf("%s: can not load DMA map for control block\n", 3074 sc->sc_dev.dv_xname); 3075 goto out; 3076 } 3077 3078 /* 3079 * powersave 3080 */ 3081 size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT; 3082 3083 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 3084 BUS_DMA_ALLOCNOW, &sc->sc_psmdmam); 3085 if (error != 0) { 3086 printf("%s: can not create DMA tag for powersave\n", 3087 sc->sc_dev.dv_xname); 3088 goto out; 3089 } 3090 3091 error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 3092 0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 3093 if (error != 0) { 3094 printf("%s: can not allocate DMA memory for powersave\n", 3095 sc->sc_dev.dv_xname); 3096 goto out; 3097 } 3098 3099 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs, 3100 size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT); 3101 if (error != 0) { 3102 printf("%s: can not map DMA memory for powersave\n", 3103 sc->sc_dev.dv_xname); 3104 goto out; 3105 } 3106 3107 error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam, 3108 sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK); 3109 if (error != 0) { 3110 printf("%s: can not load DMA map for powersave\n", 3111 sc->sc_dev.dv_xname); 3112 goto out; 3113 } 3114 3115 /* 3116 * fragments 3117 */ 3118 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX); 3119 if (error != 0) 3120 goto out; 3121 3122 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX); 3123 if (error != 0) 3124 goto out; 3125 3126 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX); 3127 if (error != 0) 3128 goto out; 3129 3130 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX); 3131 if (error != 0) 3132 goto out; 3133 3134 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX); 3135 if (error != 0) 3136 goto out; 3137 3138 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX); 3139 if (error != 0) 3140 goto out; 3141 3142 out: 3143 if (error) { 3144 printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname); 3145 pgt_dma_free(sc); 3146 } 3147 3148 return (error); 3149 } 3150 3151 int 3152 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq) 3153 { 3154 struct pgt_desc *pd; 3155 size_t i, qsize; 3156 int error, nsegs; 3157 3158 switch (pq) { 3159 case PGT_QUEUE_DATA_LOW_RX: 3160 qsize = PGT_QUEUE_DATA_RX_SIZE; 3161 break; 3162 case PGT_QUEUE_DATA_LOW_TX: 3163 qsize = PGT_QUEUE_DATA_TX_SIZE; 3164 break; 3165 case PGT_QUEUE_DATA_HIGH_RX: 3166 qsize = PGT_QUEUE_DATA_RX_SIZE; 3167 break; 3168 case PGT_QUEUE_DATA_HIGH_TX: 3169 qsize = PGT_QUEUE_DATA_TX_SIZE; 3170 break; 3171 case PGT_QUEUE_MGMT_RX: 3172 qsize = PGT_QUEUE_MGMT_SIZE; 3173 break; 3174 case PGT_QUEUE_MGMT_TX: 3175 qsize = PGT_QUEUE_MGMT_SIZE; 3176 break; 3177 default: 3178 return (EINVAL); 3179 } 3180 3181 for (i = 0; i < qsize; i++) { 3182 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK); 3183 3184 error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1, 3185 PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam); 3186 if (error != 0) { 3187 printf("%s: can not create DMA tag for fragment\n", 3188 sc->sc_dev.dv_xname); 3189 free(pd, M_DEVBUF); 3190 break; 3191 } 3192 3193 error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE, 3194 0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK); 3195 if (error != 0) { 3196 printf("%s: error alloc frag %u on queue %u\n", 3197 sc->sc_dev.dv_xname, i, pq); 3198 free(pd, M_DEVBUF); 3199 break; 3200 } 3201 3202 error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs, 3203 PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK); 3204 if (error != 0) { 3205 printf("%s: error map frag %u on queue %u\n", 3206 sc->sc_dev.dv_xname, i, pq); 3207 free(pd, M_DEVBUF); 3208 break; 3209 } 3210 3211 if (pgt_queue_is_rx(pq)) { 3212 error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, 3213 pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT); 3214 if (error != 0) { 3215 printf("%s: error load frag %u on queue %u\n", 3216 sc->sc_dev.dv_xname, i, pq); 3217 bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 3218 nsegs); 3219 free(pd, M_DEVBUF); 3220 break; 3221 } 3222 pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr; 3223 } 3224 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link); 3225 } 3226 3227 return (error); 3228 } 3229 3230 void 3231 pgt_dma_free(struct pgt_softc *sc) 3232 { 3233 /* 3234 * fragments 3235 */ 3236 if (sc->sc_dmat != NULL) { 3237 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX); 3238 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX); 3239 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX); 3240 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX); 3241 pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX); 3242 pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX); 3243 } 3244 3245 /* 3246 * powersave 3247 */ 3248 if (sc->sc_psmbuf != NULL) { 3249 bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam); 3250 bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1); 3251 sc->sc_psmbuf = NULL; 3252 sc->sc_psmdmam = NULL; 3253 } 3254 3255 /* 3256 * control block 3257 */ 3258 if (sc->sc_cb != NULL) { 3259 bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam); 3260 bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1); 3261 sc->sc_cb = NULL; 3262 sc->sc_cbdmam = NULL; 3263 } 3264 } 3265 3266 void 3267 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq) 3268 { 3269 struct pgt_desc *pd; 3270 3271 while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) { 3272 pd = TAILQ_FIRST(&sc->sc_freeq[pq]); 3273 TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link); 3274 if (pd->pd_dmam != NULL) { 3275 bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam); 3276 pd->pd_dmam = NULL; 3277 } 3278 bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1); 3279 free(pd, M_DEVBUF); 3280 } 3281 } 3282 3283 int 3284 pgt_activate(struct device *self, int act) 3285 { 3286 struct pgt_softc *sc = (struct pgt_softc *)self; 3287 struct ifnet *ifp = &sc->sc_ic.ic_if; 3288 3289 DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, why)); 3290 3291 switch (act) { 3292 case DVACT_SUSPEND: 3293 if (ifp->if_flags & IFF_RUNNING) { 3294 pgt_stop(sc, SC_NEEDS_RESET); 3295 pgt_update_hw_from_sw(sc, 0, 0); 3296 } 3297 if (sc->sc_power != NULL) 3298 (*sc->sc_power)(sc, act); 3299 break; 3300 case DVACT_RESUME: 3301 workq_queue_task(NULL, &sc->sc_resume_wqt, 0, 3302 pgt_resume, sc, NULL); 3303 break; 3304 } 3305 return 0; 3306 } 3307 3308 void 3309 pgt_resume(void *arg1, void *arg2) 3310 { 3311 struct pgt_softc *sc = arg1; 3312 struct ifnet *ifp = &sc->sc_ic.ic_if; 3313 3314 if (sc->sc_power != NULL) 3315 (*sc->sc_power)(sc, DVACT_RESUME); 3316 3317 pgt_stop(sc, SC_NEEDS_RESET); 3318 pgt_update_hw_from_sw(sc, 0, 0); 3319 3320 if (ifp->if_flags & IFF_UP) { 3321 pgt_init(ifp); 3322 pgt_update_hw_from_sw(sc, 0, 0); 3323 } 3324 } 3325