/*	$OpenBSD: pgt.c,v 1.74 2014/07/12 18:48:17 tedu Exp $  */

/*
 * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
 * Copyright (c) 2004 Brian Fundakowski Feldman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/kthread.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/ic/pgtreg.h>
#include <dev/ic/pgtvar.h>

#include <dev/ic/if_wireg.h>
#include <dev/ic/if_wi_ieee.h>
#include <dev/ic/if_wivar.h>

#ifdef PGT_DEBUG
#define DPRINTF(x)	do { printf x; } while (0)
#else
#define DPRINTF(x)
#endif

#define	SETOID(oid, var, size) {					\
	if (pgt_oid_set(sc, oid, var, size) != 0)			\
		break;							\
}

/*
 * This is a driver for the Intersil Prism family of 802.11g network cards,
 * based upon version 1.2 of the Linux driver and firmware found at
 * http://www.prism54.org/.
 */

#define SCAN_TIMEOUT			5	/* 5 seconds */

struct cfdriver pgt_cd = {
	NULL, "pgt", DV_IFNET
};

void	 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
int	 pgt_media_change(struct ifnet *ifp);
void	 pgt_write_memory_barrier(struct pgt_softc *);
uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
void	 pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_debug_events(struct pgt_softc *, const char *);
uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
void	 pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *);
void	 pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_firmware(struct pgt_softc *);
void	 pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
	     struct pgt_frag *);
int	 pgt_reset(struct pgt_softc *);
void	 pgt_stop(struct pgt_softc *, unsigned int);
void	 pgt_reboot(struct pgt_softc *);
void	 pgt_init_intr(struct pgt_softc *);
void	 pgt_update_intr(struct pgt_softc *, int);
struct mbuf
	*pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
	     struct mbuf *, struct ieee80211_node **);
void	 pgt_input_frames(struct pgt_softc *, struct mbuf *);
void	 pgt_wakeup_intr(struct pgt_softc *);
void	 pgt_sleep_intr(struct pgt_softc *);
void	 pgt_empty_traps(struct pgt_softc_kthread *);
void	 pgt_per_device_kthread(void *);
void	 pgt_async_reset(struct pgt_softc *);
void	 pgt_async_update(struct pgt_softc *);
void	 pgt_txdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_rxdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
void	 pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
struct mbuf
	*pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
int	 pgt_oid_get(struct pgt_softc *, enum
	     pgt_oid, void *, size_t);
int	 pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
int	 pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
void	 pgt_state_dump(struct pgt_softc *);
int	 pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
void	 pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *, uint16_t, int);
void	 pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
struct ieee80211_node
	*pgt_ieee80211_node_alloc(struct ieee80211com *);
void	 pgt_ieee80211_newassoc(struct ieee80211com *,
	     struct ieee80211_node *, int);
void	 pgt_ieee80211_node_free(struct ieee80211com *,
	     struct ieee80211_node *);
void	 pgt_ieee80211_node_copy(struct ieee80211com *,
	     struct ieee80211_node *,
	     const struct ieee80211_node *);
int	 pgt_ieee80211_send_mgmt(struct ieee80211com *,
	     struct ieee80211_node *, int, int, int);
int	 pgt_net_attach(struct pgt_softc *);
void	 pgt_start(struct ifnet *);
int	 pgt_ioctl(struct ifnet *, u_long, caddr_t);
void	 pgt_obj_bss2scanres(struct pgt_softc *,
	     struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
void	 node_mark_active_ap(void *, struct ieee80211_node *);
void	 node_mark_active_adhoc(void *, struct ieee80211_node *);
void	 pgt_watchdog(struct ifnet *);
int	 pgt_init(struct ifnet *);
void	 pgt_update_hw_from_sw(struct pgt_softc *, int, int);
void	 pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
	     struct pgt_obj_mlme *);
void	 pgt_update_sw_from_hw(struct pgt_softc *,
	     struct pgt_async_trap *, struct mbuf *);
int	 pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
int	 pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
int	 pgt_dma_alloc(struct pgt_softc *);
int	 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_dma_free(struct pgt_softc *);
void	 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_wakeup(struct pgt_softc *);

void
pgt_write_memory_barrier(struct pgt_softc *sc)
{
	bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
	    BUS_SPACE_BARRIER_WRITE);
}

u_int32_t
pgt_read_4(struct pgt_softc *sc, uint16_t offset)
{
	return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
}

void
pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
}

/*
 * Write out 4 bytes and cause a PCI flush by reading back in on a
 * harmless register.
 */
void
pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
	(void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
}

/*
 * Print the state of events in the queues from an interrupt or a trigger.
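 * Each per-queue count is the driver's fragment counter minus the
 * device's, i.e. how many events are still outstanding.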
 */
void
pgt_debug_events(struct pgt_softc *sc, const char *when)
{
#define	COUNT(i)							\
	letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -			\
	letoh32(sc->sc_cb->pcb_device_curfrag[i])
	if (sc->sc_debug & SC_DEBUG_EVENTS)
		DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
		    sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
		    COUNT(3), COUNT(4), COUNT(5)));
#undef COUNT
}

uint32_t
pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
{
	return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
	    letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
}

void
pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = 0;

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
}

int
pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_desc *pd)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
	    PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("%s: unable to load %s tx DMA: %d\n",
		    sc->sc_dev.dv_xname,
		    pgt_queue_is_data(pq) ? "data" : "mgmt", error));
		return (error);
	}
	pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = htole16(0);

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return (0);
}

void
pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
	pd->pd_dmaaddr = 0;
}

int
pgt_load_firmware(struct pgt_softc *sc)
{
	int error, reg, dirreg, fwoff, ucodeoff, fwlen;
	uint8_t *ucode;
	uint32_t *uc;
	size_t size;
	char *name;

	if (sc->sc_flags & SC_ISL3877)
		name = "pgt-isl3877";
	else
		name = "pgt-isl3890";	/* includes isl3880 */

	error = loadfirmware(name, &ucode, &size);

	if (error != 0) {
		DPRINTF(("%s: error %d, could not read firmware %s\n",
		    sc->sc_dev.dv_xname, error, name));
		return (EIO);
	}

	if (size & 3) {
		DPRINTF(("%s: bad firmware size %u\n",
		    sc->sc_dev.dv_xname, size));
		free(ucode, M_DEVBUF, 0);
		return (EINVAL);
	}

	pgt_reboot(sc);

	fwoff = 0;
	ucodeoff = 0;
	uc = (uint32_t *)ucode;
	reg = PGT_FIRMWARE_INTERNAL_OFFSET;
	while (fwoff < size) {
		pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);

		if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
			fwlen = PGT_DIRECT_MEMORY_SIZE;
		else
			fwlen = size - fwoff;

		dirreg = PGT_DIRECT_MEMORY_OFFSET;
		while (fwlen > 4) {
			pgt_write_4(sc, dirreg, uc[ucodeoff]);
			fwoff += 4;
			dirreg += 4;
			reg += 4;
			fwlen -= 4;
			ucodeoff++;
		}
		pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
		fwoff += 4;
		dirreg += 4;
		reg += 4;
		fwlen -= 4;
		ucodeoff++;
	}
	DPRINTF(("%s: %d bytes microcode loaded from %s\n",
	    sc->sc_dev.dv_xname, fwoff, name));

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
	reg |= PGT_CTRL_STAT_RAMBOOT;
	pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
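	/* Allow the RAMBOOT write to settle before pulsing RESET below. */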
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	free(ucode, M_DEVBUF, 0);

	return (0);
}

void
pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_frag *pqfrags)
{
	struct pgt_desc *pd;
	unsigned int i;

	sc->sc_cb->pcb_device_curfrag[pq] = 0;
	i = 0;
	/* XXX why only freeq ??? */
	TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
		pd->pd_fragnum = i;
		pd->pd_fragp = &pqfrags[i];
		if (pgt_queue_is_rx(pq))
			pgt_reinit_rx_desc_frag(sc, pd);
		i++;
	}
	sc->sc_freeq_count[pq] = i;
	/*
	 * The ring buffer describes how many free buffers are available from
	 * the host (for receive queues) or how many are pending (for
	 * transmit queues).
	 */
	if (pgt_queue_is_rx(pq))
		sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
	else
		sc->sc_cb->pcb_driver_curfrag[pq] = 0;
}

/*
 * Turn off interrupts, reset the device (possibly loading firmware),
 * and put everything in a known state.
 */
int
pgt_reset(struct pgt_softc *sc)
{
	int error;

	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * Set up the management receive queue, assuming there are no
	 * requests in progress.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
	    &sc->sc_cb->pcb_data_low_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
	    &sc->sc_cb->pcb_data_low_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
	    &sc->sc_cb->pcb_data_high_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
	    &sc->sc_cb->pcb_data_high_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
	    &sc->sc_cb->pcb_mgmt_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
	    &sc->sc_cb->pcb_mgmt_tx[0]);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);

	/* load firmware */
	if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
		error = pgt_load_firmware(sc);
		if (error) {
			printf("%s: firmware load failed\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
		sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
		DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
	}

	/* upload the control block's DMA address */
	pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
	    htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
	DELAY(PGT_WRITEIO_DELAY);

	/* send a reset event */
	pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
	DELAY(PGT_WRITEIO_DELAY);

	/* await only the initialization interrupt */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
	DELAY(PGT_WRITEIO_DELAY);

	return (0);
}

/*
 * If we're trying to reset and the device has seemingly not been detached,
 * we'll spend a minute seeing if we can't do the reset.
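 * A failed reset is retried once; if the retry also gets no response
 * we give up and panic (see the tryagain logic below).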
 */
void
pgt_stop(struct pgt_softc *sc, unsigned int flag)
{
	struct ieee80211com *ic;
	unsigned int wokeup;
	int tryagain = 0;

	ic = &sc->sc_ic;

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	sc->sc_flags |= SC_UNINITIALIZED;
	sc->sc_flags |= flag;

	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);

trying_again:
	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/* reboot card */
	pgt_reboot(sc);

	do {
		wokeup = 0;
		/*
		 * We don't expect to be woken up, just to drop the lock
		 * and time out.  Only tx queues can have anything valid
		 * on them outside of an interrupt.
		 */
		while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
			struct pgt_mgmt_desc *pmd;

			pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			pmd->pmd_error = ENETRESET;
			wakeup_one(pmd);
			if (sc->sc_debug & SC_DEBUG_MGMT)
				DPRINTF(("%s: queue: mgmt %p <- %#x "
				    "(drained)\n", sc->sc_dev.dv_xname,
				    pmd, pmd->pmd_oid));
			wokeup++;
		}
		if (wokeup > 0) {
			if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
				sc->sc_flags &= ~flag;
				return;
			}
		}
	} while (wokeup > 0);

	if (flag == SC_NEEDS_RESET) {
		int error;

		DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
		sc->sc_flags &= ~SC_POWERSAVE;
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		error = pgt_reset(sc);
		if (error == 0) {
			tsleep(&sc->sc_flags, 0, "pgtres", hz);
			if (sc->sc_flags & SC_UNINITIALIZED) {
				printf("%s: not responding\n",
				    sc->sc_dev.dv_xname);
				/* Thud.  It was probably removed.
				 */
				if (tryagain)
					panic("pgt went for lunch"); /* XXX */
				tryagain = 1;
			} else {
				/* await all interrupts */
				pgt_write_4_flush(sc, PGT_REG_INT_EN,
				    PGT_INT_STAT_SOURCES);
				DELAY(PGT_WRITEIO_DELAY);
				ic->ic_if.if_flags |= IFF_RUNNING;
			}
		}

		if (tryagain)
			goto trying_again;

		sc->sc_flags &= ~flag;
		if (ic->ic_if.if_flags & IFF_RUNNING)
			pgt_update_hw_from_sw(sc,
			    ic->ic_state != IEEE80211_S_INIT,
			    ic->ic_opmode != IEEE80211_M_MONITOR);
	}

	ic->ic_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

void
pgt_attach(void *xsc)
{
	struct pgt_softc *sc = xsc;
	int error;

	/* debug flags */
	//sc->sc_debug |= SC_DEBUG_QUEUES;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_MGMT;
	sc->sc_debug |= SC_DEBUG_UNEXPECTED;
	//sc->sc_debug |= SC_DEBUG_TRIGGER;	/* verbose */
	//sc->sc_debug |= SC_DEBUG_EVENTS;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_POWER;
	sc->sc_debug |= SC_DEBUG_TRAP;
	sc->sc_debug |= SC_DEBUG_LINK;
	//sc->sc_debug |= SC_DEBUG_RXANNEX;
	//sc->sc_debug |= SC_DEBUG_RXFRAG;
	//sc->sc_debug |= SC_DEBUG_RXETHER;

	/* enable card if possible */
	if (sc->sc_enable != NULL)
		(*sc->sc_enable)(sc);

	error = pgt_dma_alloc(sc);
	if (error)
		return;

	sc->sc_ic.ic_if.if_softc = sc;
	TAILQ_INIT(&sc->sc_mgmtinprog);
	TAILQ_INIT(&sc->sc_kthread.sck_traps);
	sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
	sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;

	error = pgt_reset(sc);
	if (error)
		return;

	tsleep(&sc->sc_flags, 0, "pgtres", hz);
	if (sc->sc_flags & SC_UNINITIALIZED) {
		printf("%s: not responding\n", sc->sc_dev.dv_xname);
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		return;
	} else {
		/* await all interrupts */
		pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
		DELAY(PGT_WRITEIO_DELAY);
	}

	error = pgt_net_attach(sc);
	if (error)
		return;

	if (kthread_create(pgt_per_device_kthread, sc, NULL,
	    sc->sc_dev.dv_xname) != 0)
		return;

	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

int
pgt_detach(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
		/* device was not initialized correctly, so leave early */
		goto out;

	/* stop card */
	pgt_stop(sc, SC_DYING);
	pgt_reboot(sc);

	ieee80211_ifdetach(&sc->sc_ic.ic_if);
	if_detach(&sc->sc_ic.ic_if);

out:
	/* disable card if possible */
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);

	pgt_dma_free(sc);

	return (0);
}

void
pgt_reboot(struct pgt_softc *sc)
{
	uint32_t reg;

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_RESET_DELAY);
}

void
pgt_init_intr(struct pgt_softc *sc)
{
	if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: spurious initialization\n",
			    sc->sc_dev.dv_xname));
	} else {
		sc->sc_flags &= ~SC_UNINITIALIZED;
		wakeup(&sc->sc_flags);
	}
}

/*
 * With the `hack' flag set (see PGT_BUGGY_INTERRUPT_RECOVERY), the data
 * receive queues are left alone; only the other queues are serviced.
 */
void
pgt_update_intr(struct pgt_softc *sc, int hack)
{
	/* priority order */
	enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
	    PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
	    PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
	    PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
	};
	struct mbuf *m;
	uint32_t npend;
	unsigned int dirtycount;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_debug_events(sc, "intr");
	/*
	 * Check for completion of tx in their dirty queues.
	 * Check completion of rx into their dirty queues.
	 */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		size_t qdirty, qfree;

		qdirty = sc->sc_dirtyq_count[pqs[i]];
		qfree = sc->sc_freeq_count[pqs[i]];
		/*
		 * We want the wrap-around here.
		 */
		if (pgt_queue_is_rx(pqs[i])) {
			int data;

			data = pgt_queue_is_data(pqs[i]);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
			if (hack && data)
				continue;
#endif
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			/*
			 * Receive queues clean up below, so qdirty must
			 * always be 0.
			 */
			if (npend > qfree) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: rx queue [%u] "
					    "overflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qfree));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			while (qfree-- > npend)
				pgt_rxdone(sc, pqs[i]);
		} else {
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			if (npend > qdirty) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: tx queue [%u] "
					    "underflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qdirty));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			/*
			 * If the free queue was empty, or the data transmit
			 * queue just became empty, wake up any waiters.
			 */
			if (qdirty > npend) {
				if (pgt_queue_is_data(pqs[i])) {
					sc->sc_ic.ic_if.if_timer = 0;
					sc->sc_ic.ic_if.if_flags &=
					    ~IFF_OACTIVE;
				}
				while (qdirty-- > npend)
					pgt_txdone(sc, pqs[i]);
			}
		}
	}

	/*
	 * This is the deferred completion for received management frames
	 * and where we queue network frames for stack input.
	 */
	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
		struct pgt_mgmt_desc *pmd;

		pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
		/*
		 * If there is no mgmt request in progress or the operation
		 * returned is explicitly a trap, this pmd will essentially
		 * be ignored.
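		 * (pgt_mgmtrx_completion() itself copes with a NULL pmd.)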
		 */
		pgt_mgmtrx_completion(sc, pmd);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
	    htole32(dirtycount +
	    letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
	    htole32(dirtycount +
	    letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
	    htole32(dirtycount +
	    letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));

	/*
	 * Write out what we've finished with.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
}

struct mbuf *
pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
    struct mbuf *m, struct ieee80211_node **ni)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *frame;
	struct llc *snap;

	ic = &sc->sc_ic;
	if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
		*ni = ieee80211_ref_node(ic->ic_bss);
		(*ni)->ni_inact = 0;
		return (m);
	}

	M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
	if (m == NULL)
		return (m);
	if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
		m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
		if (m == NULL)
			return (m);
	}
	frame = mtod(m, struct ieee80211_frame *);
	snap = (struct llc *)&frame[1];
	if (ni != NULL) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			*ni = ieee80211_ref_node(ic->ic_bss);
		}
#ifndef IEEE80211_STA_ONLY
		else {
			*ni = ieee80211_find_node(ic, eh->ether_shost);
			/*
			 * Make up associations for ad-hoc mode.  To support
			 * ad-hoc WPA, we'll need to maintain a bounded
			 * pool of ad-hoc stations.
			 */
			if (*ni == NULL &&
			    ic->ic_opmode != IEEE80211_M_HOSTAP) {
				*ni = ieee80211_dup_bss(ic, eh->ether_shost);
				if (*ni != NULL) {
					(*ni)->ni_associd = 1;
					ic->ic_newassoc(ic, *ni, 1);
				}
			}
			if (*ni == NULL) {
				m_freem(m);
				return (NULL);
			}
		}
#endif
		(*ni)->ni_inact = 0;
	}
	snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
	snap->llc_control = LLC_UI;
	snap->llc_snap.org_code[0] = 0;
	snap->llc_snap.org_code[1] = 0;
	snap->llc_snap.org_code[2] = 0;
	snap->llc_snap.ether_type = eh->ether_type;
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
	/* Doesn't look like much of the 802.11 header is available. */
	*(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
	/*
	 * Translate the addresses; WDS is not handled.
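	 * The switch below uses FROMDS addressing in station mode,
	 * NODS for IBSS/ad-hoc demo and TODS for hostap.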
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
		break;
	case IEEE80211_M_HOSTAP:
		/* HostAP forwarding defaults to being done on firmware. */
		frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
		break;
#endif
	default:
		break;
	}
	return (m);
}

void
pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
{
	struct ether_header eh;
	struct ifnet *ifp;
	struct ieee80211_channel *chan;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct ieee80211com *ic;
	struct pgt_rx_annex *pra;
	struct pgt_rx_header *pha;
	struct mbuf *next;
	unsigned int n;
	uint32_t rstamp;
	uint8_t rssi;

	ic = &sc->sc_ic;
	ifp = &ic->ic_if;
	for (next = m; m != NULL; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			if (m->m_len < sizeof(*pha)) {
				m = m_pullup(m, sizeof(*pha));
				if (m == NULL) {
					if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
						DPRINTF(("%s: m_pullup "
						    "failure\n",
						    sc->sc_dev.dv_xname));
					ifp->if_ierrors++;
					continue;
				}
			}
			pha = mtod(m, struct pgt_rx_header *);
			pra = NULL;
			goto input;
		}

		if (m->m_len < sizeof(*pra)) {
			m = m_pullup(m, sizeof(*pra));
			if (m == NULL) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: m_pullup failure\n",
					    sc->sc_dev.dv_xname));
				ifp->if_ierrors++;
				continue;
			}
		}
		pra = mtod(m, struct pgt_rx_annex *);
		pha = &pra->pra_header;
		if (sc->sc_debug & SC_DEBUG_RXANNEX)
			DPRINTF(("%s: rx annex: ? %04x "
			    "len %u clock %u flags %02x ? %02x rate %u ? %02x "
			    "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pha->pra_unknown0),
			    letoh16(pha->pra_length),
			    letoh32(pha->pra_clock), pha->pra_flags,
			    pha->pra_unknown1, pha->pra_rate,
			    pha->pra_unknown2, letoh32(pha->pra_frequency),
			    pha->pra_unknown3, pha->pra_rssi,
			    pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
		if (sc->sc_debug & SC_DEBUG_RXETHER)
			DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    ether_sprintf(pra->pra_ether_dhost),
			    ether_sprintf(pra->pra_ether_shost),
			    ntohs(pra->pra_ether_type)));

		memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
		memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
		eh.ether_type = pra->pra_ether_type;

input:
		/*
		 * This flag is set if e.g. packet could not be decrypted.
		 */
		if (pha->pra_flags & PRA_FLAG_BAD) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		/*
		 * After getting what we want, chop off the annex, then
		 * turn into something that looks like it really was
		 * 802.11.
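		 * The annex header supplies the RSSI, timestamp and
		 * frequency extracted just below.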
		 */
		rssi = pha->pra_rssi;
		rstamp = letoh32(pha->pra_clock);
		n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
		if (n <= IEEE80211_CHAN_MAX)
			chan = &ic->ic_channels[n];
		else
			chan = ic->ic_bss->ni_chan;
		/* Send to 802.3 listeners. */
		if (pra) {
			m_adj(m, sizeof(*pra));
		} else
			m_adj(m, sizeof(*pha));

		m = pgt_ieee80211_encap(sc, &eh, m, &ni);
		if (m != NULL) {
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;

				tap->wr_flags = 0;
				tap->wr_chan_freq = htole16(chan->ic_freq);
				tap->wr_chan_flags = htole16(chan->ic_flags);
				tap->wr_rssi = rssi;
				tap->wr_max_rssi = ic->ic_max_rssi;

				mb.m_data = (caddr_t)tap;
				mb.m_len = sc->sc_rxtap_len;
				mb.m_next = m;
				mb.m_nextpkt = NULL;
				mb.m_type = 0;
				mb.m_flags = 0;
				bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
			}
#endif
			rxi.rxi_flags = 0;
			ni->ni_rssi = rxi.rxi_rssi = rssi;
			ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
			ieee80211_input(ifp, m, ni, &rxi);
			/*
			 * The frame may have caused the node to be marked for
			 * reclamation (e.g. in response to a DEAUTH message)
			 * so use free_node here instead of unref_node.
			 */
			if (ni == ic->ic_bss)
				ieee80211_unref_node(&ni);
			else
				ieee80211_release_node(&sc->sc_ic, ni);
		} else {
			ifp->if_ierrors++;
		}
	}
}

void
pgt_wakeup_intr(struct pgt_softc *sc)
{
	int shouldupdate;
	int i;

	shouldupdate = 0;
	/* Check for any queues being empty before updating. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			shouldupdate = pgt_queue_frags_pending(sc, i);
		else
			shouldupdate = pgt_queue_frags_pending(sc, i) <
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		shouldupdate = 1;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: wakeup interrupt (update = %d)\n",
		    sc->sc_dev.dv_xname, shouldupdate));
	sc->sc_flags &= ~SC_POWERSAVE;
	if (shouldupdate) {
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_sleep_intr(struct pgt_softc *sc)
{
	int allowed;
	int i;

	allowed = 1;
	/* Check for any queues not being empty before allowing.
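	 * A tx queue must be fully drained and an rx queue fully
	 * replenished before the chip may go to sleep.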
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			allowed = pgt_queue_frags_pending(sc, i) == 0;
		else
			allowed = pgt_queue_frags_pending(sc, i) >=
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		allowed = 0;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
		    sc->sc_dev.dv_xname, allowed));
	if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
		sc->sc_flags |= SC_POWERSAVE;
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_empty_traps(struct pgt_softc_kthread *sck)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;

	while (!TAILQ_EMPTY(&sck->sck_traps)) {
		pa = TAILQ_FIRST(&sck->sck_traps);
		TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
		m = pa->pa_mbuf;
		m_freem(m);
	}
}

void
pgt_per_device_kthread(void *argp)
{
	struct pgt_softc *sc;
	struct pgt_softc_kthread *sck;
	struct pgt_async_trap *pa;
	struct mbuf *m;
	int s;

	sc = argp;
	sck = &sc->sc_kthread;
	while (!sck->sck_exit) {
		if (!sck->sck_update && !sck->sck_reset &&
		    TAILQ_EMPTY(&sck->sck_traps))
			tsleep(&sc->sc_kthread, 0, "pgtkth", 0);
		if (sck->sck_reset) {
			DPRINTF(("%s: [thread] async reset\n",
			    sc->sc_dev.dv_xname));
			sck->sck_reset = 0;
			sck->sck_update = 0;
			pgt_empty_traps(sck);
			s = splnet();
			pgt_stop(sc, SC_NEEDS_RESET);
			splx(s);
		} else if (!TAILQ_EMPTY(&sck->sck_traps)) {
			DPRINTF(("%s: [thread] got a trap\n",
			    sc->sc_dev.dv_xname));
			pa = TAILQ_FIRST(&sck->sck_traps);
			TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
			m = pa->pa_mbuf;
			m_adj(m, sizeof(*pa));
			pgt_update_sw_from_hw(sc, pa, m);
			m_freem(m);
		} else if (sck->sck_update) {
			sck->sck_update = 0;
			pgt_update_sw_from_hw(sc, NULL, NULL);
		}
	}
	pgt_empty_traps(sck);
	kthread_exit(0);
}

void
pgt_async_reset(struct pgt_softc *sc)
{
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	sc->sc_kthread.sck_reset = 1;
	wakeup(&sc->sc_kthread);
}

void
pgt_async_update(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_DYING)
		return;
	sc->sc_kthread.sck_update = 1;
	wakeup(&sc->sc_kthread);
}

int
pgt_intr(void *arg)
{
	struct pgt_softc *sc;
	struct ifnet *ifp;
	u_int32_t reg;

	sc = arg;
	ifp = &sc->sc_ic.ic_if;

	/*
	 * Here the Linux driver ands in the value of the INT_EN register,
	 * and masks off everything but the documented interrupt bits.  Why?
	 *
	 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
	 * other times.
	 */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
	    sc->sc_flags & SC_POWERSAVE) {
		/*
		 * Don't try handling the interrupt in sleep mode.
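		 * (Register state is presumably unreliable until the chip
		 * wakes back up.)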
		 */
		reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
		if (reg & PGT_CTRL_STAT_SLEEPMODE)
			return (0);
	}
	reg = pgt_read_4(sc, PGT_REG_INT_STAT);
	if (reg == 0)
		return (0); /* This interrupt is not from us */

	pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
	if (reg & PGT_INT_STAT_INIT)
		pgt_init_intr(sc);
	if (reg & PGT_INT_STAT_UPDATE) {
		pgt_update_intr(sc, 0);
		/*
		 * If we got an update, it's not really asleep.
		 */
		sc->sc_flags &= ~SC_POWERSAVE;
		/*
		 * Pretend I have any idea what the documentation
		 * would say, and just give it a shot sending an
		 * "update" after acknowledging the interrupt
		 * bits and writing out the new control block.
		 */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
	if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
		pgt_sleep_intr(sc);
	if (reg & PGT_INT_STAT_WAKEUP)
		pgt_wakeup_intr(sc);

	if (sc->sc_flags & SC_INTR_RESET) {
		sc->sc_flags &= ~SC_INTR_RESET;
		pgt_async_reset(sc);
	}

	if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
		DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
		    sc->sc_dev.dv_xname,
		    reg & ~PGT_INT_STAT_SOURCES,
		    pgt_read_4(sc, PGT_REG_CTRL_STAT)));
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		pgt_start(ifp);

	return (1);
}

void
pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
	TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* Management frames want completion information. */
	if (sc->sc_debug & SC_DEBUG_QUEUES) {
		DPRINTF(("%s: queue: tx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
		if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
			struct pgt_mgmt_frame *pmf;

			pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
			DPRINTF(("%s: queue: txmgmt %p <- "
			    "(ver %u, op %u, flags %#x)\n",
			    sc->sc_dev.dv_xname,
			    pd, pmf->pmf_version, pmf->pmf_operation,
			    pmf->pmf_flags));
		}
	}
	pgt_unload_tx_desc_frag(sc, pd);
}

void
pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: rx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
	if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
	    pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
		DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
		    sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
}

/*
 * Traps are generally used for the firmware to report changes in state
 * back to the host.
 * Mostly this processes changes in link state, but
 * it needs to also be used to initiate WPA and other authentication
 * schemes in terms of client (station) or server (access point).
 */
void
pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
    size_t size)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;
	char *p;
	size_t total;

	if (sc->sc_flags & SC_DYING)
		return;

	total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
	if (total > MLEN) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m = NULL;
		}
	} else
		m = m_get(M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;
	else
		m->m_len = total;

	pa = mtod(m, struct pgt_async_trap *);
	p = mtod(m, char *) + sizeof(*pa);
	*(uint32_t *)p = oid;
	p += sizeof(uint32_t);
	memcpy(p, trapdata, size);
	pa->pa_mbuf = m;

	TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
	wakeup(&sc->sc_kthread);
}

/*
 * Process a completed management response (all requests should be
 * responded to, quickly) or an event (trap).
 */
void
pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	uint32_t oid, size;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
	TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
	sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
	    pd, pd_link);
	sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
	if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt desc too small: %u\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pd->pd_fragp->pf_size)));
		goto out_nopmd;
	}
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	if (pmf->pmf_version != PMF_VER) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt version %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_version));
		goto out_nopmd;
	}
	if (pmf->pmf_device != PMF_DEV) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt dev %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_device));
		goto out;
	}
	if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt flags %x\n",
			    sc->sc_dev.dv_xname,
			    pmf->pmf_flags & ~PMF_FLAG_VALID));
		goto out;
	}
	if (pmf->pmf_flags & PMF_FLAG_LE) {
		oid = letoh32(pmf->pmf_oid);
		size = letoh32(pmf->pmf_size);
	} else {
		oid = betoh32(pmf->pmf_oid);
		size = betoh32(pmf->pmf_size);
	}
	if (pmf->pmf_operation == PMF_OP_TRAP) {
		pmd = NULL; /* ignored */
		DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmf->pmf_operation, oid, size));
		pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
		    min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
		goto out_nopmd;
	}
	if (pmd == NULL) {
		if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
			DPRINTF(("%s: spurious mgmt received "
			    "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
			    pmf->pmf_operation, oid, size));
		goto out_nopmd;
	}
	switch (pmf->pmf_operation) {
	case PMF_OP_RESPONSE:
		pmd->pmd_error = 0;
		break;
	case PMF_OP_ERROR:
		pmd->pmd_error = EPERM;
		goto out;
	default:
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt op %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_operation));
		pmd->pmd_error = EIO;
		goto out;
	}
	if (oid != pmd->pmd_oid) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
			    sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
		pmd->pmd_oid = oid;
	}
	if (pmd->pmd_recvbuf != NULL) {
		if (size > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
				    sc->sc_dev.dv_xname, oid, size));
			pmd->pmd_error = EIO;
			goto out;
		}
		if (size > pmd->pmd_len)
			pmd->pmd_error = ENOMEM;
		else
			memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
			    size);
		pmd->pmd_len = size;
	}

out:
	TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
	wakeup_one(pmd);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
out_nopmd:
	pgt_reinit_rx_desc_frag(sc, pd);
}

/*
 * Queue packets for reception and defragmentation.  I don't know yet
 * whether the rx queue can become full enough to start, but not finish,
 * queueing a fragmented packet.
 */
struct mbuf *
pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct ifnet *ifp;
	struct pgt_desc *pd;
	struct mbuf *top, **mp, *m;
	size_t datalen;
	uint16_t morefrags, dataoff;
	int tlen = 0;

	ifp = &sc->sc_ic.ic_if;
	m = NULL;
	top = NULL;
	mp = &top;

	while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
		sc->sc_dirtyq_count[pq]--;
		datalen = letoh16(pd->pd_fragp->pf_size);
		dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
		morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);

		if (sc->sc_debug & SC_DEBUG_RXFRAG)
			DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n",
			    sc->sc_dev.dv_xname, datalen, dataoff,
			    pd->pd_fragp->pf_flags));

		/* Add the (two+?) bytes for the header.
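		 * dataoff is wherever the device placed the payload within
		 * the fragment buffer.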
		 */
		if (datalen + dataoff > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: data rx too big: %u\n",
				    sc->sc_dev.dv_xname, datalen));
			goto fail;
		}

		if (m == NULL)
			MGETHDR(m, M_DONTWAIT, MT_DATA);
		else
			m = m_get(M_DONTWAIT, MT_DATA);

		if (m == NULL)
			goto fail;
		if (datalen > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_free(m);
				goto fail;
			}
		}
		bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
		m->m_len = datalen;
		tlen += datalen;

		*mp = m;
		mp = &m->m_next;

		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
		sc->sc_freeq_count[pq]++;
		pgt_reinit_rx_desc_frag(sc, pd);

		if (!morefrags)
			break;
	}

	if (top) {
		top->m_pkthdr.len = tlen;
		top->m_pkthdr.rcvif = ifp;
	}
	return (top);

fail:
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	pgt_reinit_rx_desc_frag(sc, pd);

	ifp->if_ierrors++;
	if (top)
		m_freem(top);
	return (NULL);
}

int
pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
    void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_recvbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure getting oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

int
pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
    void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_sendbuf = arg;
	pmd.pmd_recvbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure retrieving oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

int
pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
    const void *arg, size_t arglen)
{
	struct pgt_mgmt_desc pmd;
	int error;

	bzero(&pmd, sizeof(pmd));
	pmd.pmd_sendbuf = arg;
	pmd.pmd_len = arglen;
	pmd.pmd_oid = oid;

	error = pgt_mgmt_request(sc, &pmd);
	if (error == 0)
		error = pmd.pmd_error;
	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
		DPRINTF(("%s: failure setting oid %#x: %d\n",
		    sc->sc_dev.dv_xname, oid, error));

	return (error);
}

void
pgt_state_dump(struct pgt_softc *sc)
{
	printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    pgt_read_4(sc, PGT_REG_CTRL_STAT),
	    pgt_read_4(sc, PGT_REG_INT_STAT));

	printf("%s: state dump: driver curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[5]));

	printf("%s: state dump: device curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_device_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[5]));
}

int
pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	int error, i;

	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return (EIO);
	if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
		return (ENOMEM);
	pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
	if (pd == NULL)
		return (ENOMEM);
	error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
	if (error)
		return (error);
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	pmf->pmf_version = PMF_VER;
	/* "get" and "retrieve" operations look the same */
	if (pmd->pmd_recvbuf != NULL)
		pmf->pmf_operation = PMF_OP_GET;
	else
		pmf->pmf_operation = PMF_OP_SET;
	pmf->pmf_oid = htobe32(pmd->pmd_oid);
	pmf->pmf_device = PMF_DEV;
	pmf->pmf_flags = 0;
	pmf->pmf_size = htobe32(pmd->pmd_len);
	/* "set" and "retrieve" operations both send data */
	if (pmd->pmd_sendbuf != NULL)
		memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len);
	else
		bzero(pmf + 1, pmd->pmd_len);
	pmd->pmd_error = EINPROGRESS;
	TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
	pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
	    sizeof(*pmf) + pmd->pmd_len, 0);
	/*
	 * Try for one second, triggering 10 times.
	 *
	 * Do our best to work around seemingly buggy CardBus controllers
	 * on Soekris 4521 that fail to get interrupts with alarming
	 * regularity: run as if an interrupt occurred and service every
	 * queue except for mbuf reception.
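	 * (Hence the explicit pgt_update_intr() call in the retry loop
	 * below when PGT_BUGGY_INTERRUPT_RECOVERY is defined.)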
1703 */ 1704 i = 0; 1705 do { 1706 if (tsleep(pmd, 0, "pgtmgm", hz / 10) != EWOULDBLOCK) 1707 break; 1708 if (pmd->pmd_error != EINPROGRESS) 1709 break; 1710 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) { 1711 pmd->pmd_error = EIO; 1712 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link); 1713 break; 1714 } 1715 if (i != 9) 1716 pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX); 1717 #ifdef PGT_BUGGY_INTERRUPT_RECOVERY 1718 pgt_update_intr(sc, 0); 1719 #endif 1720 } while (i++ < 10); 1721 1722 if (pmd->pmd_error == EINPROGRESS) { 1723 printf("%s: timeout waiting for management " 1724 "packet response to %#x\n", 1725 sc->sc_dev.dv_xname, pmd->pmd_oid); 1726 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link); 1727 if (sc->sc_debug & SC_DEBUG_UNEXPECTED) 1728 pgt_state_dump(sc); 1729 pgt_async_reset(sc); 1730 error = ETIMEDOUT; 1731 } else 1732 error = 0; 1733 1734 return (error); 1735 } 1736 1737 void 1738 pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd, 1739 uint16_t len, int morecoming) 1740 { 1741 TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link); 1742 sc->sc_freeq_count[pq]--; 1743 TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link); 1744 sc->sc_dirtyq_count[pq]++; 1745 if (sc->sc_debug & SC_DEBUG_QUEUES) 1746 DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname, 1747 pd->pd_fragnum, pq)); 1748 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 1749 sc->sc_cbdmam->dm_mapsize, 1750 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE); 1751 if (morecoming) 1752 pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF); 1753 pd->pd_fragp->pf_size = htole16(len); 1754 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, 1755 pd->pd_dmam->dm_mapsize, 1756 BUS_DMASYNC_POSTWRITE); 1757 sc->sc_cb->pcb_driver_curfrag[pq] = 1758 htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1); 1759 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 1760 sc->sc_cbdmam->dm_mapsize, 1761 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD); 1762 if (!morecoming) 1763 pgt_maybe_trigger(sc, pq); 1764 } 1765 1766 void 1767 pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq) 1768 { 1769 unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */ 1770 uint32_t reg; 1771 1772 if (sc->sc_debug & SC_DEBUG_TRIGGER) 1773 DPRINTF(("%s: triggered by queue [%u]\n", 1774 sc->sc_dev.dv_xname, pq)); 1775 pgt_debug_events(sc, "trig"); 1776 if (sc->sc_flags & SC_POWERSAVE) { 1777 /* Magic values ahoy? 
		 */
		if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
			do {
				reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
				if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
					DELAY(PGT_WRITEIO_DELAY);
			} while (tries-- != 0);
			if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: timeout triggering from "
					    "sleep mode\n",
					    sc->sc_dev.dv_xname));
				pgt_async_reset(sc);
				return;
			}
		}
		pgt_write_4_flush(sc, PGT_REG_DEV_INT,
		    PGT_DEV_INT_WAKEUP);
		DELAY(PGT_WRITEIO_DELAY);
		/* read the status back in */
		(void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
		DELAY(PGT_WRITEIO_DELAY);
	} else {
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

struct ieee80211_node *
pgt_ieee80211_node_alloc(struct ieee80211com *ic)
{
	struct pgt_ieee80211_node *pin;

	pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pin != NULL) {
		pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
	}
	return (struct ieee80211_node *)pin;
}

void
pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
    int reallynew)
{
	ieee80211_ref_node(ni);
}

void
pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct pgt_ieee80211_node *pin;

	pin = (struct pgt_ieee80211_node *)ni;
	free(pin, M_DEVBUF, 0);
}

void
pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
    const struct ieee80211_node *src)
{
	const struct pgt_ieee80211_node *psrc;
	struct pgt_ieee80211_node *pdst;

	psrc = (const struct pgt_ieee80211_node *)src;
	pdst = (struct pgt_ieee80211_node *)dst;
	bcopy(psrc, pdst, sizeof(*psrc));
}

int
pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
    int type, int arg1, int arg2)
{
	return (EOPNOTSUPP);
}

int
pgt_net_attach(struct pgt_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_rateset *rs;
	uint8_t rates[IEEE80211_RATE_MAXSIZE];
	struct pgt_obj_buffer psbuffer;
	struct pgt_obj_frequencies *freqs;
	uint32_t phymode, country;
	unsigned int chan, i, j, firstchan = -1;
	int error;

	psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
	psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
	error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer,
	    sizeof(psbuffer));
	if (error)
		return (error);
	error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
	if (error)
		return (error);
	error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
	    sizeof(ic->ic_myaddr));
	if (error)
		return (error);
	error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
	if (error)
		return (error);

	ifp->if_softc = sc;
	ifp->if_ioctl = pgt_ioctl;
	ifp->if_start = pgt_start;
	ifp->if_watchdog = pgt_watchdog;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set channels
	 *
	 * Prism hardware likes to report supported frequencies that are
	 * not actually available for the
country of origin. 1896 */ 1897 j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t); 1898 freqs = malloc(j, M_DEVBUF, M_WAITOK); 1899 error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j); 1900 if (error) { 1901 free(freqs, M_DEVBUF, 0); 1902 return (error); 1903 } 1904 1905 for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) { 1906 chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]), 1907 0); 1908 1909 if (chan > IEEE80211_CHAN_MAX) { 1910 printf("%s: reported bogus channel (%uMHz)\n", 1911 sc->sc_dev.dv_xname, chan); 1912 free(freqs, M_DEVBUF, 0); 1913 return (EIO); 1914 } 1915 1916 if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) { 1917 if (!(phymode & htole32(PGT_OID_PHY_2400MHZ))) 1918 continue; 1919 if (country == letoh32(PGT_COUNTRY_USA)) { 1920 if (chan >= 12 && chan <= 14) 1921 continue; 1922 } 1923 if (chan <= 14) 1924 ic->ic_channels[chan].ic_flags |= 1925 IEEE80211_CHAN_B; 1926 ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG; 1927 } else { 1928 if (!(phymode & htole32(PGT_OID_PHY_5000MHZ))) 1929 continue; 1930 ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A; 1931 } 1932 1933 ic->ic_channels[chan].ic_freq = 1934 letoh16(freqs->pof_freqlist_mhz[i]); 1935 1936 if (firstchan == -1) 1937 firstchan = chan; 1938 1939 DPRINTF(("%s: set channel %d to freq %uMHz\n", 1940 sc->sc_dev.dv_xname, chan, 1941 letoh16(freqs->pof_freqlist_mhz[i]))); 1942 } 1943 free(freqs, M_DEVBUF, 0); 1944 if (firstchan == -1) { 1945 printf("%s: no channels found\n", sc->sc_dev.dv_xname); 1946 return (EIO); 1947 } 1948 1949 /* 1950 * Set rates 1951 */ 1952 bzero(rates, sizeof(rates)); 1953 error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates)); 1954 if (error) 1955 return (error); 1956 for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) { 1957 switch (rates[i]) { 1958 case 2: 1959 case 4: 1960 case 11: 1961 case 22: 1962 case 44: /* maybe */ 1963 if (phymode & htole32(PGT_OID_PHY_2400MHZ)) { 1964 rs = &ic->ic_sup_rates[IEEE80211_MODE_11B]; 1965 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1966 } 1967 default: 1968 if (phymode & htole32(PGT_OID_PHY_2400MHZ)) { 1969 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 1970 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1971 } 1972 if (phymode & htole32(PGT_OID_PHY_5000MHZ)) { 1973 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 1974 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1975 } 1976 rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO]; 1977 rs->rs_rates[rs->rs_nrates++] = rates[i]; 1978 } 1979 } 1980 1981 ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT | 1982 IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR; 1983 #ifndef IEEE80211_STA_ONLY 1984 ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP; 1985 #endif 1986 ic->ic_opmode = IEEE80211_M_STA; 1987 ic->ic_state = IEEE80211_S_INIT; 1988 1989 if_attach(ifp); 1990 ieee80211_ifattach(ifp); 1991 1992 /* setup post-attach/pre-lateattach vector functions */ 1993 sc->sc_newstate = ic->ic_newstate; 1994 ic->ic_newstate = pgt_newstate; 1995 ic->ic_node_alloc = pgt_ieee80211_node_alloc; 1996 ic->ic_newassoc = pgt_ieee80211_newassoc; 1997 ic->ic_node_free = pgt_ieee80211_node_free; 1998 ic->ic_node_copy = pgt_ieee80211_node_copy; 1999 ic->ic_send_mgmt = pgt_ieee80211_send_mgmt; 2000 ic->ic_max_rssi = 255; /* rssi is a u_int8_t */ 2001 2002 /* let net80211 handle switching around the media + resetting */ 2003 ieee80211_media_init(ifp, pgt_media_change, pgt_media_status); 2004 2005 #if NBPFILTER > 0 2006 bpfattach(&sc->sc_drvbpf, ifp, 
DLT_IEEE802_11_RADIO, 2007 sizeof(struct ieee80211_frame) + 64); 2008 2009 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 2010 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 2011 sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT); 2012 2013 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 2014 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 2015 sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT); 2016 #endif 2017 return (0); 2018 } 2019 2020 int 2021 pgt_media_change(struct ifnet *ifp) 2022 { 2023 struct pgt_softc *sc = ifp->if_softc; 2024 int error; 2025 2026 error = ieee80211_media_change(ifp); 2027 if (error == ENETRESET) { 2028 pgt_update_hw_from_sw(sc, 0, 0); 2029 error = 0; 2030 } 2031 2032 return (error); 2033 } 2034 2035 void 2036 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr) 2037 { 2038 struct pgt_softc *sc = ifp->if_softc; 2039 struct ieee80211com *ic = &sc->sc_ic; 2040 uint32_t rate; 2041 int s; 2042 2043 imr->ifm_status = 0; 2044 imr->ifm_active = IFM_IEEE80211 | IFM_NONE; 2045 2046 if (!(ifp->if_flags & IFF_UP)) 2047 return; 2048 2049 s = splnet(); 2050 2051 if (ic->ic_fixed_rate != -1) { 2052 rate = ic->ic_sup_rates[ic->ic_curmode]. 2053 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 2054 } else { 2055 if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate))) 2056 goto out; 2057 rate = letoh32(rate); 2058 if (sc->sc_debug & SC_DEBUG_LINK) { 2059 DPRINTF(("%s: %s: link rate %u\n", 2060 sc->sc_dev.dv_xname, __func__, rate)); 2061 } 2062 if (rate == 0) 2063 goto out; 2064 } 2065 2066 imr->ifm_status = IFM_AVALID; 2067 imr->ifm_active = IFM_IEEE80211; 2068 if (ic->ic_state == IEEE80211_S_RUN) 2069 imr->ifm_status |= IFM_ACTIVE; 2070 2071 imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode); 2072 2073 switch (ic->ic_opmode) { 2074 case IEEE80211_M_STA: 2075 break; 2076 #ifndef IEEE80211_STA_ONLY 2077 case IEEE80211_M_IBSS: 2078 imr->ifm_active |= IFM_IEEE80211_ADHOC; 2079 break; 2080 case IEEE80211_M_AHDEMO: 2081 imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0; 2082 break; 2083 case IEEE80211_M_HOSTAP: 2084 imr->ifm_active |= IFM_IEEE80211_HOSTAP; 2085 break; 2086 #endif 2087 case IEEE80211_M_MONITOR: 2088 imr->ifm_active |= IFM_IEEE80211_MONITOR; 2089 break; 2090 default: 2091 break; 2092 } 2093 2094 out: 2095 splx(s); 2096 } 2097 2098 /* 2099 * Start data frames. Critical sections surround the boundary of 2100 * management frame transmission / transmission acknowledgement / response 2101 * and data frame transmission / transmission acknowledgement. 2102 */ 2103 void 2104 pgt_start(struct ifnet *ifp) 2105 { 2106 struct pgt_softc *sc; 2107 struct ieee80211com *ic; 2108 struct pgt_desc *pd; 2109 struct mbuf *m; 2110 int error; 2111 2112 sc = ifp->if_softc; 2113 ic = &sc->sc_ic; 2114 2115 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) || 2116 !(ifp->if_flags & IFF_RUNNING) || 2117 ic->ic_state != IEEE80211_S_RUN) { 2118 return; 2119 } 2120 2121 /* 2122 * Management packets should probably be MLME frames 2123 * (i.e. hostap "managed" mode); we don't touch the 2124 * net80211 management queue. 
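	 *
	 * Data frames that fit in PGT_FRAG_SIZE bytes go out in a single
	 * descriptor; larger ones are split across two.  (A sketch of
	 * this size decision appears at the end of this file.)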
2125 */ 2126 for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] < 2127 PGT_QUEUE_FULL_THRESHOLD && !IFQ_IS_EMPTY(&ifp->if_snd);) { 2128 pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]); 2129 IFQ_POLL(&ifp->if_snd, m); 2130 if (m == NULL) 2131 break; 2132 if (m->m_pkthdr.len <= PGT_FRAG_SIZE) { 2133 error = pgt_load_tx_desc_frag(sc, 2134 PGT_QUEUE_DATA_LOW_TX, pd); 2135 if (error) 2136 break; 2137 IFQ_DEQUEUE(&ifp->if_snd, m); 2138 m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem); 2139 pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX, 2140 pd, m->m_pkthdr.len, 0); 2141 } else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) { 2142 struct pgt_desc *pd2; 2143 2144 /* 2145 * Transmit a fragmented frame if there is 2146 * not enough room in one fragment; limit 2147 * to two fragments (802.11 itself couldn't 2148 * even support a full two.) 2149 */ 2150 if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 > 2151 PGT_QUEUE_FULL_THRESHOLD) 2152 break; 2153 pd2 = TAILQ_NEXT(pd, pd_link); 2154 error = pgt_load_tx_desc_frag(sc, 2155 PGT_QUEUE_DATA_LOW_TX, pd); 2156 if (error == 0) { 2157 error = pgt_load_tx_desc_frag(sc, 2158 PGT_QUEUE_DATA_LOW_TX, pd2); 2159 if (error) { 2160 pgt_unload_tx_desc_frag(sc, pd); 2161 TAILQ_INSERT_HEAD(&sc->sc_freeq[ 2162 PGT_QUEUE_DATA_LOW_TX], pd, 2163 pd_link); 2164 } 2165 } 2166 if (error) 2167 break; 2168 IFQ_DEQUEUE(&ifp->if_snd, m); 2169 m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem); 2170 pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX, 2171 pd, PGT_FRAG_SIZE, 1); 2172 m_copydata(m, PGT_FRAG_SIZE, 2173 m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem); 2174 pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX, 2175 pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0); 2176 } else { 2177 IFQ_DEQUEUE(&ifp->if_snd, m); 2178 ifp->if_oerrors++; 2179 m_freem(m); 2180 m = NULL; 2181 } 2182 if (m != NULL) { 2183 struct ieee80211_node *ni; 2184 #if NBPFILTER > 0 2185 if (ifp->if_bpf != NULL) 2186 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 2187 #endif 2188 ifp->if_opackets++; 2189 ifp->if_timer = 1; 2190 sc->sc_txtimer = 5; 2191 ni = ieee80211_find_txnode(&sc->sc_ic, 2192 mtod(m, struct ether_header *)->ether_dhost); 2193 if (ni != NULL) { 2194 ni->ni_inact = 0; 2195 if (ni != ic->ic_bss) 2196 ieee80211_release_node(&sc->sc_ic, ni); 2197 } 2198 #if NBPFILTER > 0 2199 if (sc->sc_drvbpf != NULL) { 2200 struct mbuf mb; 2201 struct ether_header eh; 2202 struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap; 2203 2204 bcopy(mtod(m, struct ether_header *), &eh, 2205 sizeof(eh)); 2206 m_adj(m, sizeof(eh)); 2207 m = pgt_ieee80211_encap(sc, &eh, m, NULL); 2208 2209 tap->wt_flags = 0; 2210 //tap->wt_rate = rate; 2211 tap->wt_rate = 0; 2212 tap->wt_chan_freq = 2213 htole16(ic->ic_bss->ni_chan->ic_freq); 2214 tap->wt_chan_flags = 2215 htole16(ic->ic_bss->ni_chan->ic_flags); 2216 2217 if (m != NULL) { 2218 mb.m_data = (caddr_t)tap; 2219 mb.m_len = sc->sc_txtap_len; 2220 mb.m_next = m; 2221 mb.m_nextpkt = NULL; 2222 mb.m_type = 0; 2223 mb.m_flags = 0; 2224 2225 bpf_mtap(sc->sc_drvbpf, &mb, 2226 BPF_DIRECTION_OUT); 2227 } 2228 } 2229 #endif 2230 if (m != NULL) 2231 m_freem(m); 2232 } 2233 } 2234 } 2235 2236 int 2237 pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req) 2238 { 2239 struct pgt_softc *sc = ifp->if_softc; 2240 struct ifaddr *ifa; 2241 struct ifreq *ifr; 2242 struct wi_req *wreq; 2243 struct ieee80211_nodereq_all *na; 2244 struct ieee80211com *ic; 2245 struct pgt_obj_bsslist *pob; 2246 struct wi_scan_p2_hdr *p2hdr; 2247 struct wi_scan_res *res; 2248 uint32_t noise; 2249 int maxscan, i, j, s, error = 0; 2250 2251 ic = 
&sc->sc_ic;
	ifr = (struct ifreq *)req;

	s = splnet();
	switch (cmd) {
	case SIOCS80211SCAN:
		/*
		 * This chip always scans as soon as it is initialized.
		 */

		/*
		 * Give us a bit of time to scan in case we were not
		 * initialized before, and let the userland process wait.
		 */
		tsleep(&sc->sc_flags, 0, "pgtsca", hz * SCAN_TIMEOUT);

		break;
	case SIOCG80211ALLNODES: {
		struct ieee80211_nodereq *nr = NULL;
		na = (struct ieee80211_nodereq_all *)req;
		wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);

		maxscan = PGT_OBJ_BSSLIST_NBSS;
		pob = malloc(sizeof(*pob) +
		    sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
		error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
		    sizeof(noise));

		if (error == 0) {
			noise = letoh32(noise);
			error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
			    sizeof(*pob) +
			    sizeof(struct pgt_obj_bss) * maxscan);
		}

		if (error == 0) {
			maxscan = min(PGT_OBJ_BSSLIST_NBSS,
			    letoh32(pob->pob_count));
			maxscan = min(maxscan,
			    (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
			    WI_PRISM2_RES_SIZE);
			p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
			p2hdr->wi_rsvd = 0;
			p2hdr->wi_reason = 1;
			wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
			    sizeof(*p2hdr) / 2;
			wreq->wi_type = WI_RID_SCAN_RES;
		}

		for (na->na_nodes = j = i = 0; error == 0 && i < maxscan &&
		    (na->na_size >= j + sizeof(struct ieee80211_nodereq));
		    i++) {
			/* allocate node space */
			if (nr == NULL)
				nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);

			/* get next BSS scan result */
			res = (struct wi_scan_res *)
			    ((char *)&wreq->wi_val + sizeof(*p2hdr) +
			    i * WI_PRISM2_RES_SIZE);
			pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
			    res, noise);

			/* copy it to node structure for ifconfig to read */
			bzero(nr, sizeof(*nr));
			IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
			IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
			nr->nr_channel = letoh16(res->wi_chan);
			nr->nr_chan_flags = IEEE80211_CHAN_B;
			nr->nr_rssi = letoh16(res->wi_signal);
			nr->nr_max_rssi = 0; /* XXX */
			nr->nr_nwid_len = letoh16(res->wi_ssid_len);
			bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
			nr->nr_intval = letoh16(res->wi_interval);
			nr->nr_capinfo = letoh16(res->wi_capinfo);
			nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
			    (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
			    (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
			    (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
			nr->nr_nrates = 0;
			while (nr->nr_nrates < sizeof(res->wi_srates) &&
			    res->wi_srates[nr->nr_nrates] != 0) {
				nr->nr_rates[nr->nr_nrates] =
				    res->wi_srates[nr->nr_nrates] &
				    WI_VAR_SRATES_MASK;
				nr->nr_nrates++;
			}
			nr->nr_flags = 0;
			if (bcmp(nr->nr_macaddr, nr->nr_bssid,
			    IEEE80211_ADDR_LEN) == 0)
				nr->nr_flags |= IEEE80211_NODEREQ_AP;
			error = copyout(nr, (caddr_t)na->na_node + j,
			    sizeof(struct ieee80211_nodereq));
			if (error)
				break;

			/* point to next node entry */
			j += sizeof(struct ieee80211_nodereq);
			na->na_nodes++;
		}
		if (nr)
			free(nr, M_DEVBUF, 0);
		free(pob, M_DEVBUF, 0);
		free(wreq, M_DEVBUF, 0);
		break;
	}
	case SIOCSIFADDR:
		ifa = (struct ifaddr *)req;
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ic.ic_ac, ifa);
#endif
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				pgt_init(ifp);
				error = ENETRESET;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				pgt_stop(sc, SC_NEEDS_RESET);
				error = ENETRESET;
			}
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &ic->ic_ac) :
		    ether_delmulti(ifr, &ic->ic_ac);

		if (error == ENETRESET)
			error = 0;
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
			error = EINVAL;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = ieee80211_ioctl(ifp, cmd, req);
		break;
	}

	if (error == ENETRESET) {
		pgt_update_hw_from_sw(sc, 0, 0);
		error = 0;
	}
	splx(s);

	return (error);
}

void
pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
    struct wi_scan_res *scanres, uint32_t noise)
{
	struct ieee80211_rateset *rs;
	struct wi_scan_res ap;
	unsigned int i, n;

	rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
	bzero(&ap, sizeof(ap));
	ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
	ap.wi_noise = noise;
	ap.wi_signal = letoh16(pob->pob_rssi);
	IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
	ap.wi_interval = letoh16(pob->pob_beacon_period);
	ap.wi_capinfo = letoh16(pob->pob_capinfo);
	ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
	memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
	n = 0;
	for (i = 0; i < 16; i++) {
		if (letoh16(pob->pob_rates) & (1 << i)) {
			if (i >= rs->rs_nrates)
				break;
			ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
			if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
				break;
		}
	}
	memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
}

void
node_mark_active_ap(void *arg, struct ieee80211_node *ni)
{
	/*
	 * HostAP mode lets all nodes stick around unless
	 * the firmware AP kicks them off.
	 */
	ni->ni_inact = 0;
}

void
node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
{
	struct pgt_ieee80211_node *pin;

	/*
	 * As there is no association in ad-hoc, we let links just
	 * time out naturally as long as they are not holding any private
	 * configuration, such as 802.1x authorization.
	 */
	pin = (struct pgt_ieee80211_node *)ni;
	if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
		pin->pin_node.ni_inact = 0;
}

void
pgt_watchdog(struct ifnet *ifp)
{
	struct pgt_softc *sc;

	sc = ifp->if_softc;
	/*
	 * Check for timed out transmissions (and make sure to set
	 * this watchdog to fire again if there is still data in the
	 * output device queue).
	 */
	if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
		int count;

		ifp->if_timer = 1;
		if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
			count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: timeout %d data transmissions\n",
				    sc->sc_dev.dv_xname, count));
		}
	}
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	/*
	 * If we're going to kick the device out of power-save mode
	 * just to update the BSSID and such, we should not do it
	 * very often; need to determine in what way to do that.
	 */
	if (ifp->if_flags & IFF_RUNNING &&
	    sc->sc_ic.ic_state != IEEE80211_S_INIT &&
	    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
		pgt_async_update(sc);

#ifndef IEEE80211_STA_ONLY
	/*
	 * As a firmware-based HostAP, we should not time out
	 * nodes inside the driver in addition to the timeout
	 * that exists in the firmware.  The only things we
	 * should have to deal with timing out when doing HostAP
	 * are privacy-related.
	 */
	switch (sc->sc_ic.ic_opmode) {
	case IEEE80211_M_HOSTAP:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_ap, NULL);
		break;
	case IEEE80211_M_IBSS:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_adhoc, NULL);
		break;
	default:
		break;
	}
#endif
	ieee80211_watchdog(ifp);
	ifp->if_timer = 1;
}

int
pgt_init(struct ifnet *ifp)
{
	struct pgt_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	/* set default channel */
	ic->ic_bss->ni_chan = ic->ic_ibss_chan;

	if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
		pgt_update_hw_from_sw(sc,
		    ic->ic_state != IEEE80211_S_INIT,
		    ic->ic_opmode != IEEE80211_M_MONITOR);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Begin background scanning */
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);

	return (0);
}

/*
 * After most every configuration change, everything needs to be fully
 * reinitialized.  For some operations (currently, WEP settings
 * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove
 * "associations," and allows EAP authorization to occur again.
 * If keepassoc is specified, the reset operation should try to go
 * back to the BSS it had before.
2551 */ 2552 void 2553 pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc, int keepnodes) 2554 { 2555 struct ieee80211com *ic = &sc->sc_ic; 2556 struct arpcom *ac = &ic->ic_ac; 2557 struct ifnet *ifp = &ac->ac_if; 2558 struct pgt_obj_key keyobj; 2559 struct pgt_obj_ssid essid; 2560 uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1]; 2561 uint32_t mode, bsstype, config, profile, channel, slot, preamble; 2562 uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme; 2563 unsigned int i; 2564 int success, shouldbeup, s; 2565 2566 config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX; 2567 2568 /* 2569 * Promiscuous mode is currently a no-op since packets transmitted, 2570 * while in promiscuous mode, don't ever seem to go anywhere. 2571 */ 2572 shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP; 2573 2574 if (shouldbeup) { 2575 switch (ic->ic_opmode) { 2576 case IEEE80211_M_STA: 2577 if (ifp->if_flags & IFF_PROMISC) 2578 mode = PGT_MODE_CLIENT; /* what to do? */ 2579 else 2580 mode = PGT_MODE_CLIENT; 2581 bsstype = PGT_BSS_TYPE_STA; 2582 dot1x = PGT_DOT1X_AUTH_ENABLED; 2583 break; 2584 #ifndef IEEE80211_STA_ONLY 2585 case IEEE80211_M_IBSS: 2586 if (ifp->if_flags & IFF_PROMISC) 2587 mode = PGT_MODE_CLIENT; /* what to do? */ 2588 else 2589 mode = PGT_MODE_CLIENT; 2590 bsstype = PGT_BSS_TYPE_IBSS; 2591 dot1x = PGT_DOT1X_AUTH_ENABLED; 2592 break; 2593 case IEEE80211_M_HOSTAP: 2594 mode = PGT_MODE_AP; 2595 bsstype = PGT_BSS_TYPE_STA; 2596 /* 2597 * For IEEE 802.1x, we need to authenticate and 2598 * authorize hosts from here on or they remain 2599 * associated but without the ability to send or 2600 * receive normal traffic to us (courtesy the 2601 * firmware AP implementation). 2602 */ 2603 dot1x = PGT_DOT1X_AUTH_ENABLED; 2604 /* 2605 * WDS mode needs several things to work: 2606 * discovery of exactly how creating the WDS 2607 * links is meant to function, an interface 2608 * for this, and ability to encode or decode 2609 * the WDS frames. 
2610 */ 2611 if (sc->sc_wds) 2612 config |= PGT_CONFIG_WDS; 2613 break; 2614 #endif 2615 case IEEE80211_M_MONITOR: 2616 mode = PGT_MODE_PROMISCUOUS; 2617 bsstype = PGT_BSS_TYPE_ANY; 2618 dot1x = PGT_DOT1X_AUTH_NONE; 2619 break; 2620 default: 2621 goto badopmode; 2622 } 2623 } else { 2624 badopmode: 2625 mode = PGT_MODE_CLIENT; 2626 bsstype = PGT_BSS_TYPE_NONE; 2627 } 2628 2629 DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname)); 2630 switch (ic->ic_curmode) { 2631 case IEEE80211_MODE_11A: 2632 profile = PGT_PROFILE_A_ONLY; 2633 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC; 2634 DPRINTF(("IEEE80211_MODE_11A\n")); 2635 break; 2636 case IEEE80211_MODE_11B: 2637 profile = PGT_PROFILE_B_ONLY; 2638 preamble = PGT_OID_PREAMBLE_MODE_LONG; 2639 DPRINTF(("IEEE80211_MODE_11B\n")); 2640 break; 2641 case IEEE80211_MODE_11G: 2642 profile = PGT_PROFILE_G_ONLY; 2643 preamble = PGT_OID_PREAMBLE_MODE_SHORT; 2644 DPRINTF(("IEEE80211_MODE_11G\n")); 2645 break; 2646 case IEEE80211_MODE_TURBO: /* not handled */ 2647 /* FALLTHROUGH */ 2648 case IEEE80211_MODE_AUTO: 2649 profile = PGT_PROFILE_MIXED_G_WIFI; 2650 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC; 2651 DPRINTF(("IEEE80211_MODE_AUTO\n")); 2652 break; 2653 default: 2654 panic("unknown mode %d", ic->ic_curmode); 2655 } 2656 2657 switch (sc->sc_80211_ioc_auth) { 2658 case IEEE80211_AUTH_NONE: 2659 auth = PGT_AUTH_MODE_NONE; 2660 break; 2661 case IEEE80211_AUTH_OPEN: 2662 auth = PGT_AUTH_MODE_OPEN; 2663 break; 2664 default: 2665 auth = PGT_AUTH_MODE_SHARED; 2666 break; 2667 } 2668 2669 if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) { 2670 wep = 1; 2671 exunencrypted = 1; 2672 } else { 2673 wep = 0; 2674 exunencrypted = 0; 2675 } 2676 2677 mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO); 2678 wep = htole32(wep); 2679 exunencrypted = htole32(exunencrypted); 2680 profile = htole32(profile); 2681 preamble = htole32(preamble); 2682 bsstype = htole32(bsstype); 2683 config = htole32(config); 2684 mode = htole32(mode); 2685 2686 if (!wep || !sc->sc_dot1x) 2687 dot1x = PGT_DOT1X_AUTH_NONE; 2688 dot1x = htole32(dot1x); 2689 auth = htole32(auth); 2690 2691 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2692 slot = htole32(PGT_OID_SLOT_MODE_SHORT); 2693 else 2694 slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC); 2695 2696 if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) { 2697 if (keepassoc) 2698 channel = 0; 2699 else 2700 channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan); 2701 } else 2702 channel = ieee80211_chan2ieee(ic, ic->ic_des_chan); 2703 2704 DPRINTF(("%s: set rates", sc->sc_dev.dv_xname)); 2705 for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) { 2706 availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i]; 2707 DPRINTF((" %d", availrates[i])); 2708 } 2709 DPRINTF(("\n")); 2710 availrates[i++] = 0; 2711 2712 essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid)); 2713 memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length); 2714 2715 s = splnet(); 2716 for (success = 0; success == 0; success = 1) { 2717 SETOID(PGT_OID_PROFILE, &profile, sizeof(profile)); 2718 SETOID(PGT_OID_CONFIG, &config, sizeof(config)); 2719 SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme)); 2720 2721 if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) { 2722 SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr, 2723 sizeof(ac->ac_enaddr)); 2724 IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr); 2725 } 2726 2727 SETOID(PGT_OID_MODE, &mode, sizeof(mode)); 2728 SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype)); 2729 2730 if (channel != 0 && channel != IEEE80211_CHAN_ANY) 2731 
SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel)); 2732 2733 if (ic->ic_flags & IEEE80211_F_DESBSSID) { 2734 SETOID(PGT_OID_BSSID, ic->ic_des_bssid, 2735 sizeof(ic->ic_des_bssid)); 2736 } else if (keepassoc) { 2737 SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid, 2738 sizeof(ic->ic_bss->ni_bssid)); 2739 } 2740 2741 SETOID(PGT_OID_SSID, &essid, sizeof(essid)); 2742 2743 if (ic->ic_des_esslen > 0) 2744 SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid)); 2745 2746 SETOID(PGT_OID_RATES, &availrates, i); 2747 SETOID(PGT_OID_EXTENDED_RATES, &availrates, i); 2748 SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble)); 2749 SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot)); 2750 SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth)); 2751 SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted, 2752 sizeof(exunencrypted)); 2753 SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x)); 2754 SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep)); 2755 /* 2756 * Setting WEP key(s) 2757 */ 2758 if (letoh32(wep) != 0) { 2759 keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP; 2760 /* key 1 */ 2761 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2762 IEEE80211_KEYBUF_SIZE); 2763 keyobj.pok_length = min(keyobj.pok_length, 2764 ic->ic_nw_keys[0].k_len); 2765 bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key, 2766 keyobj.pok_length); 2767 SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj)); 2768 /* key 2 */ 2769 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2770 IEEE80211_KEYBUF_SIZE); 2771 keyobj.pok_length = min(keyobj.pok_length, 2772 ic->ic_nw_keys[1].k_len); 2773 bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key, 2774 keyobj.pok_length); 2775 SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj)); 2776 /* key 3 */ 2777 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2778 IEEE80211_KEYBUF_SIZE); 2779 keyobj.pok_length = min(keyobj.pok_length, 2780 ic->ic_nw_keys[2].k_len); 2781 bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key, 2782 keyobj.pok_length); 2783 SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj)); 2784 /* key 4 */ 2785 keyobj.pok_length = min(sizeof(keyobj.pok_key), 2786 IEEE80211_KEYBUF_SIZE); 2787 keyobj.pok_length = min(keyobj.pok_length, 2788 ic->ic_nw_keys[3].k_len); 2789 bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key, 2790 keyobj.pok_length); 2791 SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj)); 2792 2793 wepkey = htole32(ic->ic_wep_txkey); 2794 SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey)); 2795 } 2796 /* set mode again to commit */ 2797 SETOID(PGT_OID_MODE, &mode, sizeof(mode)); 2798 } 2799 splx(s); 2800 2801 if (success) { 2802 if (shouldbeup && keepnodes) 2803 sc->sc_flags |= SC_NOFREE_ALLNODES; 2804 if (shouldbeup) 2805 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 2806 else 2807 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2808 } else { 2809 printf("%s: problem setting modes\n", sc->sc_dev.dv_xname); 2810 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2811 } 2812 } 2813 2814 void 2815 pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid, 2816 struct pgt_obj_mlme *mlme) 2817 { 2818 struct ieee80211com *ic = &sc->sc_ic; 2819 struct pgt_ieee80211_node *pin; 2820 struct ieee80211_node *ni; 2821 2822 ni = ieee80211_find_node(ic, mlme->pom_address); 2823 pin = (struct pgt_ieee80211_node *)ni; 2824 switch (oid) { 2825 case PGT_OID_DISASSOCIATE: 2826 if (ni != NULL) 2827 ieee80211_release_node(&sc->sc_ic, ni); 2828 break; 2829 case PGT_OID_ASSOCIATE: 2830 if (ni == NULL) { 2831 ni = ieee80211_dup_bss(ic, mlme->pom_address); 2832 if (ni == NULL) 2833 break; 2834 ic->ic_newassoc(ic, ni, 1); 2835 pin = (struct 
			    pgt_ieee80211_node *)ni;
		}
		ni->ni_associd = letoh16(mlme->pom_id);
		pin->pin_mlme_state = letoh16(mlme->pom_state);
		break;
	default:
		if (pin != NULL)
			pin->pin_mlme_state = letoh16(mlme->pom_state);
		break;
	}
}

/*
 * Either in response to an event or after a certain amount of time,
 * synchronize our idea of the network we're part of from the hardware.
 */
void
pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
    struct mbuf *args)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct pgt_obj_ssid ssid;
	struct pgt_obj_bss bss;
	uint32_t channel, noise, ls;
	int error, s;

	if (pa != NULL) {
		struct pgt_obj_mlme *mlme;
		uint32_t oid;

		oid = *mtod(args, uint32_t *);
		m_adj(args, sizeof(uint32_t));
		if (sc->sc_debug & SC_DEBUG_TRAP)
			DPRINTF(("%s: trap: oid %#x len %u\n",
			    sc->sc_dev.dv_xname, oid, args->m_len));
		switch (oid) {
		case PGT_OID_LINK_STATE:
			if (args->m_len < sizeof(uint32_t))
				break;
			ls = letoh32(*mtod(args, uint32_t *));
			if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
				DPRINTF(("%s: %s: link rate %u\n",
				    sc->sc_dev.dv_xname, __func__, ls));
			if (ls)
				ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
			else
				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
			goto gotlinkstate;
		case PGT_OID_DEAUTHENTICATE:
		case PGT_OID_AUTHENTICATE:
		case PGT_OID_DISASSOCIATE:
		case PGT_OID_ASSOCIATE:
			if (args->m_len < sizeof(struct pgt_obj_mlme))
				break;
			mlme = mtod(args, struct pgt_obj_mlme *);
			if (sc->sc_debug & SC_DEBUG_TRAP)
				DPRINTF(("%s: mlme: address "
				    "%s id 0x%02x state 0x%02x code 0x%02x\n",
				    sc->sc_dev.dv_xname,
				    ether_sprintf(mlme->pom_address),
				    letoh16(mlme->pom_id),
				    letoh16(mlme->pom_state),
				    letoh16(mlme->pom_code)));
#ifndef IEEE80211_STA_ONLY
			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
				pgt_hostap_handle_mlme(sc, oid, mlme);
#endif
			break;
		}
		return;
	}
	if (ic->ic_state == IEEE80211_S_SCAN) {
		s = splnet();
		error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
		splx(s);
		if (error)
			return;
		DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
		    letoh32(ls)));
		if (ls != 0)
			ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	}

gotlinkstate:
	s = splnet();
	if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
		goto out;
	sc->sc_noise = letoh32(noise);
	if (ic->ic_state == IEEE80211_S_RUN) {
		if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
		    sizeof(channel)) != 0)
			goto out;
		channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
		ic->ic_bss->ni_chan = &ic->ic_channels[channel];
		if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
		    sizeof(ic->ic_bss->ni_bssid)) != 0)
			goto out;
		IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
		error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
		    sizeof(bss));
		if (error == 0)
			ic->ic_bss->ni_rssi = bss.pob_rssi;
		else if (error != EPERM)
			goto out;
		error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
		if (error)
			goto out;
		ic->ic_bss->ni_esslen = min(ssid.pos_length,
		    sizeof(ic->ic_bss->ni_essid));
		memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
		    ic->ic_bss->ni_esslen);
	}

out:
	splx(s);
}

int
pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 2954 { 2955 struct pgt_softc *sc = ic->ic_if.if_softc; 2956 enum ieee80211_state ostate; 2957 2958 ostate = ic->ic_state; 2959 2960 DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname, 2961 ieee80211_state_name[ostate], ieee80211_state_name[nstate])); 2962 2963 switch (nstate) { 2964 case IEEE80211_S_INIT: 2965 if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0) 2966 ic->ic_if.if_timer = 0; 2967 ic->ic_mgt_timer = 0; 2968 ic->ic_flags &= ~IEEE80211_F_SIBSS; 2969 ieee80211_free_allnodes(ic); 2970 break; 2971 case IEEE80211_S_SCAN: 2972 ic->ic_if.if_timer = 1; 2973 ic->ic_mgt_timer = 0; 2974 if (sc->sc_flags & SC_NOFREE_ALLNODES) 2975 sc->sc_flags &= ~SC_NOFREE_ALLNODES; 2976 else 2977 ieee80211_free_allnodes(ic); 2978 2979 #ifndef IEEE80211_STA_ONLY 2980 /* Just use any old channel; we override it anyway. */ 2981 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 2982 ieee80211_create_ibss(ic, ic->ic_ibss_chan); 2983 #endif 2984 break; 2985 case IEEE80211_S_RUN: 2986 ic->ic_if.if_timer = 1; 2987 break; 2988 default: 2989 break; 2990 } 2991 2992 return (sc->sc_newstate(ic, nstate, arg)); 2993 } 2994 2995 int 2996 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq) 2997 { 2998 int wokeup = 0; 2999 3000 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 3001 sc->sc_cbdmam->dm_mapsize, 3002 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE); 3003 sc->sc_cb->pcb_device_curfrag[pq] = 3004 sc->sc_cb->pcb_driver_curfrag[pq]; 3005 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0, 3006 sc->sc_cbdmam->dm_mapsize, 3007 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD); 3008 while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) { 3009 struct pgt_desc *pd; 3010 3011 pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]); 3012 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link); 3013 sc->sc_dirtyq_count[pq]--; 3014 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link); 3015 sc->sc_freeq_count[pq]++; 3016 pgt_unload_tx_desc_frag(sc, pd); 3017 if (sc->sc_debug & SC_DEBUG_QUEUES) 3018 DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n", 3019 sc->sc_dev.dv_xname, pd->pd_fragnum, pq)); 3020 wokeup++; 3021 if (pgt_queue_is_data(pq)) 3022 sc->sc_ic.ic_if.if_oerrors++; 3023 } 3024 3025 return (wokeup); 3026 } 3027 3028 int 3029 pgt_dma_alloc(struct pgt_softc *sc) 3030 { 3031 size_t size; 3032 int i, error, nsegs; 3033 3034 for (i = 0; i < PGT_QUEUE_COUNT; i++) { 3035 TAILQ_INIT(&sc->sc_freeq[i]); 3036 TAILQ_INIT(&sc->sc_dirtyq[i]); 3037 } 3038 3039 /* 3040 * control block 3041 */ 3042 size = sizeof(struct pgt_control_block); 3043 3044 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 3045 BUS_DMA_NOWAIT, &sc->sc_cbdmam); 3046 if (error != 0) { 3047 printf("%s: can not create DMA tag for control block\n", 3048 sc->sc_dev.dv_xname); 3049 goto out; 3050 } 3051 3052 error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 3053 0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 3054 if (error != 0) { 3055 printf("%s: can not allocate DMA memory for control block\n", 3056 sc->sc_dev.dv_xname); 3057 goto out; 3058 } 3059 3060 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs, 3061 size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT); 3062 if (error != 0) { 3063 printf("%s: can not map DMA memory for control block\n", 3064 sc->sc_dev.dv_xname); 3065 goto out; 3066 } 3067 3068 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam, 3069 sc->sc_cb, size, NULL, BUS_DMA_NOWAIT); 3070 if (error != 0) { 3071 printf("%s: can not load DMA map for control block\n", 3072 sc->sc_dev.dv_xname); 3073 goto 
out;
	}

	/*
	 * powersave
	 */
	size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
	if (error != 0) {
		printf("%s: can not create DMA tag for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
	    0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: can not allocate DMA memory for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
	    size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can not map DMA memory for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
	    sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
	if (error != 0) {
		printf("%s: can not load DMA map for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	/*
	 * fragments
	 */
	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
	if (error != 0)
		goto out;

out:
	if (error) {
		printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
		pgt_dma_free(sc);
	}

	return (error);
}

int
pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;
	size_t i, qsize;
	int error, nsegs;

	switch (pq) {
	case PGT_QUEUE_DATA_LOW_RX:
		qsize = PGT_QUEUE_DATA_RX_SIZE;
		break;
	case PGT_QUEUE_DATA_LOW_TX:
		qsize = PGT_QUEUE_DATA_TX_SIZE;
		break;
	case PGT_QUEUE_DATA_HIGH_RX:
		qsize = PGT_QUEUE_DATA_RX_SIZE;
		break;
	case PGT_QUEUE_DATA_HIGH_TX:
		qsize = PGT_QUEUE_DATA_TX_SIZE;
		break;
	case PGT_QUEUE_MGMT_RX:
		qsize = PGT_QUEUE_MGMT_SIZE;
		break;
	case PGT_QUEUE_MGMT_TX:
		qsize = PGT_QUEUE_MGMT_SIZE;
		break;
	default:
		return (EINVAL);
	}

	for (i = 0; i < qsize; i++) {
		pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

		error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
		    PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
		if (error != 0) {
			printf("%s: can not create DMA tag for fragment\n",
			    sc->sc_dev.dv_xname);
			free(pd, M_DEVBUF, 0);
			break;
		}

		error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
		    0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
		if (error != 0) {
			printf("%s: error alloc frag %zu on queue %u\n",
			    sc->sc_dev.dv_xname, i, pq);
			free(pd, M_DEVBUF, 0);
			break;
		}

		error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
		    PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
		if (error != 0) {
			printf("%s: error map frag %zu on queue %u\n",
			    sc->sc_dev.dv_xname, i, pq);
			bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, nsegs);
			free(pd, M_DEVBUF, 0);
			break;
		}

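		/*
		 * Rx fragments are loaded and mapped here for their whole
		 * lifetime; tx fragments are loaded per frame, at transmit
		 * time, by pgt_load_tx_desc_frag().
		 */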
		if (pgt_queue_is_rx(pq)) {
			error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
			    pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: error load frag %zu on queue %u\n",
				    sc->sc_dev.dv_xname, i, pq);
				bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
				    nsegs);
				free(pd, M_DEVBUF, 0);
				break;
			}
			pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
		}
		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	}

	return (error);
}

void
pgt_dma_free(struct pgt_softc *sc)
{
	/*
	 * fragments
	 */
	if (sc->sc_dmat != NULL) {
		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
	}

	/*
	 * powersave
	 */
	if (sc->sc_psmbuf != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
		sc->sc_psmbuf = NULL;
		sc->sc_psmdmam = NULL;
	}

	/*
	 * control block
	 */
	if (sc->sc_cb != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
		sc->sc_cb = NULL;
		sc->sc_cbdmam = NULL;
	}
}

void
pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
		pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
		TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
		if (pd->pd_dmam != NULL) {
			bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
			pd->pd_dmam = NULL;
		}
		bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
		free(pd, M_DEVBUF, 0);
	}
}

int
pgt_activate(struct device *self, int act)
{
	struct pgt_softc *sc = (struct pgt_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, act));

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING) {
			pgt_stop(sc, SC_NEEDS_RESET);
			pgt_update_hw_from_sw(sc, 0, 0);
		}
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, act);
		break;
	case DVACT_WAKEUP:
		pgt_wakeup(sc);
		break;
	}
	return (0);
}

void
pgt_wakeup(struct pgt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	if (sc->sc_power != NULL)
		(*sc->sc_power)(sc, DVACT_RESUME);

	pgt_stop(sc, SC_NEEDS_RESET);
	pgt_update_hw_from_sw(sc, 0, 0);

	if (ifp->if_flags & IFF_UP) {
		pgt_init(ifp);
		pgt_update_hw_from_sw(sc, 0, 0);
	}
}
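
/*
 * Illustrative sketch, not part of the original driver: pgt_start()
 * sends a frame in a single descriptor when it fits in PGT_FRAG_SIZE
 * bytes, splits it across exactly two descriptors when it fits in
 * twice that, and otherwise drops it.  The helper below restates that
 * size decision in isolation.  The PGT_EXAMPLES guard and the name
 * pgt_tx_frag_count() are hypothetical; nothing defines or calls them.
 */
#ifdef PGT_EXAMPLES
static int
pgt_tx_frag_count(size_t pktlen)
{
	if (pktlen <= PGT_FRAG_SIZE)
		return (1);	/* whole frame in one fragment */
	if (pktlen <= PGT_FRAG_SIZE * 2)
		return (2);	/* PGT_FRAG_SIZE bytes, then the rest */
	return (0);		/* too large; pgt_start() drops such frames */
}
#endif /* PGT_EXAMPLES */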
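
/*
 * Illustrative sketch, not part of the original driver: the nested
 * conditionals in the SIOCG80211ALLNODES handler translate WaveLAN
 * scan-result rate codes into net80211 rates in units of 500kb/s.
 * The same mapping as a switch, under the same hypothetical
 * PGT_EXAMPLES guard; the name pgt_wavelan_rate() is made up here.
 */
#ifdef PGT_EXAMPLES
static uint8_t
pgt_wavelan_rate(uint8_t code)
{
	switch (code) {
	case WI_WAVELAN_RES_1M:
		return (2);	/* 1Mb/s */
	case WI_WAVELAN_RES_2M:
		return (4);	/* 2Mb/s */
	case WI_WAVELAN_RES_5M:
		return (11);	/* 5.5Mb/s */
	case WI_WAVELAN_RES_11M:
		return (22);	/* 11Mb/s */
	default:
		return (0);	/* unknown code */
	}
}
#endif /* PGT_EXAMPLES */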
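
/*
 * Illustrative sketch, not part of the original driver:
 * pgt_update_hw_from_sw() relies on the one-iteration loop
 * "for (success = 0; success == 0; success = 1)" so that the break
 * hidden inside SETOID() aborts the whole run of OID writes on the
 * first failure, while falling off the end of the body records
 * success.  A minimal standalone use of the same idiom, under the
 * same hypothetical PGT_EXAMPLES guard; the function name is made up.
 */
#ifdef PGT_EXAMPLES
static int
pgt_setoid_idiom_demo(struct pgt_softc *sc)
{
	uint32_t mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
	int success;

	for (success = 0; success == 0; success = 1) {
		SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));
		/* further SETOID() calls would go here */
	}
	return (success ? 0 : EIO);
}
#endif /* PGT_EXAMPLES */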