1 /* $OpenBSD: if_rtwn.c,v 1.24 2016/07/26 13:00:28 stsp Exp $ */ 2 3 /*- 4 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr> 5 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * PCI front-end for Realtek RTL8188CE driver. 22 */ 23 24 #include "bpfilter.h" 25 26 #include <sys/param.h> 27 #include <sys/sockio.h> 28 #include <sys/mbuf.h> 29 #include <sys/kernel.h> 30 #include <sys/socket.h> 31 #include <sys/systm.h> 32 #include <sys/task.h> 33 #include <sys/timeout.h> 34 #include <sys/conf.h> 35 #include <sys/device.h> 36 #include <sys/endian.h> 37 38 #include <machine/bus.h> 39 #include <machine/intr.h> 40 41 #if NBPFILTER > 0 42 #include <net/bpf.h> 43 #endif 44 #include <net/if.h> 45 #include <net/if_dl.h> 46 #include <net/if_media.h> 47 48 #include <netinet/in.h> 49 #include <netinet/if_ether.h> 50 51 #include <net80211/ieee80211_var.h> 52 #include <net80211/ieee80211_radiotap.h> 53 54 #include <dev/pci/pcireg.h> 55 #include <dev/pci/pcivar.h> 56 #include <dev/pci/pcidevs.h> 57 58 #include <dev/ic/r92creg.h> 59 #include <dev/ic/rtwnvar.h> 60 61 /* 62 * Driver definitions. 
*/

/*
 * Tx packet-buffer page budget for the RTL8188CE.  The chip's Tx buffer
 * RAM is divided into a public pool plus dedicated high- and low-priority
 * pools; the page boundary register is programmed one page past the last
 * usable page (see rtwn_dma_init()).
 */
#define R92C_PUBQ_NPAGES	176
#define R92C_HPQ_NPAGES		41
#define R92C_LPQ_NPAGES		28
#define R92C_TXPKTBUF_COUNT	256
#define R92C_TX_PAGE_COUNT	\
	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
#define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)

/* Ring geometry: one Rx ring plus RTWN_NTXQUEUES Tx rings. */
#define RTWN_NTXQUEUES		9
#define RTWN_RX_LIST_COUNT	256
#define RTWN_TX_LIST_COUNT	256

/* TX queue indices. */
#define RTWN_BK_QUEUE		0
#define RTWN_BE_QUEUE		1
#define RTWN_VI_QUEUE		2
#define RTWN_VO_QUEUE		3
#define RTWN_BEACON_QUEUE	4
#define RTWN_TXCMD_QUEUE	5
#define RTWN_MGNT_QUEUE		6
#define RTWN_HIGH_QUEUE		7
#define RTWN_HCCA_QUEUE		8

/* Radiotap header prepended to received frames for bpf(4) listeners. */
struct rtwn_rx_radiotap_header {
	struct ieee80211_radiotap_header wr_ihdr;
	uint8_t		wr_flags;
	uint8_t		wr_rate;
	uint16_t	wr_chan_freq;
	uint16_t	wr_chan_flags;
	uint8_t		wr_dbm_antsignal;
} __packed;

#define RTWN_RX_RADIOTAP_PRESENT			\
	(1 << IEEE80211_RADIOTAP_FLAGS |		\
	 1 << IEEE80211_RADIOTAP_RATE |			\
	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)

/* Radiotap header prepended to transmitted frames for bpf(4) listeners. */
struct rtwn_tx_radiotap_header {
	struct ieee80211_radiotap_header wt_ihdr;
	uint8_t		wt_flags;
	uint16_t	wt_chan_freq;
	uint16_t	wt_chan_flags;
} __packed;

#define RTWN_TX_RADIOTAP_PRESENT			\
	(1 << IEEE80211_RADIOTAP_FLAGS |		\
	 1 << IEEE80211_RADIOTAP_CHANNEL)

/* Per-slot Rx state: DMA map and the mbuf the hardware fills. */
struct rtwn_rx_data {
	bus_dmamap_t		map;
	struct mbuf		*m;
};

/* Rx descriptor ring and its backing DMA memory. */
struct rtwn_rx_ring {
	struct r92c_rx_desc_pci	*desc;	/* descriptor array (DMA coherent) */
	bus_dmamap_t		map;	/* map for the descriptor array */
	bus_dma_segment_t	seg;
	int			nsegs;
	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];

};
/* Per-slot Tx state: DMA map, pending mbuf and its destination node. */
struct rtwn_tx_data {
	bus_dmamap_t		map;
	struct mbuf		*m;
	struct ieee80211_node	*ni;
};

/* Tx descriptor ring (one per hardware queue). */
struct rtwn_tx_ring {
	bus_dmamap_t		map;	/* map for the descriptor array */
	bus_dma_segment_t	seg;
	int			nsegs;
	struct r92c_tx_desc_pci	*desc;	/* descriptor array */
	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
	int			queued;	/* number of slots in flight */
	int			cur;	/* next slot to fill */
};

/* PCI bus glue around the bus-agnostic rtwn(4) softc. */
struct rtwn_pci_softc {
	struct device		sc_dev;
	struct rtwn_softc	sc_sc;	/* shared chip state (rtwnvar.h) */

	struct rtwn_rx_ring	rx_ring;
	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
	uint32_t		qfullmsk;	/* bit per full Tx queue */

	struct timeout		calib_to;
	struct timeout		scan_to;

	/* PCI specific goo. */
	bus_dma_tag_t		sc_dmat;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	void			*sc_ih;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_size_t		sc_mapsize;
	int			sc_cap_off;	/* PCIe capability offset */

#if NBPFILTER > 0
	caddr_t			sc_drvbpf;

	union {
		struct rtwn_rx_radiotap_header th;
		uint8_t	pad[64];
	}			sc_rxtapu;
#define sc_rxtap	sc_rxtapu.th
	int			sc_rxtap_len;

	union {
		struct rtwn_tx_radiotap_header th;
		uint8_t	pad[64];
	}			sc_txtapu;
#define sc_txtap	sc_txtapu.th
	int			sc_txtap_len;
#endif
};

#ifdef RTWN_DEBUG
#define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
extern int rtwn_debug;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

/*
 * PCI configuration space registers.
 */
#define RTWN_PCI_IOBA		0x10	/* i/o mapped base */
#define RTWN_PCI_MMBA		0x18	/* memory mapped base */

/* Supported devices: a single RTL8188CE PCI ID. */
static const struct pci_matchid rtwn_pci_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8188 }
};

int		rtwn_pci_match(struct device *, void *, void *);
void		rtwn_pci_attach(struct device *, struct device *, void *);
int		rtwn_pci_detach(struct device *, int);
int		rtwn_pci_activate(struct device *, int);
int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
void		rtwn_free_rx_list(struct rtwn_pci_softc *);
void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
uint8_t		rtwn_pci_read_1(void *, uint16_t);
uint16_t	rtwn_pci_read_2(void *, uint16_t);
uint32_t	rtwn_pci_read_4(void *, uint16_t);
void		rtwn_rx_frame(struct rtwn_pci_softc *,
		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int);
int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
void		rtwn_tx_done(struct rtwn_pci_softc *, int);
int		rtwn_alloc_buffers(void *);
int		rtwn_pci_init(void *);
void		rtwn_pci_stop(void *);
int		rtwn_intr(void *);
int		rtwn_is_oactive(void *);
int		rtwn_power_on(void *);
int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
int		rtwn_llt_init(struct rtwn_pci_softc *);
int		rtwn_dma_init(void *);
int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
void		rtwn_mac_init(void *);
void		rtwn_bb_init(void *);
void		rtwn_calib_to(void *);
void		rtwn_next_calib(void *);
void		rtwn_cancel_calib(void *);
void		rtwn_scan_to(void *);
void		rtwn_pci_next_scan(void *);
void		rtwn_cancel_scan(void *);
void		rtwn_wait_async(void *);

/* Aliases: baseband registers are accessed through plain MMIO here. */
#define rtwn_bb_write	rtwn_pci_write_4
#define rtwn_bb_read	rtwn_pci_read_4

struct cfdriver rtwn_cd = {
	NULL, "rtwn", DV_IFNET
};

const struct cfattach rtwn_pci_ca = {
	sizeof(struct rtwn_pci_softc),
	rtwn_pci_match,
	rtwn_pci_attach,
	rtwn_pci_detach,
	rtwn_pci_activate
};

/*
 * Autoconf match: accept any device in rtwn_pci_devices[].
 */
int
rtwn_pci_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, rtwn_pci_devices,
	    nitems(rtwn_pci_devices)));
}

/*
 * Autoconf attach: map registers, hook the interrupt, allocate DMA
 * rings, then hand off to the bus-agnostic rtwn_attach() with our
 * operation vector.  On any failure resources allocated so far are
 * released and the routine returns without attaching.
 */
void
rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp;
	int i, error;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr;

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
	timeout_set(&sc->scan_to, rtwn_scan_to, sc);

	/* Make sure the chip is powered before touching registers. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/* Map control/status registers. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
	if (error != 0) {
		printf(": can't map mem space\n");
		return;
	}

	/* Prefer MSI; fall back to legacy INTx. */
	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(sc->sc_pc, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
	    rtwn_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s\n", intrstr);

	/* Disable PCIe Active State Power Management (ASPM). */
	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &sc->sc_cap_off, NULL)) {
		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    sc->sc_cap_off + PCI_PCIE_LCSR);
		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
	}

	/* Allocate Tx/Rx buffers. */
	error = rtwn_alloc_rx_list(sc);
	if (error != 0) {
		printf("%s: could not allocate Rx buffers\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	for (i = 0; i < RTWN_NTXQUEUES; i++) {
		error = rtwn_alloc_tx_list(sc, i);
		if (error != 0) {
			printf("%s: could not allocate Tx buffers\n",
			    sc->sc_dev.dv_xname);
			rtwn_free_rx_list(sc);
			return;
		}
	}

	/* Attach the bus-agnostic driver. */
	sc->sc_sc.sc_ops.cookie = sc;
	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
	sc->sc_sc.sc_ops.tx = rtwn_tx;
	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
	sc->sc_sc.sc_ops.init = rtwn_pci_init;
	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc,
	    RTWN_CHIP_88C | RTWN_CHIP_PCI);
	if (error != 0) {
		rtwn_free_rx_list(sc);
		for (i = 0; i < RTWN_NTXQUEUES; i++)
			rtwn_free_tx_list(sc, i);
		return;
	}

	/* ifp is now valid */
	ifp = &sc->sc_sc.sc_ic.ic_if;
#if NBPFILTER > 0
	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
#endif
}

/*
 * Autoconf detach: stop timeouts, detach the shared driver and
 * release all DMA resources at splnet.
 */
int
rtwn_pci_detach(struct device *self, int flags)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
	int s, i;

	s = splnet();

	if (timeout_initialized(&sc->calib_to))
		timeout_del(&sc->calib_to);
	if (timeout_initialized(&sc->scan_to))
		timeout_del(&sc->scan_to);

	rtwn_detach(&sc->sc_sc, flags);

	/* Free Tx/Rx buffers. */
	for (i = 0; i < RTWN_NTXQUEUES; i++)
		rtwn_free_tx_list(sc, i);
	rtwn_free_rx_list(sc);
	splx(s);

	return (0);
}

/*
 * Autoconf activate: forward suspend/resume events to the shared driver.
 */
int
rtwn_pci_activate(struct device *self, int act)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;

	return rtwn_activate(&sc->sc_sc, act);
}

/*
 * Initialize one Rx descriptor and hand it to the hardware.
 * The EOR bit is set on the last descriptor so the chip wraps around.
 * The write barrier ensures length/address are visible before the OWN
 * bit transfers the descriptor to the device.
 */
void
rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
    bus_addr_t addr, size_t len, int idx)
{
	memset(desc, 0, sizeof(*desc));
	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
	    ((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
	desc->rxbufaddr = htole32(addr);
	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
}

/*
 * Allocate the Rx descriptor ring and one mbuf cluster per slot.
 * On error, rtwn_free_rx_list() releases whatever was allocated.
 * Returns 0 on success or a bus_dma/ENOMEM error code.
 */
int
rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	size_t size;
	int i, error = 0;

	/* Allocate Rx descriptors. */
	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &rx_ring->map);
	if (error != 0) {
		printf("%s: could not create rx desc DMA map\n",
		    sc->sc_dev.dv_xname);
		rx_ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
	    size, (caddr_t *)&rx_ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
		rx_ring->desc = NULL;
		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
	    1, size, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	/* Allocate Rx buffers. */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rx_data->map);
		if (error != 0) {
			printf("%s: could not create rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
		if (rx_data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		/* Hand the slot to the hardware. */
		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
	}
fail:	if (error != 0)
		rtwn_free_rx_list(sc);
	return (error);
}

/*
 * Re-arm every Rx descriptor (after a chip stop/restart); the mbufs
 * themselves are kept.
 */
void
rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i;

	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];
		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
	}
}

/*
 * Release the Rx descriptor ring and all Rx mbufs/maps.  Safe to call
 * on a partially-allocated ring (used as the alloc error path).
 */
void
rtwn_free_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i, s;

	s = splnet();

	if (rx_ring->map) {
		if (rx_ring->desc) {
			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
			    sizeof (struct r92c_rx_desc_pci) *
			    RTWN_RX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
			    rx_ring->nsegs);
			rx_ring->desc = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
		rx_ring->map = NULL;
	}

	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data =
&rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
		rx_data->map = NULL;
	}

	splx(s);
}

/*
 * Allocate the Tx descriptor ring for queue 'qid' and one DMA map per
 * slot.  Descriptors are chained circularly through nextdescaddr.
 * On error rtwn_free_tx_list() releases partial allocations.
 * Returns 0 on success or a bus_dma error code.
 */
int
rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i = 0, error = 0;

	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
	    BUS_DMA_NOWAIT, &tx_ring->map);
	if (error != 0) {
		printf("%s: could not create tx ring DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate tx ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
		printf("%s: can't map tx ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load tx ring DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];

		/* setup tx desc: link each slot to its successor (circular). */
		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
		    + sizeof(struct r92c_tx_desc_pci)
		    * ((i + 1) % RTWN_TX_LIST_COUNT));

		tx_data = &tx_ring->tx_data[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tx_data->map);
		if (error != 0) {
			printf("%s: could not create tx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		tx_data->m = NULL;
		tx_data->ni = NULL;
	}
fail:
	if (error != 0)
		rtwn_free_tx_list(sc, qid);
	return (error);
}

/*
 * Reclaim all frames pending on queue 'qid' and clear the descriptors
 * (the reserved/nextdescaddr fields at the tail of each descriptor are
 * preserved so the circular chain stays intact).
 */
void
rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	int i;

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];

		memset(desc, 0, sizeof(*desc) -
		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
		    sizeof(desc->nextdescaddr)));

		if (tx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
			m_freem(tx_data->m);
			tx_data->m = NULL;
			ieee80211_release_node(ic, tx_data->ni);
			tx_data->ni = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTWRITE);

	sc->qfullmsk &= ~(1 << qid);
	tx_ring->queued = 0;
	tx_ring->cur = 0;
}

/*
 * Release the Tx descriptor ring for queue 'qid', freeing any frames
 * still queued.  Safe to call on a partially-allocated ring.
 */
void
rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i;

	if (tx_ring->map != NULL) {
		if (tx_ring->desc != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
			    sizeof (struct r92c_tx_desc_pci) *
			    RTWN_TX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
		}
		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
	}

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];

		if (tx_data->m != NULL) {
bus_dmamap_unload(sc->sc_dmat, tx_data->map); 696 m_freem(tx_data->m); 697 tx_data->m = NULL; 698 } 699 bus_dmamap_destroy(sc->sc_dmat, tx_data->map); 700 } 701 702 sc->qfullmsk &= ~(1 << qid); 703 tx_ring->queued = 0; 704 tx_ring->cur = 0; 705 } 706 707 void 708 rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val) 709 { 710 struct rtwn_pci_softc *sc = cookie; 711 712 bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val); 713 } 714 715 void 716 rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val) 717 { 718 struct rtwn_pci_softc *sc = cookie; 719 720 val = htole16(val); 721 bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val); 722 } 723 724 void 725 rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val) 726 { 727 struct rtwn_pci_softc *sc = cookie; 728 729 val = htole32(val); 730 bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val); 731 } 732 733 uint8_t 734 rtwn_pci_read_1(void *cookie, uint16_t addr) 735 { 736 struct rtwn_pci_softc *sc = cookie; 737 738 return bus_space_read_1(sc->sc_st, sc->sc_sh, addr); 739 } 740 741 uint16_t 742 rtwn_pci_read_2(void *cookie, uint16_t addr) 743 { 744 struct rtwn_pci_softc *sc = cookie; 745 uint16_t val; 746 747 val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr); 748 return le16toh(val); 749 } 750 751 uint32_t 752 rtwn_pci_read_4(void *cookie, uint16_t addr) 753 { 754 struct rtwn_pci_softc *sc = cookie; 755 uint32_t val; 756 757 val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr); 758 return le32toh(val); 759 } 760 761 void 762 rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc, 763 struct rtwn_rx_data *rx_data, int desc_idx) 764 { 765 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 766 struct ifnet *ifp = &ic->ic_if; 767 struct ieee80211_rxinfo rxi; 768 struct ieee80211_frame *wh; 769 struct ieee80211_node *ni; 770 struct r92c_rx_phystat *phy = NULL; 771 uint32_t rxdw0, rxdw3; 772 struct mbuf *m, *m1; 773 uint8_t rate; 774 int8_t rssi = 0; 775 int infosz, pktlen, shift, error; 776 777 rxdw0 = 
letoh32(rx_desc->rxdw0); 778 rxdw3 = letoh32(rx_desc->rxdw3); 779 780 if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) { 781 /* 782 * This should not happen since we setup our Rx filter 783 * to not receive these frames. 784 */ 785 ifp->if_ierrors++; 786 return; 787 } 788 789 pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); 790 if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) { 791 ifp->if_ierrors++; 792 return; 793 } 794 795 rate = MS(rxdw3, R92C_RXDW3_RATE); 796 infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; 797 if (infosz > sizeof(struct r92c_rx_phystat)) 798 infosz = sizeof(struct r92c_rx_phystat); 799 shift = MS(rxdw0, R92C_RXDW0_SHIFT); 800 801 /* Get RSSI from PHY status descriptor if present. */ 802 if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) { 803 phy = mtod(rx_data->m, struct r92c_rx_phystat *); 804 rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy); 805 /* Update our average RSSI. */ 806 rtwn_update_avgrssi(&sc->sc_sc, rate, rssi); 807 } 808 809 DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n", 810 pktlen, rate, infosz, shift, rssi)); 811 812 m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES); 813 if (m1 == NULL) { 814 ifp->if_ierrors++; 815 return; 816 } 817 bus_dmamap_unload(sc->sc_dmat, rx_data->map); 818 error = bus_dmamap_load(sc->sc_dmat, rx_data->map, 819 mtod(m1, void *), MCLBYTES, NULL, 820 BUS_DMA_NOWAIT | BUS_DMA_READ); 821 if (error != 0) { 822 m_freem(m1); 823 824 if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, 825 rx_data->m, BUS_DMA_NOWAIT)) 826 panic("%s: could not load old RX mbuf", 827 sc->sc_dev.dv_xname); 828 829 /* Physical address may have changed. */ 830 rtwn_setup_rx_desc(sc, rx_desc, 831 rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx); 832 833 ifp->if_ierrors++; 834 return; 835 } 836 837 /* Finalize mbuf. */ 838 m = rx_data->m; 839 rx_data->m = m1; 840 m->m_pkthdr.len = m->m_len = pktlen + infosz + shift; 841 842 /* Update RX descriptor. 
*/ 843 rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr, 844 MCLBYTES, desc_idx); 845 846 /* Get ieee80211 frame header. */ 847 if (rxdw0 & R92C_RXDW0_PHYST) 848 m_adj(m, infosz + shift); 849 else 850 m_adj(m, shift); 851 wh = mtod(m, struct ieee80211_frame *); 852 853 #if NBPFILTER > 0 854 if (__predict_false(sc->sc_drvbpf != NULL)) { 855 struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap; 856 struct mbuf mb; 857 858 tap->wr_flags = 0; 859 /* Map HW rate index to 802.11 rate. */ 860 tap->wr_flags = 2; 861 if (!(rxdw3 & R92C_RXDW3_HT)) { 862 switch (rate) { 863 /* CCK. */ 864 case 0: tap->wr_rate = 2; break; 865 case 1: tap->wr_rate = 4; break; 866 case 2: tap->wr_rate = 11; break; 867 case 3: tap->wr_rate = 22; break; 868 /* OFDM. */ 869 case 4: tap->wr_rate = 12; break; 870 case 5: tap->wr_rate = 18; break; 871 case 6: tap->wr_rate = 24; break; 872 case 7: tap->wr_rate = 36; break; 873 case 8: tap->wr_rate = 48; break; 874 case 9: tap->wr_rate = 72; break; 875 case 10: tap->wr_rate = 96; break; 876 case 11: tap->wr_rate = 108; break; 877 } 878 } else if (rate >= 12) { /* MCS0~15. */ 879 /* Bit 7 set means HT MCS instead of rate. */ 880 tap->wr_rate = 0x80 | (rate - 12); 881 } 882 tap->wr_dbm_antsignal = rssi; 883 tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq); 884 tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags); 885 886 mb.m_data = (caddr_t)tap; 887 mb.m_len = sc->sc_rxtap_len; 888 mb.m_next = m; 889 mb.m_nextpkt = NULL; 890 mb.m_type = 0; 891 mb.m_flags = 0; 892 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 893 } 894 #endif 895 896 ni = ieee80211_find_rxnode(ic, wh); 897 rxi.rxi_flags = 0; 898 rxi.rxi_rssi = rssi; 899 rxi.rxi_tstamp = 0; /* Unused. */ 900 ieee80211_input(ifp, m, ni, &rxi); 901 /* Node is no longer needed. 
*/ 902 ieee80211_release_node(ic, ni); 903 } 904 905 int 906 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni) 907 { 908 struct rtwn_pci_softc *sc = cookie; 909 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 910 struct ieee80211_frame *wh; 911 struct ieee80211_key *k = NULL; 912 struct rtwn_tx_ring *tx_ring; 913 struct rtwn_tx_data *data; 914 struct r92c_tx_desc_pci *txd; 915 uint16_t qos; 916 uint8_t raid, type, tid, qid; 917 int hasqos, error; 918 919 wh = mtod(m, struct ieee80211_frame *); 920 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 921 922 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 923 k = ieee80211_get_txkey(ic, wh, ni); 924 if ((m = ieee80211_encrypt(ic, m, k)) == NULL) 925 return (ENOBUFS); 926 wh = mtod(m, struct ieee80211_frame *); 927 } 928 929 if ((hasqos = ieee80211_has_qos(wh))) { 930 qos = ieee80211_get_qos(wh); 931 tid = qos & IEEE80211_QOS_TID; 932 qid = ieee80211_up_to_ac(ic, tid); 933 } else if (type != IEEE80211_FC0_TYPE_DATA) { 934 qid = RTWN_VO_QUEUE; 935 } else 936 qid = RTWN_BE_QUEUE; 937 938 /* Grab a Tx buffer from the ring. */ 939 tx_ring = &sc->tx_ring[qid]; 940 data = &tx_ring->tx_data[tx_ring->cur]; 941 if (data->m != NULL) { 942 m_freem(m); 943 return (ENOBUFS); 944 } 945 946 /* Fill Tx descriptor. 
*/ 947 txd = &tx_ring->desc[tx_ring->cur]; 948 if (htole32(txd->txdw0) & R92C_RXDW0_OWN) { 949 m_freem(m); 950 return (ENOBUFS); 951 } 952 txd->txdw0 = htole32( 953 SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) | 954 SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | 955 R92C_TXDW0_FSG | R92C_TXDW0_LSG); 956 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 957 txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); 958 959 txd->txdw1 = 0; 960 #ifdef notyet 961 if (k != NULL) { 962 switch (k->k_cipher) { 963 case IEEE80211_CIPHER_WEP40: 964 case IEEE80211_CIPHER_WEP104: 965 case IEEE80211_CIPHER_TKIP: 966 cipher = R92C_TXDW1_CIPHER_RC4; 967 break; 968 case IEEE80211_CIPHER_CCMP: 969 cipher = R92C_TXDW1_CIPHER_AES; 970 break; 971 default: 972 cipher = R92C_TXDW1_CIPHER_NONE; 973 } 974 txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher)); 975 } 976 #endif 977 txd->txdw4 = 0; 978 txd->txdw5 = 0; 979 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 980 type == IEEE80211_FC0_TYPE_DATA) { 981 if (ic->ic_curmode == IEEE80211_MODE_11B || 982 (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B)) 983 raid = R92C_RAID_11B; 984 else 985 raid = R92C_RAID_11BG; 986 txd->txdw1 |= htole32( 987 SM(R92C_TXDW1_MACID, R92C_MACID_BSS) | 988 SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) | 989 SM(R92C_TXDW1_RAID, raid) | 990 R92C_TXDW1_AGGBK); 991 992 if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 993 txd->txdw4 |= htole32(R92C_TXDW4_RTSEN | 994 R92C_TXDW4_HWRTSEN); 995 } else if (ic->ic_flags & IEEE80211_F_USEPROT) { 996 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 997 txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF | 998 R92C_TXDW4_HWRTSEN); 999 } else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1000 txd->txdw4 |= htole32(R92C_TXDW4_RTSEN | 1001 R92C_TXDW4_HWRTSEN); 1002 } 1003 } 1004 /* Send RTS at OFDM24. */ 1005 txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 8)); 1006 txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf)); 1007 /* Send data at OFDM54. 
 */
		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 11));
		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));

	} else {
		txd->txdw1 |= htole32(
		    SM(R92C_TXDW1_MACID, 0) |
		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
		    SM(R92C_TXDW1_RAID, R92C_RAID_11B));

		/* Force CCK1. */
		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
	}
	/* Set sequence number (already little endian). */
	txd->txdseq = *(uint16_t *)wh->i_seq;

	if (!hasqos) {
		/* Use HW sequence numbering for non-QoS frames. */
		txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ);
		txd->txdseq |= htole16(0x8000);	/* WTF? */
	} else
		txd->txdw4 |= htole32(R92C_TXDW4_QOS);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error && error != EFBIG) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m);
		return error;
	}
	if (error != 0) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m);
			return error;
		}
	}

	/* Hand the frame to the hardware: set buffer, then the OWN bit. */
	txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
	txd->txbufsize = htole16(m->m_pkthdr.len);
	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	txd->txdw0 |= htole32(R92C_TXDW0_OWN);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTWRITE);

	data->m = m;
	data->ni = ni;

#if NBPFILTER > 0
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
		struct mbuf mb;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_txtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
	}
#endif

	tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
	tx_ring->queued++;

	/* Stop accepting frames for this queue when it is nearly full. */
	if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
		sc->qfullmsk |= (1 << qid);

	/* Kick TX. */
	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));

	return (0);
}

/*
 * Reclaim completed frames on TX queue `qid': free mbufs and release
 * node references for every descriptor the hardware no longer owns
 * (R92C_TXDW0_OWN clear).  Restarts the interface output queue once
 * no queue is marked full.
 */
void
rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	struct r92c_tx_desc_pci *tx_desc;
	int i;

	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTREAD);

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];
		if (tx_data->m == NULL)
			continue;

		tx_desc = &tx_ring->desc[i];
		/* Descriptor still owned by the hardware; not done yet. */
		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
			continue;

		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
		m_freem(tx_data->m);
		tx_data->m = NULL;
		ieee80211_release_node(ic, tx_data->ni);
		tx_data->ni = NULL;

		ifp->if_opackets++;
		sc->sc_sc.sc_tx_timer = 0;
		tx_ring->queued--;
	}

	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
		sc->qfullmsk &= ~(1 << qid);

	if (sc->qfullmsk == 0) {
		ifq_clr_oactive(&ifp->if_snd);
		(*ifp->if_start)(ifp);
	}
}

/* rtwn(4) attachment hook; buffers are allocated at PCI attach time. */
int
rtwn_alloc_buffers(void *cookie)
{
	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
	return (0);
}

/* rtwn(4) attachment hook; no PCI-specific init work is needed. */
int
rtwn_pci_init(void *cookie)
{
	/* nothing to do */
	return (0);
}

/*
 * Stop the hardware: mask interrupts, pause TX, reset the baseband and
 * MAC DMA engines, power the chip down, and drop all queued frames.
 * Register values mirror the vendor (Linux) driver's stop sequence.
 */
void
rtwn_pci_stop(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;
	uint16_t reg;
	int i, s;

	s = splnet();

	/* Disable interrupts. */
	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);

	/* Stop hardware. */
	rtwn_pci_write_1(sc, R92C_TXPAUSE, 0xff);
	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
	/* Pulse the global BB reset bit. */
	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
	/* Disable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
	reg = rtwn_pci_read_2(sc, R92C_CR);
	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
	    R92C_CR_ENSEC);
	rtwn_pci_write_2(sc, R92C_CR, reg);
	/* Reset the MCU only if firmware was loaded to RAM. */
	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
		rtwn_fw_reset(&sc->sc_sc);
	/* TODO: linux does additional btcoex stuff here */
	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80);	/* linux magic number */
	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23);	/* ditto */
	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e);	/* differs in btcoex */
	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x0e);
	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);

	for (i = 0; i < RTWN_NTXQUEUES; i++)
		rtwn_reset_tx_list(sc, i);
	rtwn_reset_rx_list(sc);

	splx(s);
}

/*
 * Interrupt handler.  Reads and acknowledges HISR, processes received
 * frames and completed TX descriptors, then re-enables interrupts.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
rtwn_intr(void *xsc)
{
	struct rtwn_pci_softc *sc = xsc;
	u_int32_t status;
	int i;

	status = rtwn_pci_read_4(sc, R92C_HISR);
	/* 0xffffffff likely means the device is gone (bus read of all-ones). */
	if (status == 0 || status == 0xffffffff)
		return (0);

	/* Disable interrupts. */
	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);

	/* Ack interrupts. */
	rtwn_pci_write_4(sc, R92C_HISR, status);

	/* Vendor driver treats RX errors like ROK... */
	if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) {
		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
		    BUS_DMASYNC_POSTREAD);

		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];

			/* Skip descriptors still owned by the hardware. */
			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
				continue;

			rtwn_rx_frame(sc, rx_desc, rx_data, i);
		}
	}

	if (status & R92C_IMR_BDOK)
		rtwn_tx_done(sc, RTWN_BEACON_QUEUE);
	if (status & R92C_IMR_HIGHDOK)
		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
	if (status & R92C_IMR_MGNTDOK)
		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
	if (status & R92C_IMR_BKDOK)
		rtwn_tx_done(sc, RTWN_BK_QUEUE);
	if (status & R92C_IMR_BEDOK)
		rtwn_tx_done(sc, RTWN_BE_QUEUE);
	if (status & R92C_IMR_VIDOK)
		rtwn_tx_done(sc, RTWN_VI_QUEUE);
	if (status & R92C_IMR_VODOK)
		rtwn_tx_done(sc, RTWN_VO_QUEUE);

	/* Enable interrupts. */
	rtwn_pci_write_4(sc, R92C_HIMR, RTWN_INT_ENABLE);

	return (1);
}

/* Report whether output should stall: true while any TX queue is full. */
int
rtwn_is_oactive(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;

	return (sc->qfullmsk != 0);
}

/*
 * Write one entry of the on-chip link list table (LLT) and poll until
 * the hardware reports the operation complete (up to 20 * 5us).
 * Returns 0 on success or ETIMEDOUT.
 */
int
rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data)
{
	int ntries;

	rtwn_pci_write_4(sc, R92C_LLT_INIT,
	    SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
	    SM(R92C_LLT_INIT_ADDR, addr) |
	    SM(R92C_LLT_INIT_DATA, data));
	/* Wait for write operation to complete. */
	for (ntries = 0; ntries < 20; ntries++) {
		if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
		    R92C_LLT_INIT_OP_NO_ACTIVE)
			return (0);
		DELAY(5);
	}
	return (ETIMEDOUT);
}

/*
 * Initialize the link list table: chain the reserved TX pages linearly
 * (terminated by 0xff) and link the remaining pages into a circular
 * ring buffer.  Returns 0 or the error from rtwn_llt_write().
 */
int
rtwn_llt_init(struct rtwn_pci_softc *sc)
{
	int i, error;

	/* Reserve pages [0; R92C_TX_PAGE_COUNT]. */
	for (i = 0; i < R92C_TX_PAGE_COUNT; i++) {
		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
			return (error);
	}
	/* NB: 0xff indicates end-of-list. */
	if ((error = rtwn_llt_write(sc, i, 0xff)) != 0)
		return (error);
	/*
	 * Use pages [R92C_TX_PAGE_COUNT + 1; R92C_TXPKTBUF_COUNT - 1]
	 * as ring buffer.
	 */
	for (++i; i < R92C_TXPKTBUF_COUNT - 1; i++) {
		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
			return (error);
	}
	/* Make the last page point to the beginning of the ring buffer. */
	error = rtwn_llt_write(sc, i, R92C_TX_PAGE_COUNT + 1);
	return (error);
}

/*
 * Chip power-on sequence, mirroring the vendor driver: wait for
 * autoload, bring up the power/clock/isolation domains, auto-enable
 * WLAN, check the hardware rfkill switch, and initialize the MAC.
 * Returns 0, ETIMEDOUT on a stuck poll, or EPERM if rfkill is set.
 */
int
rtwn_power_on(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;
	uint32_t reg;
	int ntries;

	/* Wait for autoload done bit. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) &
		    R92C_APS_FSMCO_PFM_ALDN)
			break;
		DELAY(5);
	}
	if (ntries == 1000) {
		printf("%s: timeout waiting for chip autoload\n",
		    sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	/* Unlock ISO/CLK/Power control register. */
	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);

	/* TODO: check if we need this for 8188CE */
	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
		/* bt coex */
		reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO);
		reg |= (R92C_APS_FSMCO_SOP_ABG |
		    R92C_APS_FSMCO_SOP_AMB |
		    R92C_APS_FSMCO_XOP_BTCK);
		rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg);
	}

	/* Move SPS into PWM mode. */
	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b);
	DELAY(100);

	/* Set low byte to 0x0f, leave others unchanged. */
	rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL,
	    (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f);

	/* TODO: check if we need this for 8188CE */
	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
		/* bt coex */
		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL);
		reg &= (~0x00024800); /* XXX magic from linux */
		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg);
	}

	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
	    (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) |
	    R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR);
	DELAY(200);

	/* TODO: linux does additional btcoex stuff here */

	/* Auto enable WLAN. */
	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
	/* The chip clears APFM_ONMAC when the MAC is up. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
		    R92C_APS_FSMCO_APFM_ONMAC))
			break;
		DELAY(5);
	}
	if (ntries == 1000) {
		printf("%s: timeout waiting for MAC auto ON\n",
		    sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	/* Enable radio, GPIO and LED functions. */
	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
	    R92C_APS_FSMCO_AFSM_PCIE |
	    R92C_APS_FSMCO_PDN_EN |
	    R92C_APS_FSMCO_PFM_ALDN);
	/* Release RF digital isolation. */
	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
	    rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);

	if (sc->sc_sc.chip & RTWN_CHIP_92C)
		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77);
	else
		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22);

	rtwn_pci_write_4(sc, R92C_INT_MIG, 0);

	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
		/* bt coex */
		/*
		 * NOTE(review): a 4-byte read/modify/write masked with 0xfd
		 * zeroes the upper 24 bits of this register; matches the
		 * Linux magic, but worth confirming against the vendor code.
		 */
		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2);
		reg &= 0xfd; /* XXX magic from linux */
		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg);
	}

	rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG,
	    rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL);

	reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL);
	if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) {
		printf("%s: radio is disabled by hardware switch\n",
		    sc->sc_dev.dv_xname);
		return (EPERM);	/* :-) */
	}

	/* Initialize MAC. */
	/* NOTE(review): this read result is unused (dead store to reg). */
	reg = rtwn_pci_read_1(sc, R92C_APSD_CTRL);
	rtwn_pci_write_1(sc, R92C_APSD_CTRL,
	    rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
	for (ntries = 0; ntries < 200; ntries++) {
		if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) &
		    R92C_APSD_CTRL_OFF_STATUS))
			break;
		DELAY(500);
	}
	if (ntries == 200) {
		printf("%s: timeout waiting for MAC initialization\n",
		    sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
	reg = rtwn_pci_read_2(sc, R92C_CR);
	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
	    R92C_CR_ENSEC;
	rtwn_pci_write_2(sc, R92C_CR, reg);

	/* Undocumented register write; carried over from the vendor driver. */
	rtwn_pci_write_1(sc, 0xfe10, 0x19);

	return (0);
}

/*
 * Configure chip DMA: LLT page chains, per-queue page budgets and
 * boundaries, queue-to-DMA mapping, and TX/RX descriptor ring base
 * addresses.  Returns 0 or the error from rtwn_llt_init().
 */
int
rtwn_dma_init(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;
	uint32_t reg;
	int error;

	/* Initialize LLT table. */
	error = rtwn_llt_init(sc);
	if (error != 0)
		return error;

	/* Set number of pages for normal priority queue. */
	rtwn_pci_write_2(sc, R92C_RQPN_NPQ, 0);
	rtwn_pci_write_4(sc, R92C_RQPN,
	    /* Set number of pages for public queue. */
	    SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) |
	    /* Set number of pages for high priority queue. */
	    SM(R92C_RQPN_HPQ, R92C_HPQ_NPAGES) |
	    /* Set number of pages for low priority queue. */
	    SM(R92C_RQPN_LPQ, R92C_LPQ_NPAGES) |
	    /* Load values. */
	    R92C_RQPN_LD);

	rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY);
	rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY);
	rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD,
	    R92C_TX_PAGE_BOUNDARY);
	rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY);
	rtwn_pci_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY);

	/* Queue-to-DMA mapping; 0xF771 is presumably vendor magic — verify. */
	reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL);
	reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
	reg |= 0xF771;
	rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg);

	/* Bits 12 and 13 are undocumented here; carried from vendor driver. */
	rtwn_pci_write_4(sc, R92C_TCR,
	    R92C_TCR_CFENDFORM | (1 << 12) | (1 << 13));

	/* Configure Tx DMA. */
	rtwn_pci_write_4(sc, R92C_BKQ_DESA,
	    sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
	rtwn_pci_write_4(sc, R92C_BEQ_DESA,
	    sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
	rtwn_pci_write_4(sc, R92C_VIQ_DESA,
	    sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
	rtwn_pci_write_4(sc, R92C_VOQ_DESA,
	    sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
	rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
	    sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
	rtwn_pci_write_4(sc, R92C_MGQ_DESA,
	    sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
	rtwn_pci_write_4(sc, R92C_HQ_DESA,
	    sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);

	/* Configure Rx DMA. */
	rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr);

	/* Set Tx/Rx transfer page boundary. */
	rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff);

	/* Set Tx/Rx transfer page size. */
	rtwn_pci_write_1(sc, R92C_PBP,
	    SM(R92C_PBP_PSRX, R92C_PBP_128) |
	    SM(R92C_PBP_PSTX, R92C_PBP_128));

	return (0);
}

/*
 * Copy one firmware page into the chip's download window: select the
 * page in MCUFWDL, then write the data starting at R92C_FW_START_ADDR
 * in 196-, 4-, then 1-byte chunks.  Always returns 0.
 */
int
rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len)
{
	struct rtwn_pci_softc *sc = cookie;
	uint32_t reg;
	int off, mlen, error = 0, i;

	reg = rtwn_pci_read_4(sc, R92C_MCUFWDL);
	reg = RW(reg, R92C_MCUFWDL_PAGE, page);
	rtwn_pci_write_4(sc, R92C_MCUFWDL, reg);

	DELAY(5);

	off = R92C_FW_START_ADDR;
	while (len > 0) {
		if (len > 196)
			mlen = 196;
		else if (len > 4)
			mlen = 4;
		else
			mlen = 1;
		for (i = 0; i < mlen; i++)
			rtwn_pci_write_1(sc, off++, buf[i]);
		buf += mlen;
		len -= mlen;
	}

	return (error);
}

/*
 * Load the firmware image matching the chip cut: UMC A-cut 8188C parts
 * use rtwn-rtl8192cfwU, everything else rtwn-rtl8192cfwU_B.
 * Returns the loadfirmware(9) error, printing a diagnostic on failure.
 */
int
rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len)
{
	struct rtwn_pci_softc *sc = cookie;
	const char *name;
	int error;

	if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT |
	    RTWN_CHIP_92C)) == RTWN_CHIP_UMC_A_CUT)
		name = "rtwn-rtl8192cfwU";
	else
		name = "rtwn-rtl8192cfwU_B";

	error = loadfirmware(name, fw, len);
	if (error)
		printf("%s: could not read firmware %s (error %d)\n",
		    sc->sc_dev.dv_xname, name, error);
	return (error);
}

/* Program the MAC registers from the rtl8192ce_mac init table. */
void
rtwn_mac_init(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;
	int i;

	/* Write MAC initialization values. */
	for (i = 0; i < nitems(rtl8192ce_mac); i++)
		rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg,
		    rtl8192ce_mac[i].val);
}

/*
 * Initialize the baseband: enable BB/RF function blocks, write the
 * 1T or 2T BB register program and AGC tables, and apply the 1T-only
 * overrides for 1T2R 8192C parts.
 */
void
rtwn_bb_init(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;
	const struct r92c_bb_prog *prog;
	uint32_t reg;
	int i;

	/* Enable BB and RF. */
	rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN,
	    rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) |
	    R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
	    R92C_SYS_FUNC_EN_DIO_RF);

	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);

	rtwn_pci_write_1(sc, R92C_RF_CTRL,
	    R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);

	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
	    R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA |
	    R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST |
	    R92C_SYS_FUNC_EN_BBRSTB);

	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);

	rtwn_pci_write_4(sc, R92C_LEDCFG0,
	    rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000);

	/* Select BB programming. */
	prog = (sc->sc_sc.chip & RTWN_CHIP_92C) ?
	    &rtl8192ce_bb_prog_2t : &rtl8192ce_bb_prog_1t;

	/* Write BB initialization values. */
	for (i = 0; i < prog->count; i++) {
		rtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
		DELAY(1);
	}

	if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) {
		/* 8192C 1T only configuration. */
		reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO);
		reg = (reg & ~0x00000003) | 0x2;
		rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);

		reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO);
		reg = (reg & ~0x00300033) | 0x00200022;
		rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);

		reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING);
		reg = (reg & ~0xff000000) | 0x45 << 24;
		rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);

		reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
		reg = (reg & ~0x000000ff) | 0x23;
		rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);

		reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
		reg = (reg & ~0x00000030) | 1 << 4;
		rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);

		/* 0xe74-0xe88: undocumented BB registers; vendor values. */
		reg = rtwn_bb_read(sc, 0xe74);
		reg = (reg & ~0x0c000000) | 2 << 26;
		rtwn_bb_write(sc, 0xe74, reg);
		reg = rtwn_bb_read(sc, 0xe78);
		reg = (reg & ~0x0c000000) | 2 << 26;
		rtwn_bb_write(sc, 0xe78, reg);
		reg = rtwn_bb_read(sc, 0xe7c);
		reg = (reg & ~0x0c000000) | 2 << 26;
		rtwn_bb_write(sc, 0xe7c, reg);
		reg = rtwn_bb_read(sc, 0xe80);
		reg = (reg & ~0x0c000000) | 2 << 26;
		rtwn_bb_write(sc, 0xe80, reg);
		reg = rtwn_bb_read(sc, 0xe88);
		reg = (reg & ~0x0c000000) | 2 << 26;
		rtwn_bb_write(sc, 0xe88, reg);
	}

	/* Write AGC values. */
	/* All AGC entries are written through the same register. */
	for (i = 0; i < prog->agccount; i++) {
		rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
		    prog->agcvals[i]);
		DELAY(1);
	}

	if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) &
	    R92C_HSSI_PARAM2_CCK_HIPWR)
		sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR;
}

/* Periodic calibration timeout handler. */
void
rtwn_calib_to(void *arg)
{
	struct rtwn_pci_softc *sc = arg;

	rtwn_calib(&sc->sc_sc);
}

/* Schedule the next calibration pass in 2 seconds. */
void
rtwn_next_calib(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;

	timeout_add_sec(&sc->calib_to, 2);
}

/* Cancel a pending calibration timeout, if any. */
void
rtwn_cancel_calib(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;

	if (timeout_initialized(&sc->calib_to))
		timeout_del(&sc->calib_to);
}

/* Scan timeout handler: advance to the next channel. */
void
rtwn_scan_to(void *arg)
{
	struct rtwn_pci_softc *sc = arg;

	rtwn_next_scan(&sc->sc_sc);
}

/* Schedule the next scan step in 200ms. */
void
rtwn_pci_next_scan(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;

	timeout_add_msec(&sc->scan_to, 200);
}

/* Cancel a pending scan timeout, if any. */
void
rtwn_cancel_scan(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;

	if (timeout_initialized(&sc->scan_to))
		timeout_del(&sc->scan_to);
}

/* No async command queue on PCI; nothing to wait for. */
void
rtwn_wait_async(void *cookie)
{
	/* nothing to do */
}