/* $OpenBSD: if_rtwn.c,v 1.23 2016/07/21 08:38:33 stsp Exp $ */

/*-
 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * PCI front-end for Realtek RTL8188CE driver.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/timeout.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/intr.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/r92creg.h>
#include <dev/ic/rtwnvar.h>

/*
 * Driver definitions.
 */

/* Page accounting for the chip's on-board Tx packet buffer. */
#define R92C_PUBQ_NPAGES	176
#define R92C_HPQ_NPAGES		41
#define R92C_LPQ_NPAGES		28
#define R92C_TXPKTBUF_COUNT	256
#define R92C_TX_PAGE_COUNT	\
	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
#define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)

#define RTWN_NTXQUEUES			9
#define RTWN_RX_LIST_COUNT		256
#define RTWN_TX_LIST_COUNT		256

/* TX queue indices. */
#define RTWN_BK_QUEUE			0
#define RTWN_BE_QUEUE			1
#define RTWN_VI_QUEUE			2
#define RTWN_VO_QUEUE			3
#define RTWN_BEACON_QUEUE		4
#define RTWN_TXCMD_QUEUE		5
#define RTWN_MGNT_QUEUE			6
#define RTWN_HIGH_QUEUE			7
#define RTWN_HCCA_QUEUE			8

/* Radiotap header prepended to received frames for bpf listeners. */
struct rtwn_rx_radiotap_header {
	struct ieee80211_radiotap_header wr_ihdr;
	uint8_t		wr_flags;
	uint8_t		wr_rate;
	uint16_t	wr_chan_freq;
	uint16_t	wr_chan_flags;
	uint8_t		wr_dbm_antsignal;
} __packed;

#define RTWN_RX_RADIOTAP_PRESENT			\
	(1 << IEEE80211_RADIOTAP_FLAGS |		\
	 1 << IEEE80211_RADIOTAP_RATE |			\
	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)

/* Radiotap header prepended to transmitted frames for bpf listeners. */
struct rtwn_tx_radiotap_header {
	struct ieee80211_radiotap_header wt_ihdr;
	uint8_t		wt_flags;
	uint16_t	wt_chan_freq;
	uint16_t	wt_chan_flags;
} __packed;

#define RTWN_TX_RADIOTAP_PRESENT			\
	(1 << IEEE80211_RADIOTAP_FLAGS |		\
	 1 << IEEE80211_RADIOTAP_CHANNEL)

/* One Rx buffer: its DMA map and the mbuf the hardware fills. */
struct rtwn_rx_data {
	bus_dmamap_t		map;
	struct mbuf		*m;
};

/* Rx descriptor ring plus the per-slot buffers. */
struct rtwn_rx_ring {
	struct r92c_rx_desc_pci	*desc;
	bus_dmamap_t		map;
	bus_dma_segment_t	seg;
	int			nsegs;
	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];

};

/* One Tx buffer: DMA map, pending mbuf and the node it is destined to. */
struct rtwn_tx_data {
	bus_dmamap_t			map;
	struct mbuf			*m;
	struct ieee80211_node		*ni;
};

/* Tx descriptor ring for one hardware queue. */
struct rtwn_tx_ring {
	bus_dmamap_t		map;
	bus_dma_segment_t	seg;
	int			nsegs;
	struct r92c_tx_desc_pci	*desc;
	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
	int			queued;		/* frames waiting for completion */
	int			cur;		/* next slot to fill */
};

struct rtwn_pci_softc {
	struct device		sc_dev;
	struct rtwn_softc	sc_sc;		/* bus-agnostic driver state */

	struct rtwn_rx_ring	rx_ring;
	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
	uint32_t		qfullmsk;	/* bitmask of (nearly) full Tx queues */

	struct timeout		calib_to;
	struct timeout		scan_to;

	/* PCI specific goo. */
	bus_dma_tag_t		sc_dmat;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	void			*sc_ih;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_size_t		sc_mapsize;
	int			sc_cap_off;	/* PCIe capability offset */

#if NBPFILTER > 0
	caddr_t			sc_drvbpf;

	union {
		struct rtwn_rx_radiotap_header th;
		uint8_t	pad[64];
	}			sc_rxtapu;
#define sc_rxtap	sc_rxtapu.th
	int			sc_rxtap_len;

	union {
		struct rtwn_tx_radiotap_header th;
		uint8_t	pad[64];
	}			sc_txtapu;
#define sc_txtap	sc_txtapu.th
	int			sc_txtap_len;
#endif
};

#ifdef RTWN_DEBUG
#define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
extern int rtwn_debug;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

/*
 * PCI configuration space registers.
 */
#define RTWN_PCI_IOBA		0x10	/* i/o mapped base */
#define RTWN_PCI_MMBA		0x18	/* memory mapped base */

static const struct pci_matchid rtwn_pci_devices[] = {
	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RT8188 }
};

int		rtwn_pci_match(struct device *, void *, void *);
void		rtwn_pci_attach(struct device *, struct device *, void *);
int		rtwn_pci_detach(struct device *, int);
int		rtwn_pci_activate(struct device *, int);
int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
void		rtwn_free_rx_list(struct rtwn_pci_softc *);
void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
uint8_t		rtwn_pci_read_1(void *, uint16_t);
uint16_t	rtwn_pci_read_2(void *, uint16_t);
uint32_t	rtwn_pci_read_4(void *, uint16_t);
void		rtwn_rx_frame(struct rtwn_pci_softc *,
		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int);
int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
void		rtwn_tx_done(struct rtwn_pci_softc *, int);
int		rtwn_alloc_buffers(void *);
int		rtwn_pci_init(void *);
void		rtwn_pci_stop(void *);
int		rtwn_intr(void *);
int		rtwn_is_oactive(void *);
int		rtwn_power_on(void *);
int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
int		rtwn_llt_init(struct rtwn_pci_softc *);
int		rtwn_dma_init(void *);
int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
void		rtwn_mac_init(void *);
void		rtwn_bb_init(void *);
void		rtwn_calib_to(void *);
void		rtwn_next_calib(void *);
void		rtwn_cancel_calib(void *);
void		rtwn_scan_to(void *);
void		rtwn_pci_next_scan(void *);
void		rtwn_cancel_scan(void *);
void		rtwn_wait_async(void *);

/* Aliases. */
#define rtwn_bb_write	rtwn_pci_write_4
#define rtwn_bb_read	rtwn_pci_read_4

struct cfdriver rtwn_cd = {
	NULL, "rtwn", DV_IFNET
};

const struct cfattach rtwn_pci_ca = {
	sizeof(struct rtwn_pci_softc),
	rtwn_pci_match,
	rtwn_pci_attach,
	rtwn_pci_detach,
	rtwn_pci_activate
};

/*
 * Match supported PCI devices against the rtwn_pci_devices table.
 */
int
rtwn_pci_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, rtwn_pci_devices,
	    nitems(rtwn_pci_devices)));
}

/*
 * Map registers, establish the interrupt, allocate DMA rings, wire up
 * the bus-agnostic ops vector and hand over to rtwn_attach().
 */
void
rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp;
	int i, error;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr;

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
	timeout_set(&sc->scan_to, rtwn_scan_to, sc);

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/* Map control/status registers. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
	if (error != 0) {
		printf(": can't map mem space\n");
		return;
	}

	/* Prefer MSI; fall back to legacy INTx. */
	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(sc->sc_pc, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
	    rtwn_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s\n", intrstr);

	/* Disable PCIe Active State Power Management (ASPM). */
	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &sc->sc_cap_off, NULL)) {
		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    sc->sc_cap_off + PCI_PCIE_LCSR);
		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
	}

	/* Allocate Tx/Rx buffers. */
	error = rtwn_alloc_rx_list(sc);
	if (error != 0) {
		printf("%s: could not allocate Rx buffers\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	for (i = 0; i < RTWN_NTXQUEUES; i++) {
		error = rtwn_alloc_tx_list(sc, i);
		if (error != 0) {
			printf("%s: could not allocate Tx buffers\n",
			    sc->sc_dev.dv_xname);
			rtwn_free_rx_list(sc);
			return;
		}
	}

	/* Attach the bus-agnostic driver. */
	sc->sc_sc.sc_ops.cookie = sc;
	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
	sc->sc_sc.sc_ops.tx = rtwn_tx;
	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
	sc->sc_sc.sc_ops.init = rtwn_pci_init;
	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc,
	    RTWN_CHIP_88C | RTWN_CHIP_PCI);
	if (error != 0) {
		rtwn_free_rx_list(sc);
		for (i = 0; i < RTWN_NTXQUEUES; i++)
			rtwn_free_tx_list(sc, i);
		return;
	}

	/* ifp is now valid */
	ifp = &sc->sc_sc.sc_ic.ic_if;
#if NBPFILTER > 0
	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
#endif
}

/*
 * Detach: stop timeouts, detach the bus-agnostic driver and release
 * all DMA resources.
 */
int
rtwn_pci_detach(struct device *self, int flags)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
	int s, i;

	s = splnet();

	if (timeout_initialized(&sc->calib_to))
		timeout_del(&sc->calib_to);
	if (timeout_initialized(&sc->scan_to))
		timeout_del(&sc->scan_to);

	rtwn_detach(&sc->sc_sc, flags);

	/* Free Tx/Rx buffers. */
	for (i = 0; i < RTWN_NTXQUEUES; i++)
		rtwn_free_tx_list(sc, i);
	rtwn_free_rx_list(sc);
	splx(s);

	return (0);
}

/* Forward suspend/resume events to the bus-agnostic driver. */
int
rtwn_pci_activate(struct device *self, int act)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;

	return rtwn_activate(&sc->sc_sc, act);
}

/*
 * Initialize one Rx descriptor and hand it to the hardware.  The OWN
 * bit is set only after a write barrier so the chip never sees a
 * half-initialized descriptor.
 */
void
rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
    bus_addr_t addr, size_t len, int idx)
{
	memset(desc, 0, sizeof(*desc));
	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
		((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
	desc->rxbufaddr = htole32(addr);
	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
}

/*
 * Allocate the Rx descriptor ring and one mbuf cluster per slot, then
 * pass all descriptors to the hardware.  On failure everything already
 * allocated is torn down via rtwn_free_rx_list().
 */
int
rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	size_t size;
	int i, error = 0;

	/* Allocate Rx descriptors. */
	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &rx_ring->map);
	if (error != 0) {
		printf("%s: could not create rx desc DMA map\n",
		    sc->sc_dev.dv_xname);
		rx_ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
	    size, (caddr_t *)&rx_ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
		rx_ring->desc = NULL;
		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
	    1, size, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	/* Allocate Rx buffers. */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rx_data->map);
		if (error != 0) {
			printf("%s: could not create rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
		if (rx_data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
	}
fail:	if (error != 0)
		rtwn_free_rx_list(sc);
	return (error);
}

/* Re-arm every Rx descriptor with its existing buffer (after a stop). */
void
rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i;

	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];
		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
	}
}

/*
 * Release the Rx descriptor ring and all per-slot buffers.  Safe to
 * call on a partially-allocated ring (used from the alloc failure path).
 */
void
rtwn_free_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i, s;

	s = splnet();

	if (rx_ring->map) {
		if (rx_ring->desc) {
			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
			    sizeof (struct r92c_rx_desc_pci) *
			    RTWN_RX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
			    rx_ring->nsegs);
			rx_ring->desc = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
		rx_ring->map = NULL;
	}

	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
		rx_data->map = NULL;
	}

	splx(s);
}

/*
 * Allocate the Tx descriptor ring for queue 'qid' and a DMA map per
 * slot.  Descriptors are chained circularly via nextdescaddr.
 */
int
rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i = 0, error = 0;

	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
	    BUS_DMA_NOWAIT, &tx_ring->map);
	if (error != 0) {
		printf("%s: could not create tx ring DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate tx ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
		printf("%s: can't map tx ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load tx ring DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];

		/* setup tx desc */
		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
		  + sizeof(struct r92c_tx_desc_pci)
		  * ((i + 1) % RTWN_TX_LIST_COUNT));

		tx_data = &tx_ring->tx_data[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tx_data->map);
		if (error != 0) {
			printf("%s: could not create tx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		tx_data->m = NULL;
		tx_data->ni = NULL;
	}
fail:
	if (error != 0)
		rtwn_free_tx_list(sc, qid);
	return (error);
}

/*
 * Drop all frames pending on queue 'qid' and clear the descriptors
 * (preserving the chained nextdescaddr fields at the descriptor tail).
 */
void
rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	int i;

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];

		memset(desc, 0, sizeof(*desc) -
		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
		    sizeof(desc->nextdescaddr)));

		if (tx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
			m_freem(tx_data->m);
			tx_data->m = NULL;
			ieee80211_release_node(ic, tx_data->ni);
			tx_data->ni = NULL;
		}
	}

	/*
	 * NOTE(review): sync length is MCLBYTES, not the ring size
	 * (sizeof desc * RTWN_TX_LIST_COUNT) -- confirm this covers the
	 * whole descriptor ring on all configurations.
	 */
	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTWRITE);

	sc->qfullmsk &= ~(1 << qid);
	tx_ring->queued = 0;
	tx_ring->cur = 0;
}

/* Release the Tx descriptor ring and per-slot resources for 'qid'. */
void
rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i;

	if (tx_ring->map != NULL) {
		if (tx_ring->desc != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
			    sizeof (struct r92c_tx_desc_pci) *
			    RTWN_TX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
		}
		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
	}

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];

		if (tx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
			m_freem(tx_data->m);
			tx_data->m = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
	}

	sc->qfullmsk &= ~(1 << qid);
	tx_ring->queued = 0;
	tx_ring->cur = 0;
}

/* Register access ops handed to the bus-agnostic layer. */
void
rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
{
	struct rtwn_pci_softc *sc = cookie;

	bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
}

void
rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
{
	struct rtwn_pci_softc *sc = cookie;

	/*
	 * NOTE(review): the value is byte-swapped here in addition to any
	 * swapping bus_space_write_2() performs -- confirm this does not
	 * double-swap on big-endian hosts.
	 */
	val = htole16(val);
	bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
}

void
rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
{
	struct rtwn_pci_softc *sc = cookie;

	val = htole32(val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
}

uint8_t
rtwn_pci_read_1(void *cookie, uint16_t addr)
{
	struct rtwn_pci_softc *sc = cookie;

	return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
}

uint16_t
rtwn_pci_read_2(void *cookie, uint16_t addr)
{
	struct rtwn_pci_softc *sc = cookie;

	return bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
}

uint32_t
rtwn_pci_read_4(void *cookie, uint16_t addr)
{
	struct rtwn_pci_softc *sc = cookie;

	return bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
}

/*
 * Process one received frame: validate the descriptor, swap in a fresh
 * mbuf cluster (keeping the old one on allocation failure), re-arm the
 * descriptor, and pass the frame to net80211.
 */
void
rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
    struct rtwn_rx_data *rx_data, int desc_idx)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct r92c_rx_phystat *phy = NULL;
	uint32_t rxdw0, rxdw3;
	struct mbuf *m, *m1;
	uint8_t rate;
	int8_t rssi = 0;
	int infosz, pktlen, shift, error;

	rxdw0 = letoh32(rx_desc->rxdw0);
	rxdw3 = letoh32(rx_desc->rxdw3);

	if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
		/*
		 * This should not happen since we setup our Rx filter
		 * to not receive these frames.
		 */
		ifp->if_ierrors++;
		return;
	}

	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
	if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
		ifp->if_ierrors++;
		return;
	}

	rate = MS(rxdw3, R92C_RXDW3_RATE);
	infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
	if (infosz > sizeof(struct r92c_rx_phystat))
		infosz = sizeof(struct r92c_rx_phystat);
	shift = MS(rxdw0, R92C_RXDW0_SHIFT);

	/* Get RSSI from PHY status descriptor if present. */
	if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
		phy = mtod(rx_data->m, struct r92c_rx_phystat *);
		rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
		/* Update our average RSSI. */
		rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
	}

	DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
	    pktlen, rate, infosz, shift, rssi));

	m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (m1 == NULL) {
		ifp->if_ierrors++;
		return;
	}
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
	error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
	    mtod(m1, void *), MCLBYTES, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ);
	if (error != 0) {
		m_freem(m1);

		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
		    rx_data->m, BUS_DMA_NOWAIT))
			panic("%s: could not load old RX mbuf",
			    sc->sc_dev.dv_xname);

		/* Physical address may have changed. */
		rtwn_setup_rx_desc(sc, rx_desc,
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx);

		ifp->if_ierrors++;
		return;
	}

	/* Finalize mbuf. */
	m = rx_data->m;
	rx_data->m = m1;
	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;

	/* Update RX descriptor. */
	rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr,
	    MCLBYTES, desc_idx);

	/* Get ieee80211 frame header. */
	if (rxdw0 & R92C_RXDW0_PHYST)
		m_adj(m, infosz + shift);
	else
		m_adj(m, shift);
	wh = mtod(m, struct ieee80211_frame *);

#if NBPFILTER > 0
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
		struct mbuf mb;

		tap->wr_flags = 0;
		/* Map HW rate index to 802.11 rate. */
		/*
		 * NOTE(review): this immediately overwrites the 0 assigned
		 * above; looks unintentional -- confirm the intended
		 * radiotap flags value.
		 */
		tap->wr_flags = 2;
		if (!(rxdw3 & R92C_RXDW3_HT)) {
			switch (rate) {
			/* CCK. */
			case  0: tap->wr_rate =   2; break;
			case  1: tap->wr_rate =   4; break;
			case  2: tap->wr_rate =  11; break;
			case  3: tap->wr_rate =  22; break;
			/* OFDM. */
			case  4: tap->wr_rate =  12; break;
			case  5: tap->wr_rate =  18; break;
			case  6: tap->wr_rate =  24; break;
			case  7: tap->wr_rate =  36; break;
			case  8: tap->wr_rate =  48; break;
			case  9: tap->wr_rate =  72; break;
			case 10: tap->wr_rate =  96; break;
			case 11: tap->wr_rate = 108; break;
			}
		} else if (rate >= 12) {	/* MCS0~15. */
			/* Bit 7 set means HT MCS instead of rate. */
			tap->wr_rate = 0x80 | (rate - 12);
		}
		tap->wr_dbm_antsignal = rssi;
		tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq);
		tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags);

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_rxtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
	}
#endif

	ni = ieee80211_find_rxnode(ic, wh);
	rxi.rxi_flags = 0;
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = 0;	/* Unused. */
	ieee80211_input(ifp, m, ni, &rxi);
	/* Node is no longer needed. */
	ieee80211_release_node(ic, ni);
}

/*
 * Queue one frame for transmission: pick the hardware queue from the
 * frame's QoS TID (or frame type), fill a Tx descriptor, DMA-load the
 * mbuf and kick the queue.  Returns ENOBUFS if the ring slot is busy.
 */
int
rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni)
{
	struct rtwn_pci_softc *sc = cookie;
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct rtwn_tx_ring *tx_ring;
	struct rtwn_tx_data *data;
	struct r92c_tx_desc_pci *txd;
	uint16_t qos;
	uint8_t raid, type, tid, qid;
	int hasqos, error;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		k = ieee80211_get_txkey(ic, wh, ni);
		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
			return (ENOBUFS);
		wh = mtod(m, struct ieee80211_frame *);
	}

	if ((hasqos = ieee80211_has_qos(wh))) {
		qos = ieee80211_get_qos(wh);
		tid = qos & IEEE80211_QOS_TID;
		qid = ieee80211_up_to_ac(ic, tid);
	} else if (type != IEEE80211_FC0_TYPE_DATA) {
		qid = RTWN_VO_QUEUE;
	} else
		qid = RTWN_BE_QUEUE;

	/* Grab a Tx buffer from the ring. */
	tx_ring = &sc->tx_ring[qid];
	data = &tx_ring->tx_data[tx_ring->cur];
	if (data->m != NULL) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Fill Tx descriptor. */
	txd = &tx_ring->desc[tx_ring->cur];
	/*
	 * NOTE(review): this tests the OWN bit with the Rx-descriptor
	 * macro (R92C_RXDW0_OWN) and swaps with htole32() rather than
	 * letoh32() -- works only if R92C_TXDW0_OWN occupies the same bit
	 * position; confirm against r92creg.h.
	 */
	if (htole32(txd->txdw0) & R92C_RXDW0_OWN) {
		m_freem(m);
		return (ENOBUFS);
	}
	txd->txdw0 = htole32(
	    SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) |
	    SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
	    R92C_TXDW0_FSG | R92C_TXDW0_LSG);
	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
		txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);

	txd->txdw1 = 0;
#ifdef notyet
	if (k != NULL) {
		switch (k->k_cipher) {
		case IEEE80211_CIPHER_WEP40:
		case IEEE80211_CIPHER_WEP104:
		case IEEE80211_CIPHER_TKIP:
			cipher = R92C_TXDW1_CIPHER_RC4;
			break;
		case IEEE80211_CIPHER_CCMP:
			cipher = R92C_TXDW1_CIPHER_AES;
			break;
		default:
			cipher = R92C_TXDW1_CIPHER_NONE;
		}
		txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher));
	}
#endif
	txd->txdw4 = 0;
	txd->txdw5 = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    type == IEEE80211_FC0_TYPE_DATA) {
		if (ic->ic_curmode == IEEE80211_MODE_11B ||
		    (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B))
			raid = R92C_RAID_11B;
		else
			raid = R92C_RAID_11BG;
		txd->txdw1 |= htole32(
		    SM(R92C_TXDW1_MACID, R92C_MACID_BSS) |
		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
		    SM(R92C_TXDW1_RAID, raid) |
		    R92C_TXDW1_AGGBK);

		if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
			txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
			    R92C_TXDW4_HWRTSEN);
		} else if (ic->ic_flags & IEEE80211_F_USEPROT) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
				txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
				    R92C_TXDW4_HWRTSEN);
			} else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
				txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
				    R92C_TXDW4_HWRTSEN);
			}
		}
		/* Send RTS at OFDM24. */
		txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 8));
		txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf));
		/* Send data at OFDM54. */
		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 11));
		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));

	} else {
		txd->txdw1 |= htole32(
		    SM(R92C_TXDW1_MACID, 0) |
		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
		    SM(R92C_TXDW1_RAID, R92C_RAID_11B));

		/* Force CCK1. */
		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
	}
	/* Set sequence number (already little endian). */
	txd->txdseq = *(uint16_t *)wh->i_seq;

	if (!hasqos) {
		/* Use HW sequence numbering for non-QoS frames. */
		txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ);
		txd->txdseq |= htole16(0x8000);		/* WTF? */
	} else
		txd->txdw4 |= htole32(R92C_TXDW4_QOS);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error && error != EFBIG) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m);
		return error;
	}
	if (error != 0) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m);
			return error;
		}
	}

	txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
	txd->txbufsize = htole16(m->m_pkthdr.len);
	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	/* Hand the descriptor to the hardware only after the barrier. */
	txd->txdw0 |= htole32(R92C_TXDW0_OWN);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTWRITE);

	data->m = m;
	data->ni = ni;

#if NBPFILTER > 0
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
		struct mbuf mb;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_txtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
	}
#endif

	tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
	tx_ring->queued++;

	if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
		sc->qfullmsk |= (1 << qid);

	/* Kick TX. */
	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));

	return (0);
}

/*
 * Reclaim completed Tx buffers on queue 'qid' (descriptors the hardware
 * has released by clearing OWN) and restart output if the queue drained.
 */
void
rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	struct r92c_tx_desc_pci *tx_desc;
	int i;

	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTREAD);

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];
		if (tx_data->m == NULL)
			continue;

		tx_desc = &tx_ring->desc[i];
		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
			continue;

		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
		m_freem(tx_data->m);
		tx_data->m = NULL;
		ieee80211_release_node(ic, tx_data->ni);
		tx_data->ni = NULL;

		ifp->if_opackets++;
		sc->sc_sc.sc_tx_timer = 0;
		tx_ring->queued--;
	}

	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
		sc->qfullmsk &= ~(1 << qid);

	if (sc->qfullmsk == 0) {
		ifq_clr_oactive(&ifp->if_snd);
		(*ifp->if_start)(ifp);
	}
}

int
rtwn_alloc_buffers(void *cookie)
{
	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
	return (0);
}

int
rtwn_pci_init(void *cookie)
{
	/* nothing to do */
	return (0);
}

/*
 * Stop the hardware: mask interrupts, pause Tx, power the MAC/BB down
 * and drain all rings.
 */
void
rtwn_pci_stop(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;
	uint16_t reg;
	int i, s;

	s = splnet();

	/* Disable interrupts. */
	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);

	/* Stop hardware. */
	rtwn_pci_write_1(sc, R92C_TXPAUSE, 0xff);
	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
	reg = rtwn_pci_read_2(sc, R92C_CR);
	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
	    R92C_CR_ENSEC);
	rtwn_pci_write_2(sc, R92C_CR, reg);
	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
		rtwn_fw_reset(&sc->sc_sc);
	/* TODO: linux does additional btcoex stuff here */
	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80);	/* linux magic number */
	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23);	/* ditto */
	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e);	/* differs in btcoex */
	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x0e);
	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);

	for (i = 0; i < RTWN_NTXQUEUES; i++)
		rtwn_reset_tx_list(sc, i);
	rtwn_reset_rx_list(sc);

	splx(s);
}

/* Interrupt handler: acknowledge and dispatch hardware events. */
int
rtwn_intr(void *xsc)
{
	struct rtwn_pci_softc *sc = xsc;
	u_int32_t status;
	int i;

	status = rtwn_pci_read_4(sc, R92C_HISR);
	if (status == 0 || status == 0xffffffff)
		return (0);

	/* Disable interrupts. */
	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);

	/* Ack interrupts. */
	rtwn_pci_write_4(sc, R92C_HISR, status);

	/* Vendor driver treats RX errors like ROK...
*/ 1213 if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) { 1214 bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0, 1215 sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT, 1216 BUS_DMASYNC_POSTREAD); 1217 1218 for (i = 0; i < RTWN_RX_LIST_COUNT; i++) { 1219 struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i]; 1220 struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i]; 1221 1222 if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN) 1223 continue; 1224 1225 rtwn_rx_frame(sc, rx_desc, rx_data, i); 1226 } 1227 } 1228 1229 if (status & R92C_IMR_BDOK) 1230 rtwn_tx_done(sc, RTWN_BEACON_QUEUE); 1231 if (status & R92C_IMR_HIGHDOK) 1232 rtwn_tx_done(sc, RTWN_HIGH_QUEUE); 1233 if (status & R92C_IMR_MGNTDOK) 1234 rtwn_tx_done(sc, RTWN_MGNT_QUEUE); 1235 if (status & R92C_IMR_BKDOK) 1236 rtwn_tx_done(sc, RTWN_BK_QUEUE); 1237 if (status & R92C_IMR_BEDOK) 1238 rtwn_tx_done(sc, RTWN_BE_QUEUE); 1239 if (status & R92C_IMR_VIDOK) 1240 rtwn_tx_done(sc, RTWN_VI_QUEUE); 1241 if (status & R92C_IMR_VODOK) 1242 rtwn_tx_done(sc, RTWN_VO_QUEUE); 1243 1244 /* Enable interrupts. */ 1245 rtwn_pci_write_4(sc, R92C_HIMR, RTWN_INT_ENABLE); 1246 1247 return (1); 1248 } 1249 1250 int 1251 rtwn_is_oactive(void *cookie) 1252 { 1253 struct rtwn_pci_softc *sc = cookie; 1254 1255 return (sc->qfullmsk != 0); 1256 } 1257 1258 int 1259 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data) 1260 { 1261 int ntries; 1262 1263 rtwn_pci_write_4(sc, R92C_LLT_INIT, 1264 SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) | 1265 SM(R92C_LLT_INIT_ADDR, addr) | 1266 SM(R92C_LLT_INIT_DATA, data)); 1267 /* Wait for write operation to complete. */ 1268 for (ntries = 0; ntries < 20; ntries++) { 1269 if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == 1270 R92C_LLT_INIT_OP_NO_ACTIVE) 1271 return (0); 1272 DELAY(5); 1273 } 1274 return (ETIMEDOUT); 1275 } 1276 1277 int 1278 rtwn_llt_init(struct rtwn_pci_softc *sc) 1279 { 1280 int i, error; 1281 1282 /* Reserve pages [0; R92C_TX_PAGE_COUNT]. 
*/ 1283 for (i = 0; i < R92C_TX_PAGE_COUNT; i++) { 1284 if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) 1285 return (error); 1286 } 1287 /* NB: 0xff indicates end-of-list. */ 1288 if ((error = rtwn_llt_write(sc, i, 0xff)) != 0) 1289 return (error); 1290 /* 1291 * Use pages [R92C_TX_PAGE_COUNT + 1; R92C_TXPKTBUF_COUNT - 1] 1292 * as ring buffer. 1293 */ 1294 for (++i; i < R92C_TXPKTBUF_COUNT - 1; i++) { 1295 if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) 1296 return (error); 1297 } 1298 /* Make the last page point to the beginning of the ring buffer. */ 1299 error = rtwn_llt_write(sc, i, R92C_TX_PAGE_COUNT + 1); 1300 return (error); 1301 } 1302 1303 int 1304 rtwn_power_on(void *cookie) 1305 { 1306 struct rtwn_pci_softc *sc = cookie; 1307 uint32_t reg; 1308 int ntries; 1309 1310 /* Wait for autoload done bit. */ 1311 for (ntries = 0; ntries < 1000; ntries++) { 1312 if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) & 1313 R92C_APS_FSMCO_PFM_ALDN) 1314 break; 1315 DELAY(5); 1316 } 1317 if (ntries == 1000) { 1318 printf("%s: timeout waiting for chip autoload\n", 1319 sc->sc_dev.dv_xname); 1320 return (ETIMEDOUT); 1321 } 1322 1323 /* Unlock ISO/CLK/Power control register. */ 1324 rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0); 1325 1326 /* TODO: check if we need this for 8188CE */ 1327 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1328 /* bt coex */ 1329 reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO); 1330 reg |= (R92C_APS_FSMCO_SOP_ABG | 1331 R92C_APS_FSMCO_SOP_AMB | 1332 R92C_APS_FSMCO_XOP_BTCK); 1333 rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg); 1334 } 1335 1336 /* Move SPS into PWM mode. */ 1337 rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b); 1338 DELAY(100); 1339 1340 /* Set low byte to 0x0f, leave others unchanged. 
*/ 1341 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, 1342 (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f); 1343 1344 /* TODO: check if we need this for 8188CE */ 1345 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1346 /* bt coex */ 1347 reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL); 1348 reg &= (~0x00024800); /* XXX magic from linux */ 1349 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg); 1350 } 1351 1352 rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL, 1353 (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) | 1354 R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR); 1355 DELAY(200); 1356 1357 /* TODO: linux does additional btcoex stuff here */ 1358 1359 /* Auto enable WLAN. */ 1360 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1361 rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); 1362 for (ntries = 0; ntries < 1000; ntries++) { 1363 if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1364 R92C_APS_FSMCO_APFM_ONMAC)) 1365 break; 1366 DELAY(5); 1367 } 1368 if (ntries == 1000) { 1369 printf("%s: timeout waiting for MAC auto ON\n", 1370 sc->sc_dev.dv_xname); 1371 return (ETIMEDOUT); 1372 } 1373 1374 /* Enable radio, GPIO and LED functions. */ 1375 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1376 R92C_APS_FSMCO_AFSM_PCIE | 1377 R92C_APS_FSMCO_PDN_EN | 1378 R92C_APS_FSMCO_PFM_ALDN); 1379 /* Release RF digital isolation. 
*/ 1380 rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL, 1381 rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR); 1382 1383 if (sc->sc_sc.chip & RTWN_CHIP_92C) 1384 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77); 1385 else 1386 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22); 1387 1388 rtwn_pci_write_4(sc, R92C_INT_MIG, 0); 1389 1390 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1391 /* bt coex */ 1392 reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2); 1393 reg &= 0xfd; /* XXX magic from linux */ 1394 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg); 1395 } 1396 1397 rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG, 1398 rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL); 1399 1400 reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL); 1401 if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) { 1402 printf("%s: radio is disabled by hardware switch\n", 1403 sc->sc_dev.dv_xname); 1404 return (EPERM); /* :-) */ 1405 } 1406 1407 /* Initialize MAC. */ 1408 reg = rtwn_pci_read_1(sc, R92C_APSD_CTRL); 1409 rtwn_pci_write_1(sc, R92C_APSD_CTRL, 1410 rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF); 1411 for (ntries = 0; ntries < 200; ntries++) { 1412 if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) & 1413 R92C_APSD_CTRL_OFF_STATUS)) 1414 break; 1415 DELAY(500); 1416 } 1417 if (ntries == 200) { 1418 printf("%s: timeout waiting for MAC initialization\n", 1419 sc->sc_dev.dv_xname); 1420 return (ETIMEDOUT); 1421 } 1422 1423 /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. 
*/ 1424 reg = rtwn_pci_read_2(sc, R92C_CR); 1425 reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | 1426 R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | 1427 R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | 1428 R92C_CR_ENSEC; 1429 rtwn_pci_write_2(sc, R92C_CR, reg); 1430 1431 rtwn_pci_write_1(sc, 0xfe10, 0x19); 1432 1433 return (0); 1434 } 1435 1436 int 1437 rtwn_dma_init(void *cookie) 1438 { 1439 struct rtwn_pci_softc *sc = cookie; 1440 uint32_t reg; 1441 int error; 1442 1443 /* Initialize LLT table. */ 1444 error = rtwn_llt_init(sc); 1445 if (error != 0) 1446 return error; 1447 1448 /* Set number of pages for normal priority queue. */ 1449 rtwn_pci_write_2(sc, R92C_RQPN_NPQ, 0); 1450 rtwn_pci_write_4(sc, R92C_RQPN, 1451 /* Set number of pages for public queue. */ 1452 SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) | 1453 /* Set number of pages for high priority queue. */ 1454 SM(R92C_RQPN_HPQ, R92C_HPQ_NPAGES) | 1455 /* Set number of pages for low priority queue. */ 1456 SM(R92C_RQPN_LPQ, R92C_LPQ_NPAGES) | 1457 /* Load values. */ 1458 R92C_RQPN_LD); 1459 1460 rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY); 1461 rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY); 1462 rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, 1463 R92C_TX_PAGE_BOUNDARY); 1464 rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY); 1465 rtwn_pci_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY); 1466 1467 reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL); 1468 reg &= ~R92C_TRXDMA_CTRL_QMAP_M; 1469 reg |= 0xF771; 1470 rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg); 1471 1472 rtwn_pci_write_4(sc, R92C_TCR, 1473 R92C_TCR_CFENDFORM | (1 << 12) | (1 << 13)); 1474 1475 /* Configure Tx DMA. 
*/ 1476 rtwn_pci_write_4(sc, R92C_BKQ_DESA, 1477 sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr); 1478 rtwn_pci_write_4(sc, R92C_BEQ_DESA, 1479 sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr); 1480 rtwn_pci_write_4(sc, R92C_VIQ_DESA, 1481 sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr); 1482 rtwn_pci_write_4(sc, R92C_VOQ_DESA, 1483 sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr); 1484 rtwn_pci_write_4(sc, R92C_BCNQ_DESA, 1485 sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr); 1486 rtwn_pci_write_4(sc, R92C_MGQ_DESA, 1487 sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr); 1488 rtwn_pci_write_4(sc, R92C_HQ_DESA, 1489 sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr); 1490 1491 /* Configure Rx DMA. */ 1492 rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr); 1493 1494 /* Set Tx/Rx transfer page boundary. */ 1495 rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff); 1496 1497 /* Set Tx/Rx transfer page size. */ 1498 rtwn_pci_write_1(sc, R92C_PBP, 1499 SM(R92C_PBP_PSRX, R92C_PBP_128) | 1500 SM(R92C_PBP_PSTX, R92C_PBP_128)); 1501 1502 return (0); 1503 } 1504 1505 int 1506 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len) 1507 { 1508 struct rtwn_pci_softc *sc = cookie; 1509 uint32_t reg; 1510 int off, mlen, error = 0, i; 1511 1512 reg = rtwn_pci_read_4(sc, R92C_MCUFWDL); 1513 reg = RW(reg, R92C_MCUFWDL_PAGE, page); 1514 rtwn_pci_write_4(sc, R92C_MCUFWDL, reg); 1515 1516 DELAY(5); 1517 1518 off = R92C_FW_START_ADDR; 1519 while (len > 0) { 1520 if (len > 196) 1521 mlen = 196; 1522 else if (len > 4) 1523 mlen = 4; 1524 else 1525 mlen = 1; 1526 for (i = 0; i < mlen; i++) 1527 rtwn_pci_write_1(sc, off++, buf[i]); 1528 buf += mlen; 1529 len -= mlen; 1530 } 1531 1532 return (error); 1533 } 1534 1535 int 1536 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len) 1537 { 1538 struct rtwn_pci_softc *sc = cookie; 1539 const char *name; 1540 int error; 1541 1542 if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | 
RTWN_CHIP_92C)) == 1543 RTWN_CHIP_UMC_A_CUT) 1544 name = "rtwn-rtl8192cfwU"; 1545 else 1546 name = "rtwn-rtl8192cfwU_B"; 1547 1548 error = loadfirmware(name, fw, len); 1549 if (error) 1550 printf("%s: could not read firmware %s (error %d)\n", 1551 sc->sc_dev.dv_xname, name, error); 1552 return (error); 1553 } 1554 1555 void 1556 rtwn_mac_init(void *cookie) 1557 { 1558 struct rtwn_pci_softc *sc = cookie; 1559 int i; 1560 1561 /* Write MAC initialization values. */ 1562 for (i = 0; i < nitems(rtl8192ce_mac); i++) 1563 rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg, 1564 rtl8192ce_mac[i].val); 1565 } 1566 1567 void 1568 rtwn_bb_init(void *cookie) 1569 { 1570 struct rtwn_pci_softc *sc = cookie; 1571 const struct r92c_bb_prog *prog; 1572 uint32_t reg; 1573 int i; 1574 1575 /* Enable BB and RF. */ 1576 rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN, 1577 rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) | 1578 R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST | 1579 R92C_SYS_FUNC_EN_DIO_RF); 1580 1581 rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83); 1582 1583 rtwn_pci_write_1(sc, R92C_RF_CTRL, 1584 R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB); 1585 1586 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, 1587 R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA | 1588 R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST | 1589 R92C_SYS_FUNC_EN_BBRSTB); 1590 1591 rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80); 1592 1593 rtwn_pci_write_4(sc, R92C_LEDCFG0, 1594 rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000); 1595 1596 /* Select BB programming. */ 1597 prog = (sc->sc_sc.chip & RTWN_CHIP_92C) ? 1598 &rtl8192ce_bb_prog_2t : &rtl8192ce_bb_prog_1t; 1599 1600 /* Write BB initialization values. */ 1601 for (i = 0; i < prog->count; i++) { 1602 rtwn_bb_write(sc, prog->regs[i], prog->vals[i]); 1603 DELAY(1); 1604 } 1605 1606 if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) { 1607 /* 8192C 1T only configuration. 
*/ 1608 reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO); 1609 reg = (reg & ~0x00000003) | 0x2; 1610 rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg); 1611 1612 reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO); 1613 reg = (reg & ~0x00300033) | 0x00200022; 1614 rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg); 1615 1616 reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING); 1617 reg = (reg & ~0xff000000) | 0x45 << 24; 1618 rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg); 1619 1620 reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA); 1621 reg = (reg & ~0x000000ff) | 0x23; 1622 rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg); 1623 1624 reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1); 1625 reg = (reg & ~0x00000030) | 1 << 4; 1626 rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg); 1627 1628 reg = rtwn_bb_read(sc, 0xe74); 1629 reg = (reg & ~0x0c000000) | 2 << 26; 1630 rtwn_bb_write(sc, 0xe74, reg); 1631 reg = rtwn_bb_read(sc, 0xe78); 1632 reg = (reg & ~0x0c000000) | 2 << 26; 1633 rtwn_bb_write(sc, 0xe78, reg); 1634 reg = rtwn_bb_read(sc, 0xe7c); 1635 reg = (reg & ~0x0c000000) | 2 << 26; 1636 rtwn_bb_write(sc, 0xe7c, reg); 1637 reg = rtwn_bb_read(sc, 0xe80); 1638 reg = (reg & ~0x0c000000) | 2 << 26; 1639 rtwn_bb_write(sc, 0xe80, reg); 1640 reg = rtwn_bb_read(sc, 0xe88); 1641 reg = (reg & ~0x0c000000) | 2 << 26; 1642 rtwn_bb_write(sc, 0xe88, reg); 1643 } 1644 1645 /* Write AGC values. 
*/ 1646 for (i = 0; i < prog->agccount; i++) { 1647 rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, 1648 prog->agcvals[i]); 1649 DELAY(1); 1650 } 1651 1652 if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & 1653 R92C_HSSI_PARAM2_CCK_HIPWR) 1654 sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR; 1655 } 1656 1657 void 1658 rtwn_calib_to(void *arg) 1659 { 1660 struct rtwn_pci_softc *sc = arg; 1661 1662 rtwn_calib(&sc->sc_sc); 1663 } 1664 1665 void 1666 rtwn_next_calib(void *cookie) 1667 { 1668 struct rtwn_pci_softc *sc = cookie; 1669 1670 timeout_add_sec(&sc->calib_to, 2); 1671 } 1672 1673 void 1674 rtwn_cancel_calib(void *cookie) 1675 { 1676 struct rtwn_pci_softc *sc = cookie; 1677 1678 if (timeout_initialized(&sc->calib_to)) 1679 timeout_del(&sc->calib_to); 1680 } 1681 1682 void 1683 rtwn_scan_to(void *arg) 1684 { 1685 struct rtwn_pci_softc *sc = arg; 1686 1687 rtwn_next_scan(&sc->sc_sc); 1688 } 1689 1690 void 1691 rtwn_pci_next_scan(void *cookie) 1692 { 1693 struct rtwn_pci_softc *sc = cookie; 1694 1695 timeout_add_msec(&sc->scan_to, 200); 1696 } 1697 1698 void 1699 rtwn_cancel_scan(void *cookie) 1700 { 1701 struct rtwn_pci_softc *sc = cookie; 1702 1703 if (timeout_initialized(&sc->scan_to)) 1704 timeout_del(&sc->scan_to); 1705 } 1706 1707 void 1708 rtwn_wait_async(void *cookie) 1709 { 1710 /* nothing to do */ 1711 } 1712