1 /* $OpenBSD: if_rtwn.c,v 1.22 2016/06/17 10:53:55 stsp Exp $ */ 2 3 /*- 4 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr> 5 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * PCI front-end for Realtek RTL8188CE driver. 22 */ 23 24 #include "bpfilter.h" 25 26 #include <sys/param.h> 27 #include <sys/sockio.h> 28 #include <sys/mbuf.h> 29 #include <sys/kernel.h> 30 #include <sys/socket.h> 31 #include <sys/systm.h> 32 #include <sys/task.h> 33 #include <sys/timeout.h> 34 #include <sys/conf.h> 35 #include <sys/device.h> 36 #include <sys/endian.h> 37 38 #include <machine/bus.h> 39 #include <machine/intr.h> 40 41 #if NBPFILTER > 0 42 #include <net/bpf.h> 43 #endif 44 #include <net/if.h> 45 #include <net/if_dl.h> 46 #include <net/if_media.h> 47 48 #include <netinet/in.h> 49 #include <netinet/if_ether.h> 50 51 #include <net80211/ieee80211_var.h> 52 #include <net80211/ieee80211_radiotap.h> 53 54 #include <dev/pci/pcireg.h> 55 #include <dev/pci/pcivar.h> 56 #include <dev/pci/pcidevs.h> 57 58 #include <dev/ic/r92creg.h> 59 #include <dev/ic/rtwnvar.h> 60 61 /* 62 * Driver definitions. 
 */

/*
 * Number of 128-byte pages of the chip's Tx packet buffer assigned to
 * the public, high-priority and low-priority queues, and the derived
 * boundary used when initializing the LLT.
 */
#define R92C_PUBQ_NPAGES	176
#define R92C_HPQ_NPAGES		41
#define R92C_LPQ_NPAGES		28
#define R92C_TXPKTBUF_COUNT	256
#define R92C_TX_PAGE_COUNT	\
	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
#define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)

#define RTWN_NTXQUEUES		9
#define RTWN_RX_LIST_COUNT	256
#define RTWN_TX_LIST_COUNT	256

/* TX queue indices. */
#define RTWN_BK_QUEUE		0
#define RTWN_BE_QUEUE		1
#define RTWN_VI_QUEUE		2
#define RTWN_VO_QUEUE		3
#define RTWN_BEACON_QUEUE	4
#define RTWN_TXCMD_QUEUE	5
#define RTWN_MGNT_QUEUE		6
#define RTWN_HIGH_QUEUE		7
#define RTWN_HCCA_QUEUE		8

/* Radiotap header prepended to received frames for bpf(4) listeners. */
struct rtwn_rx_radiotap_header {
	struct ieee80211_radiotap_header wr_ihdr;
	uint8_t		wr_flags;
	uint8_t		wr_rate;
	uint16_t	wr_chan_freq;
	uint16_t	wr_chan_flags;
	uint8_t		wr_dbm_antsignal;
} __packed;

#define RTWN_RX_RADIOTAP_PRESENT			\
	(1 << IEEE80211_RADIOTAP_FLAGS |		\
	 1 << IEEE80211_RADIOTAP_RATE |			\
	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)

/* Radiotap header prepended to transmitted frames for bpf(4) listeners. */
struct rtwn_tx_radiotap_header {
	struct ieee80211_radiotap_header wt_ihdr;
	uint8_t		wt_flags;
	uint16_t	wt_chan_freq;
	uint16_t	wt_chan_flags;
} __packed;

#define RTWN_TX_RADIOTAP_PRESENT			\
	(1 << IEEE80211_RADIOTAP_FLAGS |		\
	 1 << IEEE80211_RADIOTAP_CHANNEL)

/* Per-buffer Rx state: DMA map plus the mbuf the chip DMAs into. */
struct rtwn_rx_data {
	bus_dmamap_t		map;
	struct mbuf		*m;
};

/* Rx descriptor ring and its backing DMA memory. */
struct rtwn_rx_ring {
	struct r92c_rx_desc_pci	*desc;
	bus_dmamap_t		map;
	bus_dma_segment_t	seg;
	int			nsegs;
	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];

};
/* Per-buffer Tx state: DMA map, pending mbuf, and its destination node. */
struct rtwn_tx_data {
	bus_dmamap_t		map;
	struct mbuf		*m;
	struct ieee80211_node	*ni;
};

/* Tx descriptor ring; 'queued'/'cur' track occupancy and insert point. */
struct rtwn_tx_ring {
	bus_dmamap_t		map;
	bus_dma_segment_t	seg;
	int			nsegs;
	struct r92c_tx_desc_pci	*desc;
	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
	int			queued;
	int			cur;
};

/* PCI bus glue wrapped around the bus-agnostic rtwn(4) softc. */
struct rtwn_pci_softc {
	struct device		sc_dev;
	struct rtwn_softc	sc_sc;

	struct rtwn_rx_ring	rx_ring;
	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
	uint32_t		qfullmsk;	/* bitmask of full Tx queues */

	struct timeout		calib_to;
	struct timeout		scan_to;

	/* PCI specific goo. */
	bus_dma_tag_t		sc_dmat;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	void			*sc_ih;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_size_t		sc_mapsize;
	int			sc_cap_off;	/* PCIe capability offset */

#if NBPFILTER > 0
	caddr_t			sc_drvbpf;

	union {
		struct rtwn_rx_radiotap_header th;
		uint8_t	pad[64];
	} sc_rxtapu;
#define sc_rxtap	sc_rxtapu.th
	int			sc_rxtap_len;

	union {
		struct rtwn_tx_radiotap_header th;
		uint8_t	pad[64];
	} sc_txtapu;
#define sc_txtap	sc_txtapu.th
	int			sc_txtap_len;
#endif
};

#ifdef RTWN_DEBUG
#define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
extern int rtwn_debug;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

/*
 * PCI configuration space registers.
 */
#define RTWN_PCI_IOBA		0x10	/* i/o mapped base */
#define RTWN_PCI_MMBA		0x18	/* memory mapped base */

static const struct pci_matchid rtwn_pci_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8188 }
};

int		rtwn_pci_match(struct device *, void *, void *);
void		rtwn_pci_attach(struct device *, struct device *, void *);
int		rtwn_pci_detach(struct device *, int);
int		rtwn_pci_activate(struct device *, int);
int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
void		rtwn_free_rx_list(struct rtwn_pci_softc *);
void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
uint8_t		rtwn_pci_read_1(void *, uint16_t);
uint16_t	rtwn_pci_read_2(void *, uint16_t);
uint32_t	rtwn_pci_read_4(void *, uint16_t);
void		rtwn_rx_frame(struct rtwn_pci_softc *,
		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int);
int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
void		rtwn_tx_done(struct rtwn_pci_softc *, int);
int		rtwn_alloc_buffers(void *);
int		rtwn_pci_init(void *);
void		rtwn_pci_stop(void *);
int		rtwn_intr(void *);
int		rtwn_is_oactive(void *);
int		rtwn_power_on(void *);
int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
int		rtwn_llt_init(struct rtwn_pci_softc *);
int		rtwn_dma_init(void *);
int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
void		rtwn_mac_init(void *);
void		rtwn_bb_init(void *);
void		rtwn_calib_to(void *);
void		rtwn_next_calib(void *);
void		rtwn_cancel_calib(void *);
void		rtwn_scan_to(void *);
void		rtwn_pci_next_scan(void *);
void		rtwn_cancel_scan(void *);
void		rtwn_wait_async(void *);

/* Aliases. */
#define rtwn_bb_write	rtwn_pci_write_4
#define rtwn_bb_read	rtwn_pci_read_4

struct cfdriver rtwn_cd = {
	NULL, "rtwn", DV_IFNET
};

const struct cfattach rtwn_pci_ca = {
	sizeof(struct rtwn_pci_softc),
	rtwn_pci_match,
	rtwn_pci_attach,
	rtwn_pci_detach,
	rtwn_pci_activate
};

/*
 * Match against the table of supported PCI IDs (RTL8188CE).
 */
int
rtwn_pci_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, rtwn_pci_devices,
	    nitems(rtwn_pci_devices)));
}

/*
 * Map registers, hook up the interrupt, allocate DMA rings and attach
 * the bus-agnostic rtwn(4) driver through the sc_ops vector.
 */
void
rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp;
	int i, error;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr;

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
	timeout_set(&sc->scan_to, rtwn_scan_to, sc);

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/* Map control/status registers. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
	if (error != 0) {
		printf(": can't map mem space\n");
		return;
	}

	/* Prefer MSI; fall back to legacy INTx. */
	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(sc->sc_pc, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
	    rtwn_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s\n", intrstr);

	/* Disable PCIe Active State Power Management (ASPM). */
	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
	    &sc->sc_cap_off, NULL)) {
		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    sc->sc_cap_off + PCI_PCIE_LCSR);
		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
	}

	/* Allocate Tx/Rx buffers. */
	error = rtwn_alloc_rx_list(sc);
	if (error != 0) {
		printf("%s: could not allocate Rx buffers\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	for (i = 0; i < RTWN_NTXQUEUES; i++) {
		error = rtwn_alloc_tx_list(sc, i);
		if (error != 0) {
			printf("%s: could not allocate Tx buffers\n",
			    sc->sc_dev.dv_xname);
			rtwn_free_rx_list(sc);
			return;
		}
	}

	/* Attach the bus-agnostic driver. */
	sc->sc_sc.sc_ops.cookie = sc;
	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
	sc->sc_sc.sc_ops.tx = rtwn_tx;
	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
	sc->sc_sc.sc_ops.init = rtwn_pci_init;
	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc,
	    RTWN_CHIP_88C | RTWN_CHIP_PCI);
	if (error != 0) {
		rtwn_free_rx_list(sc);
		for (i = 0; i < RTWN_NTXQUEUES; i++)
			rtwn_free_tx_list(sc, i);
		return;
	}

	/* ifp is now valid */
	ifp = &sc->sc_sc.sc_ic.ic_if;
#if NBPFILTER > 0
	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
#endif
}

/*
 * Detach: cancel pending timeouts, detach the bus-agnostic driver and
 * release all Tx/Rx DMA resources.
 */
int
rtwn_pci_detach(struct device *self, int flags)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
	int s, i;

	s = splnet();

	if (timeout_initialized(&sc->calib_to))
		timeout_del(&sc->calib_to);
	if (timeout_initialized(&sc->scan_to))
		timeout_del(&sc->scan_to);

	rtwn_detach(&sc->sc_sc, flags);

	/* Free Tx/Rx buffers. */
	for (i = 0; i < RTWN_NTXQUEUES; i++)
		rtwn_free_tx_list(sc, i);
	rtwn_free_rx_list(sc);
	splx(s);

	return (0);
}

/*
 * Delegate suspend/resume power-state changes to the bus-agnostic driver.
 */
int
rtwn_pci_activate(struct device *self, int act)
{
	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;

	return rtwn_activate(&sc->sc_sc, act);
}

/*
 * (Re-)initialize one Rx descriptor for a buffer of 'len' bytes at DMA
 * address 'addr'.  The EOR bit marks the last descriptor of the ring.
 * The OWN bit is set only after a write barrier so the chip never sees
 * a half-initialized descriptor.
 */
void
rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
    bus_addr_t addr, size_t len, int idx)
{
	memset(desc, 0, sizeof(*desc));
	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
	    ((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
	desc->rxbufaddr = htole32(addr);
	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
}

/*
 * Allocate the Rx descriptor ring and one cluster mbuf per descriptor,
 * and hand all descriptors to the hardware.  On any failure everything
 * allocated so far is released via rtwn_free_rx_list().
 */
int
rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	size_t size;
	int i, error = 0;

	/* Allocate Rx descriptors. */
	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &rx_ring->map);
	if (error != 0) {
		printf("%s: could not create rx desc DMA map\n",
		    sc->sc_dev.dv_xname);
		rx_ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
	    size, (caddr_t *)&rx_ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
		rx_ring->desc = NULL;
		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
	    1, size, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load rx desc\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
	    BUS_DMASYNC_PREWRITE);

	/* Allocate Rx buffers. */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rx_data->map);
		if (error != 0) {
			printf("%s: could not create rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
		if (rx_data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT | BUS_DMA_READ);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
	}
fail:	if (error != 0)
		rtwn_free_rx_list(sc);
	return (error);
}

/*
 * Give every Rx descriptor back to the hardware, reusing the already
 * loaded buffers.
 */
void
rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i;

	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];
		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
	}
}

/*
 * Tear down the Rx descriptor ring and free all Rx buffers.  Safe to
 * call on a partially constructed ring (NULL checks throughout).
 */
void
rtwn_free_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i, s;

	s = splnet();

	if (rx_ring->map) {
		if (rx_ring->desc) {
			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
			    sizeof (struct r92c_rx_desc_pci) *
			    RTWN_RX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
			    rx_ring->nsegs);
			rx_ring->desc = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
		rx_ring->map = NULL;
	}

	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data =
		    &rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
		rx_data->map = NULL;
	}

	splx(s);
}

/*
 * Allocate the Tx descriptor ring for queue 'qid' and a DMA map per
 * slot.  Descriptors are chained circularly via nextdescaddr.  On any
 * failure everything allocated so far is released.
 */
int
rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i = 0, error = 0;

	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
	    BUS_DMA_NOWAIT, &tx_ring->map);
	if (error != 0) {
		printf("%s: could not create tx ring DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate tx ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
		printf("%s: can't map tx ring DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load tx ring DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];

		/* setup tx desc: link each descriptor to its successor */
		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
		    + sizeof(struct r92c_tx_desc_pci)
		    * ((i + 1) % RTWN_TX_LIST_COUNT));

		tx_data = &tx_ring->tx_data[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tx_data->map);
		if (error != 0) {
			printf("%s: could not create tx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		tx_data->m = NULL;
		tx_data->ni = NULL;
	}
fail:
	if (error != 0)
		rtwn_free_tx_list(sc, qid);
	return (error);
}

/*
 * Drop all frames still queued on 'qid' and clear the descriptors
 * (except the link fields, which stay valid) so the ring can be reused.
 */
void
rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	int i;

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];

		/* Zero everything up to the trailing link/reserved fields. */
		memset(desc, 0, sizeof(*desc) -
		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
		    sizeof(desc->nextdescaddr)));

		if (tx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
			m_freem(tx_data->m);
			tx_data->m = NULL;
			ieee80211_release_node(ic, tx_data->ni);
			tx_data->ni = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTWRITE);

	sc->qfullmsk &= ~(1 << qid);
	tx_ring->queued = 0;
	tx_ring->cur = 0;
}

/*
 * Tear down the Tx descriptor ring of queue 'qid' and free any mbufs
 * still attached to its slots.
 */
void
rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
{
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	int i;

	if (tx_ring->map != NULL) {
		if (tx_ring->desc != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
			    sizeof (struct r92c_tx_desc_pci) *
			    RTWN_TX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
		}
		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
	}

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];

		if (tx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
			m_freem(tx_data->m);
			tx_data->m = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
	}

	sc->qfullmsk &= ~(1 << qid);
	tx_ring->queued = 0;
	tx_ring->cur = 0;
}

/* MMIO register accessors used through sc_ops by the bus-agnostic driver. */
void
rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
{
	struct rtwn_pci_softc *sc = cookie;

	bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
}

void
rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
{
	struct rtwn_pci_softc *sc = cookie;

	val = htole16(val);
	bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
}

void
rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
{
	struct rtwn_pci_softc *sc = cookie;

	val = htole32(val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
}

uint8_t
rtwn_pci_read_1(void *cookie, uint16_t addr)
{
	struct rtwn_pci_softc *sc = cookie;

	return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
}

uint16_t
rtwn_pci_read_2(void *cookie, uint16_t addr)
{
	struct rtwn_pci_softc *sc = cookie;

	return bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
}

uint32_t
rtwn_pci_read_4(void *cookie, uint16_t addr)
{
	struct rtwn_pci_softc *sc = cookie;

	return bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
}

/*
 * Process one received frame: validate the descriptor, swap in a fresh
 * Rx buffer, build the radiotap header if bpf is listening, and pass
 * the frame to net80211.
 */
void
rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
    struct rtwn_rx_data *rx_data, int desc_idx)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct r92c_rx_phystat *phy = NULL;
	uint32_t rxdw0, rxdw3;
	struct mbuf *m, *m1;
	uint8_t rate;
	int8_t rssi = 0;
	int infosz, pktlen, shift, error;

	rxdw0 = letoh32(rx_desc->rxdw0);
	rxdw3 = letoh32(rx_desc->rxdw3);

	if
(__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) { 777 /* 778 * This should not happen since we setup our Rx filter 779 * to not receive these frames. 780 */ 781 ifp->if_ierrors++; 782 return; 783 } 784 785 pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); 786 if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) { 787 ifp->if_ierrors++; 788 return; 789 } 790 791 rate = MS(rxdw3, R92C_RXDW3_RATE); 792 infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; 793 if (infosz > sizeof(struct r92c_rx_phystat)) 794 infosz = sizeof(struct r92c_rx_phystat); 795 shift = MS(rxdw0, R92C_RXDW0_SHIFT); 796 797 /* Get RSSI from PHY status descriptor if present. */ 798 if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) { 799 phy = mtod(rx_data->m, struct r92c_rx_phystat *); 800 rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy); 801 /* Update our average RSSI. */ 802 rtwn_update_avgrssi(&sc->sc_sc, rate, rssi); 803 } 804 805 DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n", 806 pktlen, rate, infosz, shift, rssi)); 807 808 m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES); 809 if (m1 == NULL) { 810 ifp->if_ierrors++; 811 return; 812 } 813 bus_dmamap_unload(sc->sc_dmat, rx_data->map); 814 error = bus_dmamap_load(sc->sc_dmat, rx_data->map, 815 mtod(m1, void *), MCLBYTES, NULL, 816 BUS_DMA_NOWAIT | BUS_DMA_READ); 817 if (error != 0) { 818 m_freem(m1); 819 820 if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, 821 rx_data->m, BUS_DMA_NOWAIT)) 822 panic("%s: could not load old RX mbuf", 823 sc->sc_dev.dv_xname); 824 825 /* Physical address may have changed. */ 826 rtwn_setup_rx_desc(sc, rx_desc, 827 rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx); 828 829 ifp->if_ierrors++; 830 return; 831 } 832 833 /* Finalize mbuf. */ 834 m = rx_data->m; 835 rx_data->m = m1; 836 m->m_pkthdr.len = m->m_len = pktlen + infosz + shift; 837 838 /* Update RX descriptor. 
*/ 839 rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr, 840 MCLBYTES, desc_idx); 841 842 /* Get ieee80211 frame header. */ 843 if (rxdw0 & R92C_RXDW0_PHYST) 844 m_adj(m, infosz + shift); 845 else 846 m_adj(m, shift); 847 wh = mtod(m, struct ieee80211_frame *); 848 849 #if NBPFILTER > 0 850 if (__predict_false(sc->sc_drvbpf != NULL)) { 851 struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap; 852 struct mbuf mb; 853 854 tap->wr_flags = 0; 855 /* Map HW rate index to 802.11 rate. */ 856 tap->wr_flags = 2; 857 if (!(rxdw3 & R92C_RXDW3_HT)) { 858 switch (rate) { 859 /* CCK. */ 860 case 0: tap->wr_rate = 2; break; 861 case 1: tap->wr_rate = 4; break; 862 case 2: tap->wr_rate = 11; break; 863 case 3: tap->wr_rate = 22; break; 864 /* OFDM. */ 865 case 4: tap->wr_rate = 12; break; 866 case 5: tap->wr_rate = 18; break; 867 case 6: tap->wr_rate = 24; break; 868 case 7: tap->wr_rate = 36; break; 869 case 8: tap->wr_rate = 48; break; 870 case 9: tap->wr_rate = 72; break; 871 case 10: tap->wr_rate = 96; break; 872 case 11: tap->wr_rate = 108; break; 873 } 874 } else if (rate >= 12) { /* MCS0~15. */ 875 /* Bit 7 set means HT MCS instead of rate. */ 876 tap->wr_rate = 0x80 | (rate - 12); 877 } 878 tap->wr_dbm_antsignal = rssi; 879 tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq); 880 tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags); 881 882 mb.m_data = (caddr_t)tap; 883 mb.m_len = sc->sc_rxtap_len; 884 mb.m_next = m; 885 mb.m_nextpkt = NULL; 886 mb.m_type = 0; 887 mb.m_flags = 0; 888 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 889 } 890 #endif 891 892 ni = ieee80211_find_rxnode(ic, wh); 893 rxi.rxi_flags = 0; 894 rxi.rxi_rssi = rssi; 895 rxi.rxi_tstamp = 0; /* Unused. */ 896 ieee80211_input(ifp, m, ni, &rxi); 897 /* Node is no longer needed. 
*/ 898 ieee80211_release_node(ic, ni); 899 } 900 901 int 902 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni) 903 { 904 struct rtwn_pci_softc *sc = cookie; 905 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 906 struct ieee80211_frame *wh; 907 struct ieee80211_key *k = NULL; 908 struct rtwn_tx_ring *tx_ring; 909 struct rtwn_tx_data *data; 910 struct r92c_tx_desc_pci *txd; 911 uint16_t qos; 912 uint8_t raid, type, tid, qid; 913 int hasqos, error; 914 915 wh = mtod(m, struct ieee80211_frame *); 916 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 917 918 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 919 k = ieee80211_get_txkey(ic, wh, ni); 920 if ((m = ieee80211_encrypt(ic, m, k)) == NULL) 921 return (ENOBUFS); 922 wh = mtod(m, struct ieee80211_frame *); 923 } 924 925 if ((hasqos = ieee80211_has_qos(wh))) { 926 qos = ieee80211_get_qos(wh); 927 tid = qos & IEEE80211_QOS_TID; 928 qid = ieee80211_up_to_ac(ic, tid); 929 } else if (type != IEEE80211_FC0_TYPE_DATA) { 930 qid = RTWN_VO_QUEUE; 931 } else 932 qid = RTWN_BE_QUEUE; 933 934 /* Grab a Tx buffer from the ring. */ 935 tx_ring = &sc->tx_ring[qid]; 936 data = &tx_ring->tx_data[tx_ring->cur]; 937 if (data->m != NULL) { 938 m_freem(m); 939 return (ENOBUFS); 940 } 941 942 /* Fill Tx descriptor. 
*/ 943 txd = &tx_ring->desc[tx_ring->cur]; 944 if (htole32(txd->txdw0) & R92C_RXDW0_OWN) { 945 m_freem(m); 946 return (ENOBUFS); 947 } 948 txd->txdw0 = htole32( 949 SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) | 950 SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | 951 R92C_TXDW0_FSG | R92C_TXDW0_LSG); 952 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 953 txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); 954 955 txd->txdw1 = 0; 956 #ifdef notyet 957 if (k != NULL) { 958 switch (k->k_cipher) { 959 case IEEE80211_CIPHER_WEP40: 960 case IEEE80211_CIPHER_WEP104: 961 case IEEE80211_CIPHER_TKIP: 962 cipher = R92C_TXDW1_CIPHER_RC4; 963 break; 964 case IEEE80211_CIPHER_CCMP: 965 cipher = R92C_TXDW1_CIPHER_AES; 966 break; 967 default: 968 cipher = R92C_TXDW1_CIPHER_NONE; 969 } 970 txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher)); 971 } 972 #endif 973 txd->txdw4 = 0; 974 txd->txdw5 = 0; 975 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 976 type == IEEE80211_FC0_TYPE_DATA) { 977 if (ic->ic_curmode == IEEE80211_MODE_11B || 978 (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B)) 979 raid = R92C_RAID_11B; 980 else 981 raid = R92C_RAID_11BG; 982 txd->txdw1 |= htole32( 983 SM(R92C_TXDW1_MACID, R92C_MACID_BSS) | 984 SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) | 985 SM(R92C_TXDW1_RAID, raid) | 986 R92C_TXDW1_AGGBK); 987 988 if (ic->ic_flags & IEEE80211_F_USEPROT) { 989 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 990 txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF | 991 R92C_TXDW4_HWRTSEN); 992 } else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 993 txd->txdw4 |= htole32(R92C_TXDW4_RTSEN | 994 R92C_TXDW4_HWRTSEN); 995 } 996 } 997 /* Send RTS at OFDM24. */ 998 txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 8)); 999 txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf)); 1000 /* Send data at OFDM54. 
*/ 1001 txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 11)); 1002 txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f)); 1003 1004 } else { 1005 txd->txdw1 |= htole32( 1006 SM(R92C_TXDW1_MACID, 0) | 1007 SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) | 1008 SM(R92C_TXDW1_RAID, R92C_RAID_11B)); 1009 1010 /* Force CCK1. */ 1011 txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); 1012 txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0)); 1013 } 1014 /* Set sequence number (already little endian). */ 1015 txd->txdseq = *(uint16_t *)wh->i_seq; 1016 1017 if (!hasqos) { 1018 /* Use HW sequence numbering for non-QoS frames. */ 1019 txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ); 1020 txd->txdseq |= htole16(0x8000); /* WTF? */ 1021 } else 1022 txd->txdw4 |= htole32(R92C_TXDW4_QOS); 1023 1024 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 1025 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 1026 if (error && error != EFBIG) { 1027 printf("%s: can't map mbuf (error %d)\n", 1028 sc->sc_dev.dv_xname, error); 1029 m_freem(m); 1030 return error; 1031 } 1032 if (error != 0) { 1033 /* Too many DMA segments, linearize mbuf. 
*/ 1034 if (m_defrag(m, M_DONTWAIT)) { 1035 m_freem(m); 1036 return ENOBUFS; 1037 } 1038 1039 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 1040 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 1041 if (error != 0) { 1042 printf("%s: can't map mbuf (error %d)\n", 1043 sc->sc_dev.dv_xname, error); 1044 m_freem(m); 1045 return error; 1046 } 1047 } 1048 1049 txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr); 1050 txd->txbufsize = htole16(m->m_pkthdr.len); 1051 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize, 1052 BUS_SPACE_BARRIER_WRITE); 1053 txd->txdw0 |= htole32(R92C_TXDW0_OWN); 1054 1055 bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES, 1056 BUS_DMASYNC_POSTWRITE); 1057 bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES, 1058 BUS_DMASYNC_POSTWRITE); 1059 1060 data->m = m; 1061 data->ni = ni; 1062 1063 #if NBPFILTER > 0 1064 if (__predict_false(sc->sc_drvbpf != NULL)) { 1065 struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap; 1066 struct mbuf mb; 1067 1068 tap->wt_flags = 0; 1069 tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq); 1070 tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags); 1071 1072 mb.m_data = (caddr_t)tap; 1073 mb.m_len = sc->sc_txtap_len; 1074 mb.m_next = m; 1075 mb.m_nextpkt = NULL; 1076 mb.m_type = 0; 1077 mb.m_flags = 0; 1078 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 1079 } 1080 #endif 1081 1082 tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT; 1083 tx_ring->queued++; 1084 1085 if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1)) 1086 sc->qfullmsk |= (1 << qid); 1087 1088 /* Kick TX. 
 */
	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));

	return (0);
}

/*
 * Reclaim completed Tx descriptors of queue 'qid' (those whose OWN bit
 * the chip has cleared), free their mbufs/nodes and restart the output
 * queue if no queue is full anymore.
 */
void
rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
	struct rtwn_tx_data *tx_data;
	struct r92c_tx_desc_pci *tx_desc;
	int i;

	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
	    BUS_DMASYNC_POSTREAD);

	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
		tx_data = &tx_ring->tx_data[i];
		if (tx_data->m == NULL)
			continue;

		tx_desc = &tx_ring->desc[i];
		/* Still owned by the hardware, i.e. not transmitted yet. */
		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
			continue;

		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
		m_freem(tx_data->m);
		tx_data->m = NULL;
		ieee80211_release_node(ic, tx_data->ni);
		tx_data->ni = NULL;

		ifp->if_opackets++;
		sc->sc_sc.sc_tx_timer = 0;
		tx_ring->queued--;
	}

	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
		sc->qfullmsk &= ~(1 << qid);

	if (sc->qfullmsk == 0) {
		ifq_clr_oactive(&ifp->if_snd);
		(*ifp->if_start)(ifp);
	}
}

int
rtwn_alloc_buffers(void *cookie)
{
	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
	return (0);
}

int
rtwn_pci_init(void *cookie)
{
	/* nothing to do */
	return (0);
}

/*
 * Stop the device: mask interrupts, pause Tx, reset the baseband and
 * MAC DMA engines, power the chip down and drain both rings.
 */
void
rtwn_pci_stop(void *cookie)
{
	struct rtwn_pci_softc *sc = cookie;
	uint16_t reg;
	int i, s;

	s = splnet();

	/* Disable interrupts. */
	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);

	/* Stop hardware. */
	rtwn_pci_write_1(sc, R92C_TXPAUSE, 0xff);
	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
	reg = rtwn_pci_read_2(sc, R92C_CR);
	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
	    R92C_CR_ENSEC);
	rtwn_pci_write_2(sc, R92C_CR, reg);
	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
		rtwn_fw_reset(&sc->sc_sc);
	/* TODO: linux does additional btcoex stuff here */
	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80);	/* linux magic number */
	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23);	/* ditto */
	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e);	/* differs in btcoex */
	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x0e);
	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);

	for (i = 0; i < RTWN_NTXQUEUES; i++)
		rtwn_reset_tx_list(sc, i);
	rtwn_reset_rx_list(sc);

	splx(s);
}

/*
 * Interrupt handler: ack all pending causes, service the Rx ring and
 * the per-queue Tx completions, then re-enable interrupts.
 */
int
rtwn_intr(void *xsc)
{
	struct rtwn_pci_softc *sc = xsc;
	u_int32_t status;
	int i;

	status = rtwn_pci_read_4(sc, R92C_HISR);
	/* 0xffffffff means the device is gone (or the read failed). */
	if (status == 0 || status == 0xffffffff)
		return (0);

	/* Disable interrupts. */
	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);

	/* Ack interrupts. */
	rtwn_pci_write_4(sc, R92C_HISR, status);

	/* Vendor driver treats RX errors like ROK...
*/ 1210 if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) { 1211 bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0, 1212 sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT, 1213 BUS_DMASYNC_POSTREAD); 1214 1215 for (i = 0; i < RTWN_RX_LIST_COUNT; i++) { 1216 struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i]; 1217 struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i]; 1218 1219 if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN) 1220 continue; 1221 1222 rtwn_rx_frame(sc, rx_desc, rx_data, i); 1223 } 1224 } 1225 1226 if (status & R92C_IMR_BDOK) 1227 rtwn_tx_done(sc, RTWN_BEACON_QUEUE); 1228 if (status & R92C_IMR_HIGHDOK) 1229 rtwn_tx_done(sc, RTWN_HIGH_QUEUE); 1230 if (status & R92C_IMR_MGNTDOK) 1231 rtwn_tx_done(sc, RTWN_MGNT_QUEUE); 1232 if (status & R92C_IMR_BKDOK) 1233 rtwn_tx_done(sc, RTWN_BK_QUEUE); 1234 if (status & R92C_IMR_BEDOK) 1235 rtwn_tx_done(sc, RTWN_BE_QUEUE); 1236 if (status & R92C_IMR_VIDOK) 1237 rtwn_tx_done(sc, RTWN_VI_QUEUE); 1238 if (status & R92C_IMR_VODOK) 1239 rtwn_tx_done(sc, RTWN_VO_QUEUE); 1240 1241 /* Enable interrupts. */ 1242 rtwn_pci_write_4(sc, R92C_HIMR, RTWN_INT_ENABLE); 1243 1244 return (1); 1245 } 1246 1247 int 1248 rtwn_is_oactive(void *cookie) 1249 { 1250 struct rtwn_pci_softc *sc = cookie; 1251 1252 return (sc->qfullmsk != 0); 1253 } 1254 1255 int 1256 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data) 1257 { 1258 int ntries; 1259 1260 rtwn_pci_write_4(sc, R92C_LLT_INIT, 1261 SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) | 1262 SM(R92C_LLT_INIT_ADDR, addr) | 1263 SM(R92C_LLT_INIT_DATA, data)); 1264 /* Wait for write operation to complete. */ 1265 for (ntries = 0; ntries < 20; ntries++) { 1266 if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == 1267 R92C_LLT_INIT_OP_NO_ACTIVE) 1268 return (0); 1269 DELAY(5); 1270 } 1271 return (ETIMEDOUT); 1272 } 1273 1274 int 1275 rtwn_llt_init(struct rtwn_pci_softc *sc) 1276 { 1277 int i, error; 1278 1279 /* Reserve pages [0; R92C_TX_PAGE_COUNT]. 
*/ 1280 for (i = 0; i < R92C_TX_PAGE_COUNT; i++) { 1281 if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) 1282 return (error); 1283 } 1284 /* NB: 0xff indicates end-of-list. */ 1285 if ((error = rtwn_llt_write(sc, i, 0xff)) != 0) 1286 return (error); 1287 /* 1288 * Use pages [R92C_TX_PAGE_COUNT + 1; R92C_TXPKTBUF_COUNT - 1] 1289 * as ring buffer. 1290 */ 1291 for (++i; i < R92C_TXPKTBUF_COUNT - 1; i++) { 1292 if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) 1293 return (error); 1294 } 1295 /* Make the last page point to the beginning of the ring buffer. */ 1296 error = rtwn_llt_write(sc, i, R92C_TX_PAGE_COUNT + 1); 1297 return (error); 1298 } 1299 1300 int 1301 rtwn_power_on(void *cookie) 1302 { 1303 struct rtwn_pci_softc *sc = cookie; 1304 uint32_t reg; 1305 int ntries; 1306 1307 /* Wait for autoload done bit. */ 1308 for (ntries = 0; ntries < 1000; ntries++) { 1309 if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) & 1310 R92C_APS_FSMCO_PFM_ALDN) 1311 break; 1312 DELAY(5); 1313 } 1314 if (ntries == 1000) { 1315 printf("%s: timeout waiting for chip autoload\n", 1316 sc->sc_dev.dv_xname); 1317 return (ETIMEDOUT); 1318 } 1319 1320 /* Unlock ISO/CLK/Power control register. */ 1321 rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0); 1322 1323 /* TODO: check if we need this for 8188CE */ 1324 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1325 /* bt coex */ 1326 reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO); 1327 reg |= (R92C_APS_FSMCO_SOP_ABG | 1328 R92C_APS_FSMCO_SOP_AMB | 1329 R92C_APS_FSMCO_XOP_BTCK); 1330 rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg); 1331 } 1332 1333 /* Move SPS into PWM mode. */ 1334 rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b); 1335 DELAY(100); 1336 1337 /* Set low byte to 0x0f, leave others unchanged. 
*/ 1338 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, 1339 (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f); 1340 1341 /* TODO: check if we need this for 8188CE */ 1342 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1343 /* bt coex */ 1344 reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL); 1345 reg &= (~0x00024800); /* XXX magic from linux */ 1346 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg); 1347 } 1348 1349 rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL, 1350 (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) | 1351 R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR); 1352 DELAY(200); 1353 1354 /* TODO: linux does additional btcoex stuff here */ 1355 1356 /* Auto enable WLAN. */ 1357 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1358 rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); 1359 for (ntries = 0; ntries < 1000; ntries++) { 1360 if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1361 R92C_APS_FSMCO_APFM_ONMAC)) 1362 break; 1363 DELAY(5); 1364 } 1365 if (ntries == 1000) { 1366 printf("%s: timeout waiting for MAC auto ON\n", 1367 sc->sc_dev.dv_xname); 1368 return (ETIMEDOUT); 1369 } 1370 1371 /* Enable radio, GPIO and LED functions. */ 1372 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1373 R92C_APS_FSMCO_AFSM_PCIE | 1374 R92C_APS_FSMCO_PDN_EN | 1375 R92C_APS_FSMCO_PFM_ALDN); 1376 /* Release RF digital isolation. 
*/ 1377 rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL, 1378 rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR); 1379 1380 if (sc->sc_sc.chip & RTWN_CHIP_92C) 1381 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77); 1382 else 1383 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22); 1384 1385 rtwn_pci_write_4(sc, R92C_INT_MIG, 0); 1386 1387 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1388 /* bt coex */ 1389 reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2); 1390 reg &= 0xfd; /* XXX magic from linux */ 1391 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg); 1392 } 1393 1394 rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG, 1395 rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL); 1396 1397 reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL); 1398 if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) { 1399 printf("%s: radio is disabled by hardware switch\n", 1400 sc->sc_dev.dv_xname); 1401 return (EPERM); /* :-) */ 1402 } 1403 1404 /* Initialize MAC. */ 1405 reg = rtwn_pci_read_1(sc, R92C_APSD_CTRL); 1406 rtwn_pci_write_1(sc, R92C_APSD_CTRL, 1407 rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF); 1408 for (ntries = 0; ntries < 200; ntries++) { 1409 if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) & 1410 R92C_APSD_CTRL_OFF_STATUS)) 1411 break; 1412 DELAY(500); 1413 } 1414 if (ntries == 200) { 1415 printf("%s: timeout waiting for MAC initialization\n", 1416 sc->sc_dev.dv_xname); 1417 return (ETIMEDOUT); 1418 } 1419 1420 /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. 
*/ 1421 reg = rtwn_pci_read_2(sc, R92C_CR); 1422 reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | 1423 R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | 1424 R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | 1425 R92C_CR_ENSEC; 1426 rtwn_pci_write_2(sc, R92C_CR, reg); 1427 1428 rtwn_pci_write_1(sc, 0xfe10, 0x19); 1429 1430 return (0); 1431 } 1432 1433 int 1434 rtwn_dma_init(void *cookie) 1435 { 1436 struct rtwn_pci_softc *sc = cookie; 1437 uint32_t reg; 1438 int error; 1439 1440 /* Initialize LLT table. */ 1441 error = rtwn_llt_init(sc); 1442 if (error != 0) 1443 return error; 1444 1445 /* Set number of pages for normal priority queue. */ 1446 rtwn_pci_write_2(sc, R92C_RQPN_NPQ, 0); 1447 rtwn_pci_write_4(sc, R92C_RQPN, 1448 /* Set number of pages for public queue. */ 1449 SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) | 1450 /* Set number of pages for high priority queue. */ 1451 SM(R92C_RQPN_HPQ, R92C_HPQ_NPAGES) | 1452 /* Set number of pages for low priority queue. */ 1453 SM(R92C_RQPN_LPQ, R92C_LPQ_NPAGES) | 1454 /* Load values. */ 1455 R92C_RQPN_LD); 1456 1457 rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY); 1458 rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY); 1459 rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, 1460 R92C_TX_PAGE_BOUNDARY); 1461 rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY); 1462 rtwn_pci_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY); 1463 1464 reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL); 1465 reg &= ~R92C_TRXDMA_CTRL_QMAP_M; 1466 reg |= 0xF771; 1467 rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg); 1468 1469 rtwn_pci_write_4(sc, R92C_TCR, 1470 R92C_TCR_CFENDFORM | (1 << 12) | (1 << 13)); 1471 1472 /* Configure Tx DMA. 
*/ 1473 rtwn_pci_write_4(sc, R92C_BKQ_DESA, 1474 sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr); 1475 rtwn_pci_write_4(sc, R92C_BEQ_DESA, 1476 sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr); 1477 rtwn_pci_write_4(sc, R92C_VIQ_DESA, 1478 sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr); 1479 rtwn_pci_write_4(sc, R92C_VOQ_DESA, 1480 sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr); 1481 rtwn_pci_write_4(sc, R92C_BCNQ_DESA, 1482 sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr); 1483 rtwn_pci_write_4(sc, R92C_MGQ_DESA, 1484 sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr); 1485 rtwn_pci_write_4(sc, R92C_HQ_DESA, 1486 sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr); 1487 1488 /* Configure Rx DMA. */ 1489 rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr); 1490 1491 /* Set Tx/Rx transfer page boundary. */ 1492 rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff); 1493 1494 /* Set Tx/Rx transfer page size. */ 1495 rtwn_pci_write_1(sc, R92C_PBP, 1496 SM(R92C_PBP_PSRX, R92C_PBP_128) | 1497 SM(R92C_PBP_PSTX, R92C_PBP_128)); 1498 1499 return (0); 1500 } 1501 1502 int 1503 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len) 1504 { 1505 struct rtwn_pci_softc *sc = cookie; 1506 uint32_t reg; 1507 int off, mlen, error = 0, i; 1508 1509 reg = rtwn_pci_read_4(sc, R92C_MCUFWDL); 1510 reg = RW(reg, R92C_MCUFWDL_PAGE, page); 1511 rtwn_pci_write_4(sc, R92C_MCUFWDL, reg); 1512 1513 DELAY(5); 1514 1515 off = R92C_FW_START_ADDR; 1516 while (len > 0) { 1517 if (len > 196) 1518 mlen = 196; 1519 else if (len > 4) 1520 mlen = 4; 1521 else 1522 mlen = 1; 1523 for (i = 0; i < mlen; i++) 1524 rtwn_pci_write_1(sc, off++, buf[i]); 1525 buf += mlen; 1526 len -= mlen; 1527 } 1528 1529 return (error); 1530 } 1531 1532 int 1533 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len) 1534 { 1535 struct rtwn_pci_softc *sc = cookie; 1536 const char *name; 1537 int error; 1538 1539 if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | 
RTWN_CHIP_92C)) == 1540 RTWN_CHIP_UMC_A_CUT) 1541 name = "rtwn-rtl8192cfwU"; 1542 else 1543 name = "rtwn-rtl8192cfwU_B"; 1544 1545 error = loadfirmware(name, fw, len); 1546 if (error) 1547 printf("%s: could not read firmware %s (error %d)\n", 1548 sc->sc_dev.dv_xname, name, error); 1549 return (error); 1550 } 1551 1552 void 1553 rtwn_mac_init(void *cookie) 1554 { 1555 struct rtwn_pci_softc *sc = cookie; 1556 int i; 1557 1558 /* Write MAC initialization values. */ 1559 for (i = 0; i < nitems(rtl8192ce_mac); i++) 1560 rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg, 1561 rtl8192ce_mac[i].val); 1562 } 1563 1564 void 1565 rtwn_bb_init(void *cookie) 1566 { 1567 struct rtwn_pci_softc *sc = cookie; 1568 const struct r92c_bb_prog *prog; 1569 uint32_t reg; 1570 int i; 1571 1572 /* Enable BB and RF. */ 1573 rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN, 1574 rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) | 1575 R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST | 1576 R92C_SYS_FUNC_EN_DIO_RF); 1577 1578 rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83); 1579 1580 rtwn_pci_write_1(sc, R92C_RF_CTRL, 1581 R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB); 1582 1583 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, 1584 R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA | 1585 R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST | 1586 R92C_SYS_FUNC_EN_BBRSTB); 1587 1588 rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80); 1589 1590 rtwn_pci_write_4(sc, R92C_LEDCFG0, 1591 rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000); 1592 1593 /* Select BB programming. */ 1594 prog = (sc->sc_sc.chip & RTWN_CHIP_92C) ? 1595 &rtl8192ce_bb_prog_2t : &rtl8192ce_bb_prog_1t; 1596 1597 /* Write BB initialization values. */ 1598 for (i = 0; i < prog->count; i++) { 1599 rtwn_bb_write(sc, prog->regs[i], prog->vals[i]); 1600 DELAY(1); 1601 } 1602 1603 if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) { 1604 /* 8192C 1T only configuration. 
*/ 1605 reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO); 1606 reg = (reg & ~0x00000003) | 0x2; 1607 rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg); 1608 1609 reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO); 1610 reg = (reg & ~0x00300033) | 0x00200022; 1611 rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg); 1612 1613 reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING); 1614 reg = (reg & ~0xff000000) | 0x45 << 24; 1615 rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg); 1616 1617 reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA); 1618 reg = (reg & ~0x000000ff) | 0x23; 1619 rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg); 1620 1621 reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1); 1622 reg = (reg & ~0x00000030) | 1 << 4; 1623 rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg); 1624 1625 reg = rtwn_bb_read(sc, 0xe74); 1626 reg = (reg & ~0x0c000000) | 2 << 26; 1627 rtwn_bb_write(sc, 0xe74, reg); 1628 reg = rtwn_bb_read(sc, 0xe78); 1629 reg = (reg & ~0x0c000000) | 2 << 26; 1630 rtwn_bb_write(sc, 0xe78, reg); 1631 reg = rtwn_bb_read(sc, 0xe7c); 1632 reg = (reg & ~0x0c000000) | 2 << 26; 1633 rtwn_bb_write(sc, 0xe7c, reg); 1634 reg = rtwn_bb_read(sc, 0xe80); 1635 reg = (reg & ~0x0c000000) | 2 << 26; 1636 rtwn_bb_write(sc, 0xe80, reg); 1637 reg = rtwn_bb_read(sc, 0xe88); 1638 reg = (reg & ~0x0c000000) | 2 << 26; 1639 rtwn_bb_write(sc, 0xe88, reg); 1640 } 1641 1642 /* Write AGC values. 
*/ 1643 for (i = 0; i < prog->agccount; i++) { 1644 rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, 1645 prog->agcvals[i]); 1646 DELAY(1); 1647 } 1648 1649 if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & 1650 R92C_HSSI_PARAM2_CCK_HIPWR) 1651 sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR; 1652 } 1653 1654 void 1655 rtwn_calib_to(void *arg) 1656 { 1657 struct rtwn_pci_softc *sc = arg; 1658 1659 rtwn_calib(&sc->sc_sc); 1660 } 1661 1662 void 1663 rtwn_next_calib(void *cookie) 1664 { 1665 struct rtwn_pci_softc *sc = cookie; 1666 1667 timeout_add_sec(&sc->calib_to, 2); 1668 } 1669 1670 void 1671 rtwn_cancel_calib(void *cookie) 1672 { 1673 struct rtwn_pci_softc *sc = cookie; 1674 1675 if (timeout_initialized(&sc->calib_to)) 1676 timeout_del(&sc->calib_to); 1677 } 1678 1679 void 1680 rtwn_scan_to(void *arg) 1681 { 1682 struct rtwn_pci_softc *sc = arg; 1683 1684 rtwn_next_scan(&sc->sc_sc); 1685 } 1686 1687 void 1688 rtwn_pci_next_scan(void *cookie) 1689 { 1690 struct rtwn_pci_softc *sc = cookie; 1691 1692 timeout_add_msec(&sc->scan_to, 200); 1693 } 1694 1695 void 1696 rtwn_cancel_scan(void *cookie) 1697 { 1698 struct rtwn_pci_softc *sc = cookie; 1699 1700 if (timeout_initialized(&sc->scan_to)) 1701 timeout_del(&sc->scan_to); 1702 } 1703 1704 void 1705 rtwn_wait_async(void *cookie) 1706 { 1707 /* nothing to do */ 1708 } 1709