1 /* $OpenBSD: if_rtwn.c,v 1.35 2018/10/01 22:36:08 jmatthew Exp $ */ 2 3 /*- 4 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr> 5 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org> 6 * Copyright (c) 2015-2016 Andriy Voskoboinyk <avos@FreeBSD.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * PCI front-end for Realtek RTL8188CE/RTL8188EE/RTL8192CE/RTL8723AE driver. 23 */ 24 25 #include "bpfilter.h" 26 27 #include <sys/param.h> 28 #include <sys/sockio.h> 29 #include <sys/mbuf.h> 30 #include <sys/kernel.h> 31 #include <sys/socket.h> 32 #include <sys/systm.h> 33 #include <sys/task.h> 34 #include <sys/timeout.h> 35 #include <sys/conf.h> 36 #include <sys/device.h> 37 #include <sys/endian.h> 38 39 #include <machine/bus.h> 40 #include <machine/intr.h> 41 42 #if NBPFILTER > 0 43 #include <net/bpf.h> 44 #endif 45 #include <net/if.h> 46 #include <net/if_dl.h> 47 #include <net/if_media.h> 48 49 #include <netinet/in.h> 50 #include <netinet/if_ether.h> 51 52 #include <net80211/ieee80211_var.h> 53 #include <net80211/ieee80211_amrr.h> 54 #include <net80211/ieee80211_radiotap.h> 55 56 #include <dev/pci/pcireg.h> 57 #include <dev/pci/pcivar.h> 58 #include <dev/pci/pcidevs.h> 59 60 #include <dev/ic/r92creg.h> 61 #include <dev/ic/rtwnvar.h> 62 63 /* 64 * Driver definitions. 65 */ 66 67 #define R92C_NPQ_NPAGES 0 68 #define R92C_PUBQ_NPAGES 176 69 #define R92C_HPQ_NPAGES 41 70 #define R92C_LPQ_NPAGES 28 71 #define R92C_TXPKTBUF_COUNT 256 72 #define R92C_TX_PAGE_COUNT \ 73 (R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES) 74 #define R92C_TX_PAGE_BOUNDARY (R92C_TX_PAGE_COUNT + 1) 75 #define R92C_MAX_RX_DMA_SIZE 0x2800 76 77 #define R88E_NPQ_NPAGES 0 78 #define R88E_PUBQ_NPAGES 116 79 #define R88E_HPQ_NPAGES 41 80 #define R88E_LPQ_NPAGES 13 81 #define R88E_TXPKTBUF_COUNT 176 82 #define R88E_TX_PAGE_COUNT \ 83 (R88E_PUBQ_NPAGES + R88E_HPQ_NPAGES + R88E_LPQ_NPAGES) 84 #define R88E_TX_PAGE_BOUNDARY (R88E_TX_PAGE_COUNT + 1) 85 #define R88E_MAX_RX_DMA_SIZE 0x2600 86 87 #define R23A_NPQ_NPAGES 0 88 #define R23A_PUBQ_NPAGES 189 89 #define R23A_HPQ_NPAGES 28 90 #define R23A_LPQ_NPAGES 28 91 #define R23A_TXPKTBUF_COUNT 256 92 #define R23A_TX_PAGE_COUNT \ 93 (R23A_PUBQ_NPAGES + R23A_HPQ_NPAGES + R23A_LPQ_NPAGES) 94 #define R23A_TX_PAGE_BOUNDARY (R23A_TX_PAGE_COUNT + 1) 95 #define R23A_MAX_RX_DMA_SIZE 0x2800 96 97 #define RTWN_NTXQUEUES 9 98 #define RTWN_RX_LIST_COUNT 256 99 #define RTWN_TX_LIST_COUNT 256 100 101 /* TX queue indices. 
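 * These ids index sc->tx_ring[] and double as bit positions in
 * sc->qfullmsk and in the TX poll request written to R92C_PCIE_CTRL_REG
 * by rtwn_tx(); BK/BE/VI/VO correspond to the four EDCA access categories.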
*/ 102 #define RTWN_BK_QUEUE 0 103 #define RTWN_BE_QUEUE 1 104 #define RTWN_VI_QUEUE 2 105 #define RTWN_VO_QUEUE 3 106 #define RTWN_BEACON_QUEUE 4 107 #define RTWN_TXCMD_QUEUE 5 108 #define RTWN_MGNT_QUEUE 6 109 #define RTWN_HIGH_QUEUE 7 110 #define RTWN_HCCA_QUEUE 8 111 112 struct rtwn_rx_radiotap_header { 113 struct ieee80211_radiotap_header wr_ihdr; 114 uint8_t wr_flags; 115 uint8_t wr_rate; 116 uint16_t wr_chan_freq; 117 uint16_t wr_chan_flags; 118 uint8_t wr_dbm_antsignal; 119 } __packed; 120 121 #define RTWN_RX_RADIOTAP_PRESENT \ 122 (1 << IEEE80211_RADIOTAP_FLAGS | \ 123 1 << IEEE80211_RADIOTAP_RATE | \ 124 1 << IEEE80211_RADIOTAP_CHANNEL | \ 125 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) 126 127 struct rtwn_tx_radiotap_header { 128 struct ieee80211_radiotap_header wt_ihdr; 129 uint8_t wt_flags; 130 uint16_t wt_chan_freq; 131 uint16_t wt_chan_flags; 132 } __packed; 133 134 #define RTWN_TX_RADIOTAP_PRESENT \ 135 (1 << IEEE80211_RADIOTAP_FLAGS | \ 136 1 << IEEE80211_RADIOTAP_CHANNEL) 137 138 struct rtwn_rx_data { 139 bus_dmamap_t map; 140 struct mbuf *m; 141 }; 142 143 struct rtwn_rx_ring { 144 struct r92c_rx_desc_pci *desc; 145 bus_dmamap_t map; 146 bus_dma_segment_t seg; 147 int nsegs; 148 struct rtwn_rx_data rx_data[RTWN_RX_LIST_COUNT]; 149 150 }; 151 struct rtwn_tx_data { 152 bus_dmamap_t map; 153 struct mbuf *m; 154 struct ieee80211_node *ni; 155 }; 156 157 struct rtwn_tx_ring { 158 bus_dmamap_t map; 159 bus_dma_segment_t seg; 160 int nsegs; 161 struct r92c_tx_desc_pci *desc; 162 struct rtwn_tx_data tx_data[RTWN_TX_LIST_COUNT]; 163 int queued; 164 int cur; 165 }; 166 167 struct rtwn_pci_softc { 168 struct device sc_dev; 169 struct rtwn_softc sc_sc; 170 171 struct rtwn_rx_ring rx_ring; 172 struct rtwn_tx_ring tx_ring[RTWN_NTXQUEUES]; 173 uint32_t qfullmsk; 174 175 struct timeout calib_to; 176 struct timeout scan_to; 177 178 /* PCI specific goo. */ 179 bus_dma_tag_t sc_dmat; 180 pci_chipset_tag_t sc_pc; 181 pcitag_t sc_tag; 182 void *sc_ih; 183 bus_space_tag_t sc_st; 184 bus_space_handle_t sc_sh; 185 bus_size_t sc_mapsize; 186 int sc_cap_off; 187 188 struct ieee80211_amrr amrr; 189 struct ieee80211_amrr_node amn; 190 191 #if NBPFILTER > 0 192 caddr_t sc_drvbpf; 193 194 union { 195 struct rtwn_rx_radiotap_header th; 196 uint8_t pad[64]; 197 } sc_rxtapu; 198 #define sc_rxtap sc_rxtapu.th 199 int sc_rxtap_len; 200 201 union { 202 struct rtwn_tx_radiotap_header th; 203 uint8_t pad[64]; 204 } sc_txtapu; 205 #define sc_txtap sc_txtapu.th 206 int sc_txtap_len; 207 #endif 208 }; 209 210 #ifdef RTWN_DEBUG 211 #define DPRINTF(x) do { if (rtwn_debug) printf x; } while (0) 212 #define DPRINTFN(n, x) do { if (rtwn_debug >= (n)) printf x; } while (0) 213 extern int rtwn_debug; 214 #else 215 #define DPRINTF(x) 216 #define DPRINTFN(n, x) 217 #endif 218 219 /* 220 * PCI configuration space registers. 
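 * Only the memory mapped BAR (RTWN_PCI_MMBA) is used by
 * rtwn_pci_attach(); the I/O space BAR appears to be unused.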
221 */ 222 #define RTWN_PCI_IOBA 0x10 /* i/o mapped base */ 223 #define RTWN_PCI_MMBA 0x18 /* memory mapped base */ 224 225 static const struct pci_matchid rtwn_pci_devices[] = { 226 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8188CE }, 227 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8188EE }, 228 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8192CE }, 229 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8723AE } 230 }; 231 232 int rtwn_pci_match(struct device *, void *, void *); 233 void rtwn_pci_attach(struct device *, struct device *, void *); 234 int rtwn_pci_detach(struct device *, int); 235 int rtwn_pci_activate(struct device *, int); 236 int rtwn_alloc_rx_list(struct rtwn_pci_softc *); 237 void rtwn_reset_rx_list(struct rtwn_pci_softc *); 238 void rtwn_free_rx_list(struct rtwn_pci_softc *); 239 void rtwn_setup_rx_desc(struct rtwn_pci_softc *, 240 struct r92c_rx_desc_pci *, bus_addr_t, size_t, int); 241 int rtwn_alloc_tx_list(struct rtwn_pci_softc *, int); 242 void rtwn_reset_tx_list(struct rtwn_pci_softc *, int); 243 void rtwn_free_tx_list(struct rtwn_pci_softc *, int); 244 void rtwn_pci_write_1(void *, uint16_t, uint8_t); 245 void rtwn_pci_write_2(void *, uint16_t, uint16_t); 246 void rtwn_pci_write_4(void *, uint16_t, uint32_t); 247 uint8_t rtwn_pci_read_1(void *, uint16_t); 248 uint16_t rtwn_pci_read_2(void *, uint16_t); 249 uint32_t rtwn_pci_read_4(void *, uint16_t); 250 void rtwn_rx_frame(struct rtwn_pci_softc *, 251 struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int); 252 int rtwn_tx(void *, struct mbuf *, struct ieee80211_node *); 253 void rtwn_tx_done(struct rtwn_pci_softc *, int); 254 int rtwn_alloc_buffers(void *); 255 int rtwn_pci_init(void *); 256 void rtwn_pci_88e_stop(struct rtwn_pci_softc *); 257 void rtwn_pci_stop(void *); 258 int rtwn_intr(void *); 259 int rtwn_is_oactive(void *); 260 int rtwn_92c_power_on(struct rtwn_pci_softc *); 261 int rtwn_88e_power_on(struct rtwn_pci_softc *); 262 int rtwn_23a_power_on(struct rtwn_pci_softc *); 263 int rtwn_power_on(void *); 264 int rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t); 265 int rtwn_llt_init(struct rtwn_pci_softc *, int); 266 int rtwn_dma_init(void *); 267 int rtwn_fw_loadpage(void *, int, uint8_t *, int); 268 int rtwn_pci_load_firmware(void *, u_char **, size_t *); 269 void rtwn_mac_init(void *); 270 void rtwn_bb_init(void *); 271 void rtwn_calib_to(void *); 272 void rtwn_next_calib(void *); 273 void rtwn_cancel_calib(void *); 274 void rtwn_scan_to(void *); 275 void rtwn_pci_next_scan(void *); 276 void rtwn_cancel_scan(void *); 277 void rtwn_wait_async(void *); 278 void rtwn_poll_c2h_events(struct rtwn_pci_softc *); 279 void rtwn_tx_report(struct rtwn_pci_softc *, uint8_t *, int); 280 281 /* Aliases. 
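 * Baseband (BB) registers live in the same memory mapped register space,
 * so the generic 32-bit MMIO accessors are reused for them directly.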
*/ 282 #define rtwn_bb_write rtwn_pci_write_4 283 #define rtwn_bb_read rtwn_pci_read_4 284 285 struct cfdriver rtwn_cd = { 286 NULL, "rtwn", DV_IFNET 287 }; 288 289 const struct cfattach rtwn_pci_ca = { 290 sizeof(struct rtwn_pci_softc), 291 rtwn_pci_match, 292 rtwn_pci_attach, 293 rtwn_pci_detach, 294 rtwn_pci_activate 295 }; 296 297 int 298 rtwn_pci_match(struct device *parent, void *match, void *aux) 299 { 300 return (pci_matchbyid(aux, rtwn_pci_devices, 301 nitems(rtwn_pci_devices))); 302 } 303 304 void 305 rtwn_pci_attach(struct device *parent, struct device *self, void *aux) 306 { 307 struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self; 308 struct pci_attach_args *pa = aux; 309 struct ifnet *ifp; 310 int i, error; 311 pcireg_t memtype; 312 pci_intr_handle_t ih; 313 const char *intrstr; 314 315 sc->sc_dmat = pa->pa_dmat; 316 sc->sc_pc = pa->pa_pc; 317 sc->sc_tag = pa->pa_tag; 318 319 timeout_set(&sc->calib_to, rtwn_calib_to, sc); 320 timeout_set(&sc->scan_to, rtwn_scan_to, sc); 321 322 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 323 324 /* Map control/status registers. */ 325 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA); 326 error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st, 327 &sc->sc_sh, NULL, &sc->sc_mapsize, 0); 328 if (error != 0) { 329 printf(": can't map mem space\n"); 330 return; 331 } 332 333 if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) { 334 printf(": can't map interrupt\n"); 335 return; 336 } 337 intrstr = pci_intr_string(sc->sc_pc, ih); 338 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET, 339 rtwn_intr, sc, sc->sc_dev.dv_xname); 340 if (sc->sc_ih == NULL) { 341 printf(": can't establish interrupt"); 342 if (intrstr != NULL) 343 printf(" at %s", intrstr); 344 printf("\n"); 345 return; 346 } 347 printf(": %s\n", intrstr); 348 349 /* Disable PCIe Active State Power Management (ASPM). */ 350 if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS, 351 &sc->sc_cap_off, NULL)) { 352 uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag, 353 sc->sc_cap_off + PCI_PCIE_LCSR); 354 lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1); 355 pci_conf_write(sc->sc_pc, sc->sc_tag, 356 sc->sc_cap_off + PCI_PCIE_LCSR, lcsr); 357 } 358 359 /* Allocate Tx/Rx buffers. */ 360 error = rtwn_alloc_rx_list(sc); 361 if (error != 0) { 362 printf("%s: could not allocate Rx buffers\n", 363 sc->sc_dev.dv_xname); 364 return; 365 } 366 for (i = 0; i < RTWN_NTXQUEUES; i++) { 367 error = rtwn_alloc_tx_list(sc, i); 368 if (error != 0) { 369 printf("%s: could not allocate Tx buffers\n", 370 sc->sc_dev.dv_xname); 371 rtwn_free_rx_list(sc); 372 return; 373 } 374 } 375 376 sc->amrr.amrr_min_success_threshold = 1; 377 sc->amrr.amrr_max_success_threshold = 15; 378 379 /* Attach the bus-agnostic driver. 
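 * The chip-independent rtwn(4) code in dev/ic/rtwn.c drives the hardware
 * through this ops vector; everything PCI specific (MMIO access, DMA
 * rings, interrupts, power sequencing) stays in this file.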
*/ 380 sc->sc_sc.sc_ops.cookie = sc; 381 sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1; 382 sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2; 383 sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4; 384 sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1; 385 sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2; 386 sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4; 387 sc->sc_sc.sc_ops.tx = rtwn_tx; 388 sc->sc_sc.sc_ops.power_on = rtwn_power_on; 389 sc->sc_sc.sc_ops.dma_init = rtwn_dma_init; 390 sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware; 391 sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage; 392 sc->sc_sc.sc_ops.mac_init = rtwn_mac_init; 393 sc->sc_sc.sc_ops.bb_init = rtwn_bb_init; 394 sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers; 395 sc->sc_sc.sc_ops.init = rtwn_pci_init; 396 sc->sc_sc.sc_ops.stop = rtwn_pci_stop; 397 sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive; 398 sc->sc_sc.sc_ops.next_calib = rtwn_next_calib; 399 sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib; 400 sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan; 401 sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan; 402 sc->sc_sc.sc_ops.wait_async = rtwn_wait_async; 403 404 sc->sc_sc.chip = RTWN_CHIP_PCI; 405 switch (PCI_PRODUCT(pa->pa_id)) { 406 case PCI_PRODUCT_REALTEK_RTL8188CE: 407 case PCI_PRODUCT_REALTEK_RTL8192CE: 408 sc->sc_sc.chip |= RTWN_CHIP_88C | RTWN_CHIP_92C; 409 break; 410 case PCI_PRODUCT_REALTEK_RTL8188EE: 411 sc->sc_sc.chip |= RTWN_CHIP_88E; 412 break; 413 case PCI_PRODUCT_REALTEK_RTL8723AE: 414 sc->sc_sc.chip |= RTWN_CHIP_23A; 415 break; 416 } 417 418 error = rtwn_attach(&sc->sc_dev, &sc->sc_sc); 419 if (error != 0) { 420 rtwn_free_rx_list(sc); 421 for (i = 0; i < RTWN_NTXQUEUES; i++) 422 rtwn_free_tx_list(sc, i); 423 return; 424 } 425 426 /* ifp is now valid */ 427 ifp = &sc->sc_sc.sc_ic.ic_if; 428 #if NBPFILTER > 0 429 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 430 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN); 431 432 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu); 433 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 434 sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT); 435 436 sc->sc_txtap_len = sizeof(sc->sc_txtapu); 437 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 438 sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT); 439 #endif 440 } 441 442 int 443 rtwn_pci_detach(struct device *self, int flags) 444 { 445 struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self; 446 int s, i; 447 448 s = splnet(); 449 450 if (timeout_initialized(&sc->calib_to)) 451 timeout_del(&sc->calib_to); 452 if (timeout_initialized(&sc->scan_to)) 453 timeout_del(&sc->scan_to); 454 455 rtwn_detach(&sc->sc_sc, flags); 456 457 /* Free Tx/Rx buffers. */ 458 for (i = 0; i < RTWN_NTXQUEUES; i++) 459 rtwn_free_tx_list(sc, i); 460 rtwn_free_rx_list(sc); 461 splx(s); 462 463 return (0); 464 } 465 466 int 467 rtwn_pci_activate(struct device *self, int act) 468 { 469 struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self; 470 471 return rtwn_activate(&sc->sc_sc, act); 472 } 473 474 void 475 rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc, 476 bus_addr_t addr, size_t len, int idx) 477 { 478 memset(desc, 0, sizeof(*desc)); 479 desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) | 480 ((idx == RTWN_RX_LIST_COUNT - 1) ? 
R92C_RXDW0_EOR : 0)); 481 desc->rxbufaddr = htole32(addr); 482 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize, 483 BUS_SPACE_BARRIER_WRITE); 484 desc->rxdw0 |= htole32(R92C_RXDW0_OWN); 485 } 486 487 int 488 rtwn_alloc_rx_list(struct rtwn_pci_softc *sc) 489 { 490 struct rtwn_rx_ring *rx_ring = &sc->rx_ring; 491 struct rtwn_rx_data *rx_data; 492 size_t size; 493 int i, error = 0; 494 495 /* Allocate Rx descriptors. */ 496 size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT; 497 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, 498 &rx_ring->map); 499 if (error != 0) { 500 printf("%s: could not create rx desc DMA map\n", 501 sc->sc_dev.dv_xname); 502 rx_ring->map = NULL; 503 goto fail; 504 } 505 506 error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1, 507 &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 508 if (error != 0) { 509 printf("%s: could not allocate rx desc\n", 510 sc->sc_dev.dv_xname); 511 goto fail; 512 } 513 514 error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs, 515 size, (caddr_t *)&rx_ring->desc, 516 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 517 if (error != 0) { 518 bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs); 519 rx_ring->desc = NULL; 520 printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname); 521 goto fail; 522 } 523 524 error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg, 525 1, size, BUS_DMA_NOWAIT); 526 if (error != 0) { 527 printf("%s: could not load rx desc\n", 528 sc->sc_dev.dv_xname); 529 goto fail; 530 } 531 532 bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size, 533 BUS_DMASYNC_PREWRITE); 534 535 /* Allocate Rx buffers. */ 536 for (i = 0; i < RTWN_RX_LIST_COUNT; i++) { 537 rx_data = &rx_ring->rx_data[i]; 538 539 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 540 0, BUS_DMA_NOWAIT, &rx_data->map); 541 if (error != 0) { 542 printf("%s: could not create rx buf DMA map\n", 543 sc->sc_dev.dv_xname); 544 goto fail; 545 } 546 547 rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES); 548 if (rx_data->m == NULL) { 549 printf("%s: could not allocate rx mbuf\n", 550 sc->sc_dev.dv_xname); 551 error = ENOMEM; 552 goto fail; 553 } 554 555 error = bus_dmamap_load(sc->sc_dmat, rx_data->map, 556 mtod(rx_data->m, void *), MCLBYTES, NULL, 557 BUS_DMA_NOWAIT | BUS_DMA_READ); 558 if (error != 0) { 559 printf("%s: could not load rx buf DMA map\n", 560 sc->sc_dev.dv_xname); 561 goto fail; 562 } 563 564 rtwn_setup_rx_desc(sc, &rx_ring->desc[i], 565 rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i); 566 } 567 fail: if (error != 0) 568 rtwn_free_rx_list(sc); 569 return (error); 570 } 571 572 void 573 rtwn_reset_rx_list(struct rtwn_pci_softc *sc) 574 { 575 struct rtwn_rx_ring *rx_ring = &sc->rx_ring; 576 struct rtwn_rx_data *rx_data; 577 int i; 578 579 for (i = 0; i < RTWN_RX_LIST_COUNT; i++) { 580 rx_data = &rx_ring->rx_data[i]; 581 rtwn_setup_rx_desc(sc, &rx_ring->desc[i], 582 rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i); 583 } 584 } 585 586 void 587 rtwn_free_rx_list(struct rtwn_pci_softc *sc) 588 { 589 struct rtwn_rx_ring *rx_ring = &sc->rx_ring; 590 struct rtwn_rx_data *rx_data; 591 int i, s; 592 593 s = splnet(); 594 595 if (rx_ring->map) { 596 if (rx_ring->desc) { 597 bus_dmamap_unload(sc->sc_dmat, rx_ring->map); 598 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc, 599 sizeof (struct r92c_rx_desc_pci) * 600 RTWN_RX_LIST_COUNT); 601 bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, 602 rx_ring->nsegs); 603 rx_ring->desc = NULL; 604 } 605 bus_dmamap_destroy(sc->sc_dmat, 
rx_ring->map); 606 rx_ring->map = NULL; 607 } 608 609 for (i = 0; i < RTWN_RX_LIST_COUNT; i++) { 610 rx_data = &rx_ring->rx_data[i]; 611 612 if (rx_data->m != NULL) { 613 bus_dmamap_unload(sc->sc_dmat, rx_data->map); 614 m_freem(rx_data->m); 615 rx_data->m = NULL; 616 } 617 bus_dmamap_destroy(sc->sc_dmat, rx_data->map); 618 rx_data->map = NULL; 619 } 620 621 splx(s); 622 } 623 624 int 625 rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid) 626 { 627 struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid]; 628 struct rtwn_tx_data *tx_data; 629 int i = 0, error = 0; 630 631 error = bus_dmamap_create(sc->sc_dmat, 632 sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1, 633 sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0, 634 BUS_DMA_NOWAIT, &tx_ring->map); 635 if (error != 0) { 636 printf("%s: could not create tx ring DMA map\n", 637 sc->sc_dev.dv_xname); 638 goto fail; 639 } 640 641 error = bus_dmamem_alloc(sc->sc_dmat, 642 sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0, 643 &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 644 if (error != 0) { 645 printf("%s: could not allocate tx ring DMA memory\n", 646 sc->sc_dev.dv_xname); 647 goto fail; 648 } 649 650 error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs, 651 sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 652 (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT); 653 if (error != 0) { 654 bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs); 655 printf("%s: can't map tx ring DMA memory\n", 656 sc->sc_dev.dv_xname); 657 goto fail; 658 } 659 660 error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc, 661 sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL, 662 BUS_DMA_NOWAIT); 663 if (error != 0) { 664 printf("%s: could not load tx ring DMA map\n", 665 sc->sc_dev.dv_xname); 666 goto fail; 667 } 668 669 for (i = 0; i < RTWN_TX_LIST_COUNT; i++) { 670 struct r92c_tx_desc_pci *desc = &tx_ring->desc[i]; 671 672 /* setup tx desc */ 673 desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr 674 + sizeof(struct r92c_tx_desc_pci) 675 * ((i + 1) % RTWN_TX_LIST_COUNT)); 676 677 tx_data = &tx_ring->tx_data[i]; 678 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 679 0, BUS_DMA_NOWAIT, &tx_data->map); 680 if (error != 0) { 681 printf("%s: could not create tx buf DMA map\n", 682 sc->sc_dev.dv_xname); 683 goto fail; 684 } 685 tx_data->m = NULL; 686 tx_data->ni = NULL; 687 } 688 fail: 689 if (error != 0) 690 rtwn_free_tx_list(sc, qid); 691 return (error); 692 } 693 694 void 695 rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid) 696 { 697 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 698 struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid]; 699 int i; 700 701 for (i = 0; i < RTWN_TX_LIST_COUNT; i++) { 702 struct r92c_tx_desc_pci *desc = &tx_ring->desc[i]; 703 struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i]; 704 705 memset(desc, 0, sizeof(*desc) - 706 (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) + 707 sizeof(desc->nextdescaddr))); 708 709 if (tx_data->m != NULL) { 710 bus_dmamap_unload(sc->sc_dmat, tx_data->map); 711 m_freem(tx_data->m); 712 tx_data->m = NULL; 713 ieee80211_release_node(ic, tx_data->ni); 714 tx_data->ni = NULL; 715 } 716 } 717 718 bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES, 719 BUS_DMASYNC_POSTWRITE); 720 721 sc->qfullmsk &= ~(1 << qid); 722 tx_ring->queued = 0; 723 tx_ring->cur = 0; 724 } 725 726 void 727 rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid) 728 { 729 struct rtwn_tx_ring *tx_ring = 
&sc->tx_ring[qid]; 730 struct rtwn_tx_data *tx_data; 731 int i; 732 733 if (tx_ring->map != NULL) { 734 if (tx_ring->desc != NULL) { 735 bus_dmamap_unload(sc->sc_dmat, tx_ring->map); 736 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc, 737 sizeof (struct r92c_tx_desc_pci) * 738 RTWN_TX_LIST_COUNT); 739 bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs); 740 } 741 bus_dmamap_destroy(sc->sc_dmat, tx_ring->map); 742 } 743 744 for (i = 0; i < RTWN_TX_LIST_COUNT; i++) { 745 tx_data = &tx_ring->tx_data[i]; 746 747 if (tx_data->m != NULL) { 748 bus_dmamap_unload(sc->sc_dmat, tx_data->map); 749 m_freem(tx_data->m); 750 tx_data->m = NULL; 751 } 752 bus_dmamap_destroy(sc->sc_dmat, tx_data->map); 753 } 754 755 sc->qfullmsk &= ~(1 << qid); 756 tx_ring->queued = 0; 757 tx_ring->cur = 0; 758 } 759 760 void 761 rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val) 762 { 763 struct rtwn_pci_softc *sc = cookie; 764 765 bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val); 766 } 767 768 void 769 rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val) 770 { 771 struct rtwn_pci_softc *sc = cookie; 772 773 val = htole16(val); 774 bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val); 775 } 776 777 void 778 rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val) 779 { 780 struct rtwn_pci_softc *sc = cookie; 781 782 val = htole32(val); 783 bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val); 784 } 785 786 uint8_t 787 rtwn_pci_read_1(void *cookie, uint16_t addr) 788 { 789 struct rtwn_pci_softc *sc = cookie; 790 791 return bus_space_read_1(sc->sc_st, sc->sc_sh, addr); 792 } 793 794 uint16_t 795 rtwn_pci_read_2(void *cookie, uint16_t addr) 796 { 797 struct rtwn_pci_softc *sc = cookie; 798 uint16_t val; 799 800 val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr); 801 return le16toh(val); 802 } 803 804 uint32_t 805 rtwn_pci_read_4(void *cookie, uint16_t addr) 806 { 807 struct rtwn_pci_softc *sc = cookie; 808 uint32_t val; 809 810 val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr); 811 return le32toh(val); 812 } 813 814 void 815 rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc, 816 struct rtwn_rx_data *rx_data, int desc_idx) 817 { 818 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 819 struct ifnet *ifp = &ic->ic_if; 820 struct ieee80211_rxinfo rxi; 821 struct ieee80211_frame *wh; 822 struct ieee80211_node *ni; 823 struct r92c_rx_phystat *phy = NULL; 824 uint32_t rxdw0, rxdw3; 825 struct mbuf *m, *m1; 826 uint8_t rate; 827 int8_t rssi = 0; 828 int infosz, pktlen, shift, error; 829 830 rxdw0 = letoh32(rx_desc->rxdw0); 831 rxdw3 = letoh32(rx_desc->rxdw3); 832 833 if (sc->sc_sc.chip & RTWN_CHIP_88E) { 834 int ntries, type; 835 struct r88e_tx_rpt_ccx *rxstat; 836 837 type = MS(rxdw3, R88E_RXDW3_RPT); 838 if (type == R88E_RXDW3_RPT_TX1) { 839 uint32_t rptb1, rptb2; 840 841 rxstat = mtod(rx_data->m, struct r88e_tx_rpt_ccx *); 842 rptb1 = letoh32(rxstat->rptb1); 843 rptb2 = letoh32(rxstat->rptb2); 844 ntries = MS(rptb2, R88E_RPTB2_RETRY_CNT); 845 if (rptb1 & R88E_RPTB1_PKT_OK) 846 sc->amn.amn_txcnt++; 847 if (ntries > 0) 848 sc->amn.amn_retrycnt++; 849 850 rtwn_setup_rx_desc(sc, rx_desc, 851 rx_data->map->dm_segs[0].ds_addr, MCLBYTES, 852 desc_idx); 853 return; 854 } 855 } 856 857 if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) { 858 /* 859 * This should not happen since we setup our Rx filter 860 * to not receive these frames. 
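 * If such a frame does slip through it is simply dropped and
 * counted as an input error.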
861 */ 862 ifp->if_ierrors++; 863 return; 864 } 865 866 pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); 867 if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) { 868 ifp->if_ierrors++; 869 return; 870 } 871 872 rate = MS(rxdw3, R92C_RXDW3_RATE); 873 infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; 874 if (infosz > sizeof(struct r92c_rx_phystat)) 875 infosz = sizeof(struct r92c_rx_phystat); 876 shift = MS(rxdw0, R92C_RXDW0_SHIFT); 877 878 /* Get RSSI from PHY status descriptor if present. */ 879 if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) { 880 phy = mtod(rx_data->m, struct r92c_rx_phystat *); 881 rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy); 882 /* Update our average RSSI. */ 883 rtwn_update_avgrssi(&sc->sc_sc, rate, rssi); 884 } 885 886 DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n", 887 pktlen, rate, infosz, shift, rssi)); 888 889 m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES); 890 if (m1 == NULL) { 891 ifp->if_ierrors++; 892 return; 893 } 894 bus_dmamap_unload(sc->sc_dmat, rx_data->map); 895 error = bus_dmamap_load(sc->sc_dmat, rx_data->map, 896 mtod(m1, void *), MCLBYTES, NULL, 897 BUS_DMA_NOWAIT | BUS_DMA_READ); 898 if (error != 0) { 899 m_freem(m1); 900 901 if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, 902 rx_data->m, BUS_DMA_NOWAIT)) 903 panic("%s: could not load old RX mbuf", 904 sc->sc_dev.dv_xname); 905 906 /* Physical address may have changed. */ 907 rtwn_setup_rx_desc(sc, rx_desc, 908 rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx); 909 910 ifp->if_ierrors++; 911 return; 912 } 913 914 /* Finalize mbuf. */ 915 m = rx_data->m; 916 rx_data->m = m1; 917 m->m_pkthdr.len = m->m_len = pktlen + infosz + shift; 918 919 /* Update RX descriptor. */ 920 rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr, 921 MCLBYTES, desc_idx); 922 923 /* Get ieee80211 frame header. */ 924 if (rxdw0 & R92C_RXDW0_PHYST) 925 m_adj(m, infosz + shift); 926 else 927 m_adj(m, shift); 928 wh = mtod(m, struct ieee80211_frame *); 929 930 #if NBPFILTER > 0 931 if (__predict_false(sc->sc_drvbpf != NULL)) { 932 struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap; 933 struct mbuf mb; 934 935 tap->wr_flags = 0; 936 /* Map HW rate index to 802.11 rate. */ 937 tap->wr_flags = 2; 938 if (!(rxdw3 & R92C_RXDW3_HT)) { 939 switch (rate) { 940 /* CCK. */ 941 case 0: tap->wr_rate = 2; break; 942 case 1: tap->wr_rate = 4; break; 943 case 2: tap->wr_rate = 11; break; 944 case 3: tap->wr_rate = 22; break; 945 /* OFDM. */ 946 case 4: tap->wr_rate = 12; break; 947 case 5: tap->wr_rate = 18; break; 948 case 6: tap->wr_rate = 24; break; 949 case 7: tap->wr_rate = 36; break; 950 case 8: tap->wr_rate = 48; break; 951 case 9: tap->wr_rate = 72; break; 952 case 10: tap->wr_rate = 96; break; 953 case 11: tap->wr_rate = 108; break; 954 } 955 } else if (rate >= 12) { /* MCS0~15. */ 956 /* Bit 7 set means HT MCS instead of rate. */ 957 tap->wr_rate = 0x80 | (rate - 12); 958 } 959 tap->wr_dbm_antsignal = rssi; 960 tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq); 961 tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags); 962 963 mb.m_data = (caddr_t)tap; 964 mb.m_len = sc->sc_rxtap_len; 965 mb.m_next = m; 966 mb.m_nextpkt = NULL; 967 mb.m_type = 0; 968 mb.m_flags = 0; 969 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 970 } 971 #endif 972 973 ni = ieee80211_find_rxnode(ic, wh); 974 rxi.rxi_flags = 0; 975 rxi.rxi_rssi = rssi; 976 rxi.rxi_tstamp = 0; /* Unused. */ 977 ieee80211_input(ifp, m, ni, &rxi); 978 /* Node is no longer needed. 
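 * This drops the reference taken by ieee80211_find_rxnode() above.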
*/ 979 ieee80211_release_node(ic, ni); 980 } 981 982 int 983 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni) 984 { 985 struct rtwn_pci_softc *sc = cookie; 986 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 987 struct ieee80211_frame *wh; 988 struct ieee80211_key *k = NULL; 989 struct rtwn_tx_ring *tx_ring; 990 struct rtwn_tx_data *data; 991 struct r92c_tx_desc_pci *txd; 992 uint16_t qos; 993 uint8_t raid, type, tid, qid; 994 int hasqos, error; 995 996 wh = mtod(m, struct ieee80211_frame *); 997 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 998 999 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 1000 k = ieee80211_get_txkey(ic, wh, ni); 1001 if ((m = ieee80211_encrypt(ic, m, k)) == NULL) 1002 return (ENOBUFS); 1003 wh = mtod(m, struct ieee80211_frame *); 1004 } 1005 1006 if ((hasqos = ieee80211_has_qos(wh))) { 1007 qos = ieee80211_get_qos(wh); 1008 tid = qos & IEEE80211_QOS_TID; 1009 qid = ieee80211_up_to_ac(ic, tid); 1010 } else if (type != IEEE80211_FC0_TYPE_DATA) { 1011 qid = RTWN_VO_QUEUE; 1012 } else 1013 qid = RTWN_BE_QUEUE; 1014 1015 /* Grab a Tx buffer from the ring. */ 1016 tx_ring = &sc->tx_ring[qid]; 1017 data = &tx_ring->tx_data[tx_ring->cur]; 1018 if (data->m != NULL) { 1019 m_freem(m); 1020 return (ENOBUFS); 1021 } 1022 1023 /* Fill Tx descriptor. */ 1024 txd = &tx_ring->desc[tx_ring->cur]; 1025 if (htole32(txd->txdw0) & R92C_RXDW0_OWN) { 1026 m_freem(m); 1027 return (ENOBUFS); 1028 } 1029 txd->txdw0 = htole32( 1030 SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) | 1031 SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | 1032 R92C_TXDW0_FSG | R92C_TXDW0_LSG); 1033 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 1034 txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); 1035 1036 txd->txdw1 = 0; 1037 #ifdef notyet 1038 if (k != NULL) { 1039 switch (k->k_cipher) { 1040 case IEEE80211_CIPHER_WEP40: 1041 case IEEE80211_CIPHER_WEP104: 1042 case IEEE80211_CIPHER_TKIP: 1043 cipher = R92C_TXDW1_CIPHER_RC4; 1044 break; 1045 case IEEE80211_CIPHER_CCMP: 1046 cipher = R92C_TXDW1_CIPHER_AES; 1047 break; 1048 default: 1049 cipher = R92C_TXDW1_CIPHER_NONE; 1050 } 1051 txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher)); 1052 } 1053 #endif 1054 txd->txdw4 = 0; 1055 txd->txdw5 = 0; 1056 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 1057 type == IEEE80211_FC0_TYPE_DATA) { 1058 if (ic->ic_curmode == IEEE80211_MODE_11B || 1059 (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B)) 1060 raid = R92C_RAID_11B; 1061 else 1062 raid = R92C_RAID_11BG; 1063 1064 if (sc->sc_sc.chip & RTWN_CHIP_88E) { 1065 txd->txdw1 |= htole32( 1066 SM(R88E_TXDW1_MACID, R92C_MACID_BSS) | 1067 SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) | 1068 SM(R92C_TXDW1_RAID, raid)); 1069 txd->txdw2 |= htole32(R88E_TXDW2_AGGBK); 1070 } else { 1071 txd->txdw1 |= htole32( 1072 SM(R92C_TXDW1_MACID, R92C_MACID_BSS) | 1073 SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) | 1074 SM(R92C_TXDW1_RAID, raid) | 1075 R92C_TXDW1_AGGBK); 1076 } 1077 1078 /* Request TX status report for AMRR. 
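 * On the 8188EE the report comes back as an R88E_RXDW3_RPT_TX1
 * pseudo-frame on the RX ring and is consumed near the top of
 * rtwn_rx_frame() above.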
*/ 1079 txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT); 1080 1081 if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 1082 txd->txdw4 |= htole32(R92C_TXDW4_RTSEN | 1083 R92C_TXDW4_HWRTSEN); 1084 } else if (ic->ic_flags & IEEE80211_F_USEPROT) { 1085 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1086 txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF | 1087 R92C_TXDW4_HWRTSEN); 1088 } else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1089 txd->txdw4 |= htole32(R92C_TXDW4_RTSEN | 1090 R92C_TXDW4_HWRTSEN); 1091 } 1092 } 1093 1094 if (ic->ic_curmode == IEEE80211_MODE_11B) 1095 txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 0)); 1096 else 1097 txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 3)); 1098 txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf)); 1099 1100 /* Use AMMR rate for data. */ 1101 txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); 1102 if (ic->ic_fixed_rate != -1) 1103 txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 1104 ic->ic_fixed_rate)); 1105 else 1106 txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 1107 ni->ni_txrate)); 1108 txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f)); 1109 } else { 1110 txd->txdw1 |= htole32( 1111 SM(R92C_TXDW1_MACID, 0) | 1112 SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) | 1113 SM(R92C_TXDW1_RAID, R92C_RAID_11B)); 1114 1115 /* Force CCK1. */ 1116 txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); 1117 txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0)); 1118 } 1119 /* Set sequence number (already little endian). */ 1120 txd->txdseq = (*(uint16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT; 1121 if (sc->sc_sc.chip & RTWN_CHIP_23A) 1122 txd->txdseq |= htole16(R23A_TXDW3_TXRPTEN); 1123 1124 if (!hasqos) { 1125 /* Use HW sequence numbering for non-QoS frames. */ 1126 if (!(sc->sc_sc.chip & RTWN_CHIP_23A)) 1127 txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ); 1128 txd->txdseq |= htole16(R92C_TXDW3_HWSEQEN); 1129 } else 1130 txd->txdw4 |= htole32(R92C_TXDW4_QOS); 1131 1132 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 1133 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 1134 if (error && error != EFBIG) { 1135 printf("%s: can't map mbuf (error %d)\n", 1136 sc->sc_dev.dv_xname, error); 1137 m_freem(m); 1138 return error; 1139 } 1140 if (error != 0) { 1141 /* Too many DMA segments, linearize mbuf. 
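 * m_defrag() copies the chain into a single mbuf cluster so that it
 * fits the single-segment, MCLBYTES-sized DMA map created in
 * rtwn_alloc_tx_list().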
*/ 1142 if (m_defrag(m, M_DONTWAIT)) { 1143 m_freem(m); 1144 return ENOBUFS; 1145 } 1146 1147 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 1148 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 1149 if (error != 0) { 1150 printf("%s: can't map mbuf (error %d)\n", 1151 sc->sc_dev.dv_xname, error); 1152 m_freem(m); 1153 return error; 1154 } 1155 } 1156 1157 txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr); 1158 txd->txbufsize = htole16(m->m_pkthdr.len); 1159 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize, 1160 BUS_SPACE_BARRIER_WRITE); 1161 txd->txdw0 |= htole32(R92C_TXDW0_OWN); 1162 1163 bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES, 1164 BUS_DMASYNC_POSTWRITE); 1165 bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES, 1166 BUS_DMASYNC_POSTWRITE); 1167 1168 data->m = m; 1169 data->ni = ni; 1170 1171 #if NBPFILTER > 0 1172 if (__predict_false(sc->sc_drvbpf != NULL)) { 1173 struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap; 1174 struct mbuf mb; 1175 1176 tap->wt_flags = 0; 1177 tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq); 1178 tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags); 1179 1180 mb.m_data = (caddr_t)tap; 1181 mb.m_len = sc->sc_txtap_len; 1182 mb.m_next = m; 1183 mb.m_nextpkt = NULL; 1184 mb.m_type = 0; 1185 mb.m_flags = 0; 1186 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 1187 } 1188 #endif 1189 1190 tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT; 1191 tx_ring->queued++; 1192 1193 if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1)) 1194 sc->qfullmsk |= (1 << qid); 1195 1196 /* Kick TX. */ 1197 rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid)); 1198 1199 return (0); 1200 } 1201 1202 void 1203 rtwn_tx_done(struct rtwn_pci_softc *sc, int qid) 1204 { 1205 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 1206 struct ifnet *ifp = &ic->ic_if; 1207 struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid]; 1208 struct rtwn_tx_data *tx_data; 1209 struct r92c_tx_desc_pci *tx_desc; 1210 int i; 1211 1212 bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES, 1213 BUS_DMASYNC_POSTREAD); 1214 1215 for (i = 0; i < RTWN_TX_LIST_COUNT; i++) { 1216 tx_data = &tx_ring->tx_data[i]; 1217 if (tx_data->m == NULL) 1218 continue; 1219 1220 tx_desc = &tx_ring->desc[i]; 1221 if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN) 1222 continue; 1223 1224 bus_dmamap_unload(sc->sc_dmat, tx_data->map); 1225 m_freem(tx_data->m); 1226 tx_data->m = NULL; 1227 ieee80211_release_node(ic, tx_data->ni); 1228 tx_data->ni = NULL; 1229 1230 sc->sc_sc.sc_tx_timer = 0; 1231 tx_ring->queued--; 1232 1233 if (!(sc->sc_sc.chip & RTWN_CHIP_23A)) 1234 rtwn_poll_c2h_events(sc); 1235 } 1236 1237 if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1)) 1238 sc->qfullmsk &= ~(1 << qid); 1239 1240 if (sc->qfullmsk == 0) { 1241 ifq_clr_oactive(&ifp->if_snd); 1242 (*ifp->if_start)(ifp); 1243 } 1244 } 1245 1246 int 1247 rtwn_alloc_buffers(void *cookie) 1248 { 1249 /* Tx/Rx buffers were already allocated in rtwn_pci_attach() */ 1250 return (0); 1251 } 1252 1253 int 1254 rtwn_pci_init(void *cookie) 1255 { 1256 struct rtwn_pci_softc *sc = cookie; 1257 ieee80211_amrr_node_init(&sc->amrr, &sc->amn); 1258 1259 /* Enable TX reports for AMRR */ 1260 if (sc->sc_sc.chip & RTWN_CHIP_88E) { 1261 rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL, 1262 (rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) & ~0) | 1263 R88E_TX_RPT_CTRL_EN); 1264 rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL + 1, 0x02); 1265 1266 rtwn_pci_write_2(sc, R88E_TX_RPT_TIME, 0xcdf0); 1267 } 1268 1269 return (0); 1270 } 1271 1272 void 1273 rtwn_pci_92c_stop(struct rtwn_pci_softc *sc) 
1274 { 1275 uint16_t reg; 1276 1277 /* Disable interrupts. */ 1278 rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000); 1279 1280 /* Stop hardware. */ 1281 rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL); 1282 rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00); 1283 reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN); 1284 reg |= R92C_SYS_FUNC_EN_BB_GLB_RST; 1285 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg); 1286 reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST; 1287 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg); 1288 reg = rtwn_pci_read_2(sc, R92C_CR); 1289 reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | 1290 R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | 1291 R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | 1292 R92C_CR_ENSEC); 1293 rtwn_pci_write_2(sc, R92C_CR, reg); 1294 if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL) 1295 rtwn_fw_reset(&sc->sc_sc); 1296 /* TODO: linux does additional btcoex stuff here */ 1297 rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80); /* linux magic number */ 1298 rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23); /* ditto */ 1299 rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e); /* differs in btcoex */ 1300 rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 | 1301 R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08); 1302 rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN); 1303 } 1304 1305 void 1306 rtwn_pci_88e_stop(struct rtwn_pci_softc *sc) 1307 { 1308 int i; 1309 uint16_t reg; 1310 1311 /* Disable interrupts. */ 1312 rtwn_pci_write_4(sc, R88E_HIMR, 0x00000000); 1313 1314 /* Stop hardware. */ 1315 rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL, 1316 rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) & 1317 ~(R88E_TX_RPT_CTRL_EN)); 1318 1319 for (i = 0; i < 100; i++) { 1320 if (rtwn_pci_read_1(sc, R88E_RXDMA_CTRL) & 0x02) 1321 break; 1322 DELAY(10); 1323 } 1324 if (i == 100) 1325 DPRINTF(("rxdma ctrl didn't go off, %x\n", rtwn_pci_read_1(sc, R88E_RXDMA_CTRL))); 1326 1327 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff); 1328 1329 rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL); 1330 1331 /* ensure transmission has stopped */ 1332 for (i = 0; i < 100; i++) { 1333 if (rtwn_pci_read_4(sc, 0x5f8) == 0) 1334 break; 1335 DELAY(10); 1336 } 1337 if (i == 100) 1338 DPRINTF(("tx didn't stop\n")); 1339 1340 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, 1341 rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) & 1342 ~(R92C_SYS_FUNC_EN_BBRSTB)); 1343 DELAY(1); 1344 reg = rtwn_pci_read_2(sc, R92C_CR); 1345 reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | 1346 R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | 1347 R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | 1348 R92C_CR_ENSEC); 1349 rtwn_pci_write_2(sc, R92C_CR, reg); 1350 rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST, 1351 rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20); 1352 1353 rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00); 1354 if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL) 1355 rtwn_fw_reset(&sc->sc_sc); 1356 1357 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1, 1358 rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~0x02); 1359 rtwn_pci_write_1(sc, R92C_MCUFWDL, 0); 1360 1361 rtwn_pci_write_1(sc, R88E_32K_CTRL, 1362 rtwn_pci_read_1(sc, R88E_32K_CTRL) & ~(0x01)); 1363 1364 /* transition to cardemu state */ 1365 rtwn_pci_write_1(sc, R92C_RF_CTRL, 0); 1366 rtwn_pci_write_1(sc, R92C_LPLDO_CTRL, 1367 rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) | 0x10); 1368 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1369 rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_OFF); 1370 for (i = 0; i < 100; i++) { 1371 if ((rtwn_pci_read_2(sc, 
R92C_APS_FSMCO) & 1372 R92C_APS_FSMCO_APFM_OFF) == 0) 1373 break; 1374 DELAY(10); 1375 } 1376 if (i == 100) 1377 DPRINTF(("apfm off didn't go off\n")); 1378 1379 /* transition to card disabled state */ 1380 rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2, 1381 rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80); 1382 1383 rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1, 1384 rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08); 1385 rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1, 1386 rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08); 1387 1388 rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 | 1389 R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08); 1390 } 1391 1392 void 1393 rtwn_pci_23a_stop(struct rtwn_pci_softc *sc) 1394 { 1395 int i; 1396 1397 /* Disable interrupts. */ 1398 rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000); 1399 1400 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff); 1401 rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL); 1402 1403 /* ensure transmission has stopped */ 1404 for (i = 0; i < 100; i++) { 1405 if (rtwn_pci_read_4(sc, 0x5f8) == 0) 1406 break; 1407 DELAY(10); 1408 } 1409 if (i == 100) 1410 DPRINTF(("tx didn't stop\n")); 1411 1412 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, 1413 rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) & 1414 ~(R92C_SYS_FUNC_EN_BBRSTB)); 1415 DELAY(1); 1416 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, 1417 rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) & 1418 ~(R92C_SYS_FUNC_EN_BB_GLB_RST)); 1419 1420 rtwn_pci_write_2(sc, R92C_CR, 1421 rtwn_pci_read_2(sc, R92C_CR) & 1422 ~(R92C_CR_MACTXEN | R92C_CR_MACRXEN | R92C_CR_ENSWBCN)); 1423 1424 rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST, 1425 rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20); 1426 1427 /* Turn off RF */ 1428 rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00); 1429 if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL) 1430 rtwn_fw_reset(&sc->sc_sc); 1431 1432 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1, 1433 rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~R92C_SYS_FUNC_EN_DIOE); 1434 rtwn_pci_write_1(sc, R92C_MCUFWDL, 0); 1435 1436 rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00); 1437 rtwn_pci_write_1(sc, R92C_LEDCFG2, rtwn_pci_read_1(sc, R92C_LEDCFG2) & ~(0x80)); 1438 rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) | 1439 R92C_APS_FSMCO_APFM_OFF); 1440 rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1441 ~(R92C_APS_FSMCO_APFM_OFF)); 1442 1443 rtwn_pci_write_4(sc, R92C_APS_FSMCO, 1444 rtwn_pci_read_4(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_RDY_MACON); 1445 rtwn_pci_write_4(sc, R92C_APS_FSMCO, 1446 rtwn_pci_read_4(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APDM_HPDN); 1447 1448 rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1, 1449 rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08); 1450 rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1, 1451 rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08); 1452 1453 rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 | 1454 R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08); 1455 } 1456 1457 void 1458 rtwn_pci_stop(void *cookie) 1459 { 1460 struct rtwn_pci_softc *sc = cookie; 1461 int i, s; 1462 1463 s = splnet(); 1464 1465 if (sc->sc_sc.chip & RTWN_CHIP_88E) { 1466 rtwn_pci_88e_stop(sc); 1467 } else if (sc->sc_sc.chip & RTWN_CHIP_23A) { 1468 rtwn_pci_23a_stop(sc); 1469 } else { 1470 rtwn_pci_92c_stop(sc); 1471 } 1472 1473 for (i = 0; i < RTWN_NTXQUEUES; i++) 1474 rtwn_reset_tx_list(sc, i); 1475 rtwn_reset_rx_list(sc); 1476 1477 splx(s); 1478 } 1479 1480 int 1481 rtwn_88e_intr(struct rtwn_pci_softc *sc) 1482 { 1483 
u_int32_t status, estatus; 1484 int i; 1485 1486 status = rtwn_pci_read_4(sc, R88E_HISR); 1487 if (status == 0 || status == 0xffffffff) 1488 return (0); 1489 1490 estatus = rtwn_pci_read_4(sc, R88E_HISRE); 1491 1492 status &= RTWN_88E_INT_ENABLE; 1493 estatus &= R88E_HIMRE_RXFOVW; 1494 1495 rtwn_pci_write_4(sc, R88E_HIMR, 0); 1496 rtwn_pci_write_4(sc, R88E_HIMRE, 0); 1497 rtwn_pci_write_4(sc, R88E_HISR, status); 1498 rtwn_pci_write_4(sc, R88E_HISRE, estatus); 1499 1500 if (status & R88E_HIMR_HIGHDOK) 1501 rtwn_tx_done(sc, RTWN_HIGH_QUEUE); 1502 if (status & R88E_HIMR_MGNTDOK) 1503 rtwn_tx_done(sc, RTWN_MGNT_QUEUE); 1504 if (status & R88E_HIMR_BKDOK) 1505 rtwn_tx_done(sc, RTWN_BK_QUEUE); 1506 if (status & R88E_HIMR_BEDOK) 1507 rtwn_tx_done(sc, RTWN_BE_QUEUE); 1508 if (status & R88E_HIMR_VIDOK) 1509 rtwn_tx_done(sc, RTWN_VI_QUEUE); 1510 if (status & R88E_HIMR_VODOK) 1511 rtwn_tx_done(sc, RTWN_VO_QUEUE); 1512 if ((status & (R88E_HIMR_ROK | R88E_HIMR_RDU)) || 1513 (estatus & R88E_HIMRE_RXFOVW)) { 1514 bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0, 1515 sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT, 1516 BUS_DMASYNC_POSTREAD); 1517 1518 for (i = 0; i < RTWN_RX_LIST_COUNT; i++) { 1519 struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i]; 1520 struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i]; 1521 1522 if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN) 1523 continue; 1524 1525 rtwn_rx_frame(sc, rx_desc, rx_data, i); 1526 } 1527 } 1528 1529 if (status & R88E_HIMR_HSISR_IND_ON_INT) { 1530 rtwn_pci_write_1(sc, R92C_HSISR, 1531 rtwn_pci_read_1(sc, R92C_HSISR) | 1532 R88E_HSIMR_PDN_INT_EN | R88E_HSIMR_RON_INT_EN); 1533 } 1534 1535 /* Enable interrupts. */ 1536 rtwn_pci_write_4(sc, R88E_HIMR, RTWN_88E_INT_ENABLE); 1537 rtwn_pci_write_4(sc, R88E_HIMRE, R88E_HIMRE_RXFOVW); 1538 1539 return (1); 1540 } 1541 1542 int 1543 rtwn_intr(void *xsc) 1544 { 1545 struct rtwn_pci_softc *sc = xsc; 1546 u_int32_t status; 1547 int i; 1548 1549 if (sc->sc_sc.chip & RTWN_CHIP_88E) 1550 return (rtwn_88e_intr(sc)); 1551 1552 status = rtwn_pci_read_4(sc, R92C_HISR); 1553 if (status == 0 || status == 0xffffffff) 1554 return (0); 1555 1556 /* Disable interrupts. */ 1557 rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000); 1558 1559 /* Ack interrupts. */ 1560 rtwn_pci_write_4(sc, R92C_HISR, status); 1561 1562 /* Vendor driver treats RX errors like ROK... 
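 * so RX FIFO overflow (RXFOVW) and descriptor unavailable (RDU)
 * conditions are handled by draining the ring exactly as for a
 * normal receive-OK interrupt.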
*/ 1563 if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) { 1564 bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0, 1565 sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT, 1566 BUS_DMASYNC_POSTREAD); 1567 1568 for (i = 0; i < RTWN_RX_LIST_COUNT; i++) { 1569 struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i]; 1570 struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i]; 1571 1572 if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN) 1573 continue; 1574 1575 rtwn_rx_frame(sc, rx_desc, rx_data, i); 1576 } 1577 } 1578 1579 if (status & R92C_IMR_BDOK) 1580 rtwn_tx_done(sc, RTWN_BEACON_QUEUE); 1581 if (status & R92C_IMR_HIGHDOK) 1582 rtwn_tx_done(sc, RTWN_HIGH_QUEUE); 1583 if (status & R92C_IMR_MGNTDOK) 1584 rtwn_tx_done(sc, RTWN_MGNT_QUEUE); 1585 if (status & R92C_IMR_BKDOK) 1586 rtwn_tx_done(sc, RTWN_BK_QUEUE); 1587 if (status & R92C_IMR_BEDOK) 1588 rtwn_tx_done(sc, RTWN_BE_QUEUE); 1589 if (status & R92C_IMR_VIDOK) 1590 rtwn_tx_done(sc, RTWN_VI_QUEUE); 1591 if (status & R92C_IMR_VODOK) 1592 rtwn_tx_done(sc, RTWN_VO_QUEUE); 1593 1594 if (sc->sc_sc.chip & RTWN_CHIP_23A) { 1595 if (status & R92C_IMR_ATIMEND) 1596 rtwn_poll_c2h_events(sc); 1597 } 1598 1599 /* Enable interrupts. */ 1600 rtwn_pci_write_4(sc, R92C_HIMR, RTWN_92C_INT_ENABLE); 1601 1602 return (1); 1603 } 1604 1605 int 1606 rtwn_is_oactive(void *cookie) 1607 { 1608 struct rtwn_pci_softc *sc = cookie; 1609 1610 return (sc->qfullmsk != 0); 1611 } 1612 1613 int 1614 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data) 1615 { 1616 int ntries; 1617 1618 rtwn_pci_write_4(sc, R92C_LLT_INIT, 1619 SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) | 1620 SM(R92C_LLT_INIT_ADDR, addr) | 1621 SM(R92C_LLT_INIT_DATA, data)); 1622 /* Wait for write operation to complete. */ 1623 for (ntries = 0; ntries < 20; ntries++) { 1624 if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == 1625 R92C_LLT_INIT_OP_NO_ACTIVE) 1626 return (0); 1627 DELAY(5); 1628 } 1629 return (ETIMEDOUT); 1630 } 1631 1632 int 1633 rtwn_llt_init(struct rtwn_pci_softc *sc, int page_count) 1634 { 1635 int i, error, pktbuf_count; 1636 1637 if (sc->sc_sc.chip & RTWN_CHIP_88E) 1638 pktbuf_count = R88E_TXPKTBUF_COUNT; 1639 else if (sc->sc_sc.chip & RTWN_CHIP_23A) 1640 pktbuf_count = R23A_TXPKTBUF_COUNT; 1641 else 1642 pktbuf_count = R92C_TXPKTBUF_COUNT; 1643 1644 /* Reserve pages [0; page_count]. */ 1645 for (i = 0; i < page_count; i++) { 1646 if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) 1647 return (error); 1648 } 1649 /* NB: 0xff indicates end-of-list. */ 1650 if ((error = rtwn_llt_write(sc, i, 0xff)) != 0) 1651 return (error); 1652 /* 1653 * Use pages [page_count + 1; pktbuf_count - 1] 1654 * as ring buffer. 1655 */ 1656 for (++i; i < pktbuf_count - 1; i++) { 1657 if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) 1658 return (error); 1659 } 1660 /* Make the last page point to the beginning of the ring buffer. */ 1661 error = rtwn_llt_write(sc, i, pktbuf_count + 1); 1662 return (error); 1663 } 1664 1665 int 1666 rtwn_92c_power_on(struct rtwn_pci_softc *sc) 1667 { 1668 uint32_t reg; 1669 int ntries; 1670 1671 /* Wait for autoload done bit. */ 1672 for (ntries = 0; ntries < 1000; ntries++) { 1673 if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) & 1674 R92C_APS_FSMCO_PFM_ALDN) 1675 break; 1676 DELAY(5); 1677 } 1678 if (ntries == 1000) { 1679 printf("%s: timeout waiting for chip autoload\n", 1680 sc->sc_dev.dv_xname); 1681 return (ETIMEDOUT); 1682 } 1683 1684 /* Unlock ISO/CLK/Power control register. 
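 * Writing 0 to R92C_RSV_CTRL clears the write-lock bits so the power
 * sequencing registers below can be changed; the stop routines set the
 * R92C_RSV_CTRL_WLOCK_* bits to lock them again.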
*/ 1685 rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0); 1686 1687 /* TODO: check if we need this for 8188CE */ 1688 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1689 /* bt coex */ 1690 reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO); 1691 reg |= (R92C_APS_FSMCO_SOP_ABG | 1692 R92C_APS_FSMCO_SOP_AMB | 1693 R92C_APS_FSMCO_XOP_BTCK); 1694 rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg); 1695 } 1696 1697 /* Move SPS into PWM mode. */ 1698 rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b); 1699 DELAY(100); 1700 1701 /* Set low byte to 0x0f, leave others unchanged. */ 1702 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, 1703 (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f); 1704 1705 /* TODO: check if we need this for 8188CE */ 1706 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1707 /* bt coex */ 1708 reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL); 1709 reg &= (~0x00024800); /* XXX magic from linux */ 1710 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg); 1711 } 1712 1713 rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL, 1714 (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) | 1715 R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR); 1716 DELAY(200); 1717 1718 /* TODO: linux does additional btcoex stuff here */ 1719 1720 /* Auto enable WLAN. */ 1721 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1722 rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); 1723 for (ntries = 0; ntries < 1000; ntries++) { 1724 if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1725 R92C_APS_FSMCO_APFM_ONMAC)) 1726 break; 1727 DELAY(5); 1728 } 1729 if (ntries == 1000) { 1730 printf("%s: timeout waiting for MAC auto ON\n", 1731 sc->sc_dev.dv_xname); 1732 return (ETIMEDOUT); 1733 } 1734 1735 /* Enable radio, GPIO and LED functions. */ 1736 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1737 R92C_APS_FSMCO_AFSM_PCIE | 1738 R92C_APS_FSMCO_PDN_EN | 1739 R92C_APS_FSMCO_PFM_ALDN); 1740 /* Release RF digital isolation. */ 1741 rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL, 1742 rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR); 1743 1744 if (sc->sc_sc.chip & RTWN_CHIP_92C) 1745 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77); 1746 else 1747 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22); 1748 1749 rtwn_pci_write_4(sc, R92C_INT_MIG, 0); 1750 1751 if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) { 1752 /* bt coex */ 1753 reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2); 1754 reg &= 0xfd; /* XXX magic from linux */ 1755 rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg); 1756 } 1757 1758 rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG, 1759 rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL); 1760 1761 reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL); 1762 if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) { 1763 printf("%s: radio is disabled by hardware switch\n", 1764 sc->sc_dev.dv_xname); 1765 return (EPERM); /* :-) */ 1766 } 1767 1768 /* Initialize MAC. */ 1769 rtwn_pci_write_1(sc, R92C_APSD_CTRL, 1770 rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF); 1771 for (ntries = 0; ntries < 200; ntries++) { 1772 if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) & 1773 R92C_APSD_CTRL_OFF_STATUS)) 1774 break; 1775 DELAY(500); 1776 } 1777 if (ntries == 200) { 1778 printf("%s: timeout waiting for MAC initialization\n", 1779 sc->sc_dev.dv_xname); 1780 return (ETIMEDOUT); 1781 } 1782 1783 /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. 
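 * The same set of R92C_CR bits is cleared again in rtwn_pci_92c_stop()
 * when the interface is brought down.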
*/ 1784 reg = rtwn_pci_read_2(sc, R92C_CR); 1785 reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | 1786 R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | 1787 R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | 1788 R92C_CR_ENSEC; 1789 rtwn_pci_write_2(sc, R92C_CR, reg); 1790 1791 rtwn_pci_write_1(sc, 0xfe10, 0x19); 1792 1793 return (0); 1794 } 1795 1796 int 1797 rtwn_88e_power_on(struct rtwn_pci_softc *sc) 1798 { 1799 uint32_t reg; 1800 int ntries; 1801 1802 /* Disable XTAL output for power saving. */ 1803 rtwn_pci_write_1(sc, R88E_XCK_OUT_CTRL, 1804 rtwn_pci_read_1(sc, R88E_XCK_OUT_CTRL) & ~R88E_XCK_OUT_CTRL_EN); 1805 1806 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1807 rtwn_pci_read_2(sc, R92C_APS_FSMCO) & (~R92C_APS_FSMCO_APDM_HPDN)); 1808 rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0); 1809 1810 /* Wait for power ready bit. */ 1811 for (ntries = 0; ntries < 5000; ntries++) { 1812 if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST) 1813 break; 1814 DELAY(10); 1815 } 1816 if (ntries == 5000) { 1817 printf("%s: timeout waiting for chip power up\n", 1818 sc->sc_dev.dv_xname); 1819 return (ETIMEDOUT); 1820 } 1821 1822 /* Reset BB. */ 1823 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, 1824 rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) & ~(R92C_SYS_FUNC_EN_BBRSTB | 1825 R92C_SYS_FUNC_EN_BB_GLB_RST)); 1826 1827 rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2, 1828 rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80); 1829 1830 /* Disable HWPDN. */ 1831 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1832 rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN); 1833 /* Disable WL suspend. */ 1834 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1835 rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1836 ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE)); 1837 1838 /* Auto enable WLAN. */ 1839 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1840 rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); 1841 for (ntries = 0; ntries < 5000; ntries++) { 1842 if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1843 R92C_APS_FSMCO_APFM_ONMAC)) 1844 break; 1845 DELAY(10); 1846 } 1847 if (ntries == 5000) { 1848 printf("%s: timeout waiting for MAC auto ON\n", 1849 sc->sc_dev.dv_xname); 1850 return (ETIMEDOUT); 1851 } 1852 1853 /* Enable LDO normal mode. */ 1854 rtwn_pci_write_1(sc, R92C_LPLDO_CTRL, 1855 rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) & ~0x10); 1856 1857 rtwn_pci_write_1(sc, R92C_APS_FSMCO, 1858 rtwn_pci_read_1(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_PDN_EN); 1859 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2, 1860 rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04); 1861 1862 rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1, 1863 rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1) | 0x02); 1864 1865 rtwn_pci_write_1(sc, R92C_SYS_CLKR, 1866 rtwn_pci_read_1(sc, R92C_SYS_CLKR) | 0x08); 1867 1868 rtwn_pci_write_2(sc, R92C_GPIO_MUXCFG, 1869 rtwn_pci_read_2(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENSIC); 1870 1871 /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. 
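 * Unlike the 92C path the register is cleared first and
 * R92C_CR_CALTMR_EN is set in addition to the usual block enables.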
*/ 1872 rtwn_pci_write_2(sc, R92C_CR, 0); 1873 reg = rtwn_pci_read_2(sc, R92C_CR); 1874 reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | 1875 R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | 1876 R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | 1877 R92C_CR_ENSEC | R92C_CR_CALTMR_EN; 1878 rtwn_pci_write_2(sc, R92C_CR, reg); 1879 1880 rtwn_pci_write_1(sc, R92C_MSR, 0); 1881 return (0); 1882 } 1883 1884 int 1885 rtwn_23a_power_on(struct rtwn_pci_softc *sc) 1886 { 1887 uint32_t reg; 1888 int ntries; 1889 1890 rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x00); 1891 1892 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1893 rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1894 ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE)); 1895 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0x00); 1896 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1897 rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APFM_RSM); 1898 1899 /* Wait for power ready bit. */ 1900 for (ntries = 0; ntries < 5000; ntries++) { 1901 if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST) 1902 break; 1903 DELAY(10); 1904 } 1905 if (ntries == 5000) { 1906 printf("%s: timeout waiting for chip power up\n", 1907 sc->sc_dev.dv_xname); 1908 return (ETIMEDOUT); 1909 } 1910 1911 /* Release WLON reset */ 1912 rtwn_pci_write_4(sc, R92C_APS_FSMCO, rtwn_pci_read_4(sc, R92C_APS_FSMCO) | 1913 R92C_APS_FSMCO_RDY_MACON); 1914 /* Disable HWPDN. */ 1915 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1916 rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN); 1917 /* Disable WL suspend. */ 1918 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1919 rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1920 ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE)); 1921 1922 /* Auto enable WLAN. */ 1923 rtwn_pci_write_2(sc, R92C_APS_FSMCO, 1924 rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); 1925 for (ntries = 0; ntries < 5000; ntries++) { 1926 if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) & 1927 R92C_APS_FSMCO_APFM_ONMAC)) 1928 break; 1929 DELAY(10); 1930 } 1931 if (ntries == 5000) { 1932 printf("%s: timeout waiting for MAC auto ON (%x)\n", 1933 sc->sc_dev.dv_xname, rtwn_pci_read_2(sc, R92C_APS_FSMCO)); 1934 return (ETIMEDOUT); 1935 } 1936 1937 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2, 1938 rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04); 1939 1940 /* emac time out */ 1941 rtwn_pci_write_1(sc, 0x369, rtwn_pci_read_1(sc, 0x369) | 0x80); 1942 1943 for (ntries = 0; ntries < 100; ntries++) { 1944 rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e); 1945 DELAY(100); 1946 rtwn_pci_write_2(sc, R92C_MDIO + 2, 0xc280); 1947 rtwn_pci_write_2(sc, R92C_MDIO, 0xc290); 1948 rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x3e); 1949 DELAY(100); 1950 rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e); 1951 DELAY(100); 1952 if (rtwn_pci_read_2(sc, R92C_MDIO + 2) == 0xc290) 1953 break; 1954 } 1955 if (ntries == 100) { 1956 printf("%s: timeout configuring ePHY\n", sc->sc_dev.dv_xname); 1957 return (ETIMEDOUT); 1958 } 1959 1960 /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. 
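 * Same block enable sequence as on the 8188EE above.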
*/ 1961 rtwn_pci_write_2(sc, R92C_CR, 0); 1962 reg = rtwn_pci_read_2(sc, R92C_CR); 1963 reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | 1964 R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | 1965 R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | 1966 R92C_CR_ENSEC | R92C_CR_CALTMR_EN; 1967 rtwn_pci_write_2(sc, R92C_CR, reg); 1968 1969 return (0); 1970 } 1971 1972 int 1973 rtwn_power_on(void *cookie) 1974 { 1975 struct rtwn_pci_softc *sc = cookie; 1976 1977 if (sc->sc_sc.chip & RTWN_CHIP_88E) 1978 return (rtwn_88e_power_on(sc)); 1979 else if (sc->sc_sc.chip & RTWN_CHIP_23A) 1980 return (rtwn_23a_power_on(sc)); 1981 else 1982 return (rtwn_92c_power_on(sc)); 1983 } 1984 1985 int 1986 rtwn_dma_init(void *cookie) 1987 { 1988 struct rtwn_pci_softc *sc = cookie; 1989 uint32_t reg; 1990 uint16_t dmasize; 1991 int hqpages, lqpages, nqpages, pagecnt, boundary, trxdma, tcr; 1992 int error; 1993 1994 if (sc->sc_sc.chip & RTWN_CHIP_88E) { 1995 nqpages = R88E_NPQ_NPAGES; 1996 hqpages = R88E_HPQ_NPAGES; 1997 lqpages = R88E_LPQ_NPAGES; 1998 pagecnt = R88E_TX_PAGE_COUNT; 1999 boundary = R88E_TX_PAGE_BOUNDARY; 2000 dmasize = R88E_MAX_RX_DMA_SIZE; 2001 tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN3; 2002 trxdma = 0xe771; 2003 } else if (sc->sc_sc.chip & RTWN_CHIP_23A) { 2004 nqpages = R23A_NPQ_NPAGES; 2005 hqpages = R23A_HPQ_NPAGES; 2006 lqpages = R23A_LPQ_NPAGES; 2007 pagecnt = R23A_TX_PAGE_COUNT; 2008 boundary = R23A_TX_PAGE_BOUNDARY; 2009 dmasize = R23A_MAX_RX_DMA_SIZE; 2010 tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 | 2011 R92C_TCR_ERRSTEN1; 2012 trxdma = 0xf771; 2013 } else { 2014 nqpages = R92C_NPQ_NPAGES; 2015 hqpages = R92C_HPQ_NPAGES; 2016 lqpages = R92C_LPQ_NPAGES; 2017 pagecnt = R92C_TX_PAGE_COUNT; 2018 boundary = R92C_TX_PAGE_BOUNDARY; 2019 dmasize = R92C_MAX_RX_DMA_SIZE; 2020 tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 | 2021 R92C_TCR_ERRSTEN1; 2022 trxdma = 0xf771; 2023 } 2024 2025 /* Initialize LLT table. */ 2026 error = rtwn_llt_init(sc, pagecnt); 2027 if (error != 0) 2028 return error; 2029 2030 /* Set number of pages for normal priority queue. */ 2031 rtwn_pci_write_2(sc, R92C_RQPN_NPQ, nqpages); 2032 rtwn_pci_write_4(sc, R92C_RQPN, 2033 /* Set number of pages for public queue. */ 2034 SM(R92C_RQPN_PUBQ, pagecnt) | 2035 /* Set number of pages for high priority queue. */ 2036 SM(R92C_RQPN_HPQ, hqpages) | 2037 /* Set number of pages for low priority queue. */ 2038 SM(R92C_RQPN_LPQ, lqpages) | 2039 /* Load values. */ 2040 R92C_RQPN_LD); 2041 2042 rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, boundary); 2043 rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, boundary); 2044 rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, 2045 boundary); 2046 rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, boundary); 2047 rtwn_pci_write_1(sc, R92C_TDECTRL + 1, boundary); 2048 2049 reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL); 2050 reg &= ~R92C_TRXDMA_CTRL_QMAP_M; 2051 reg |= trxdma; 2052 rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg); 2053 2054 rtwn_pci_write_4(sc, R92C_TCR, tcr); 2055 2056 /* Configure Tx DMA. 
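 *
 * The bus address (DMA segment 0) of each TX descriptor ring is written
 * into the corresponding R92C_*_DESA register below.  The same thing
 * could be expressed as a table walk; a sketch (hypothetical, not how
 * this driver is written):
 *
 *	static const struct { int qid; uint16_t reg; } rtwn_qmap[] = {
 *		{ RTWN_BK_QUEUE,     R92C_BKQ_DESA },
 *		{ RTWN_BE_QUEUE,     R92C_BEQ_DESA },
 *		{ RTWN_VI_QUEUE,     R92C_VIQ_DESA },
 *		{ RTWN_VO_QUEUE,     R92C_VOQ_DESA },
 *		{ RTWN_BEACON_QUEUE, R92C_BCNQ_DESA },
 *		{ RTWN_MGNT_QUEUE,   R92C_MGQ_DESA },
 *		{ RTWN_HIGH_QUEUE,   R92C_HQ_DESA },
 *	};
 *
 *	for (i = 0; i < nitems(rtwn_qmap); i++)
 *		rtwn_pci_write_4(sc, rtwn_qmap[i].reg,
 *		    sc->tx_ring[rtwn_qmap[i].qid].map->dm_segs[0].ds_addr);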
*/ 2057 rtwn_pci_write_4(sc, R92C_BKQ_DESA, 2058 sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr); 2059 rtwn_pci_write_4(sc, R92C_BEQ_DESA, 2060 sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr); 2061 rtwn_pci_write_4(sc, R92C_VIQ_DESA, 2062 sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr); 2063 rtwn_pci_write_4(sc, R92C_VOQ_DESA, 2064 sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr); 2065 rtwn_pci_write_4(sc, R92C_BCNQ_DESA, 2066 sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr); 2067 rtwn_pci_write_4(sc, R92C_MGQ_DESA, 2068 sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr); 2069 rtwn_pci_write_4(sc, R92C_HQ_DESA, 2070 sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr); 2071 2072 /* Configure Rx DMA. */ 2073 rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr); 2074 rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG+1, 0); 2075 2076 /* Set Tx/Rx transfer page boundary. */ 2077 rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, dmasize - 1); 2078 2079 /* Set Tx/Rx transfer page size. */ 2080 rtwn_pci_write_1(sc, R92C_PBP, 2081 SM(R92C_PBP_PSRX, R92C_PBP_128) | 2082 SM(R92C_PBP_PSTX, R92C_PBP_128)); 2083 2084 return (0); 2085 } 2086 2087 int 2088 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len) 2089 { 2090 struct rtwn_pci_softc *sc = cookie; 2091 uint32_t reg; 2092 int off, mlen, error = 0, i; 2093 2094 reg = rtwn_pci_read_4(sc, R92C_MCUFWDL); 2095 reg = RW(reg, R92C_MCUFWDL_PAGE, page); 2096 rtwn_pci_write_4(sc, R92C_MCUFWDL, reg); 2097 2098 DELAY(5); 2099 2100 off = R92C_FW_START_ADDR; 2101 while (len > 0) { 2102 if (len > 196) 2103 mlen = 196; 2104 else if (len > 4) 2105 mlen = 4; 2106 else 2107 mlen = 1; 2108 for (i = 0; i < mlen; i++) 2109 rtwn_pci_write_1(sc, off++, buf[i]); 2110 buf += mlen; 2111 len -= mlen; 2112 } 2113 2114 return (error); 2115 } 2116 2117 int 2118 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len) 2119 { 2120 struct rtwn_pci_softc *sc = cookie; 2121 const char *name; 2122 int error; 2123 2124 if (sc->sc_sc.chip & RTWN_CHIP_88E) 2125 name = "rtwn-rtl8188efw"; 2126 else if (sc->sc_sc.chip & RTWN_CHIP_23A) { 2127 if (sc->sc_sc.chip & RTWN_CHIP_UMC_A_CUT) 2128 name = "rtwn-rtl8723fw"; 2129 else 2130 name = "rtwn-rtl8723fw_B"; 2131 } else if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | RTWN_CHIP_92C)) == 2132 RTWN_CHIP_UMC_A_CUT) 2133 name = "rtwn-rtl8192cfwU"; 2134 else 2135 name = "rtwn-rtl8192cfwU_B"; 2136 2137 error = loadfirmware(name, fw, len); 2138 if (error) 2139 printf("%s: could not read firmware %s (error %d)\n", 2140 sc->sc_dev.dv_xname, name, error); 2141 return (error); 2142 } 2143 2144 void 2145 rtwn_mac_init(void *cookie) 2146 { 2147 struct rtwn_pci_softc *sc = cookie; 2148 int i; 2149 2150 /* Write MAC initialization values. 
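 *
 * The rtl8188eu_mac/rtl8192cu_mac/rtl8192ce_mac arrays iterated below
 * are plain register/value pairs; conceptually each entry pairs a
 * register offset with a byte value, along the lines of this sketch
 * (illustrative shape only, not the actual definition):
 *
 *	struct rtwn_mac_initval {
 *		uint16_t reg;
 *		uint8_t  val;
 *	};
 *
 * so each loop reduces to rtwn_pci_write_1(sc, p->reg, p->val) for
 * every entry, with the 88E path skipping the R92C_GPIO_MUXCFG entry.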
*/ 2151 if (sc->sc_sc.chip & RTWN_CHIP_88E) { 2152 for (i = 0; i < nitems(rtl8188eu_mac); i++) { 2153 if (rtl8188eu_mac[i].reg == R92C_GPIO_MUXCFG) 2154 continue; 2155 rtwn_pci_write_1(sc, rtl8188eu_mac[i].reg, 2156 rtl8188eu_mac[i].val); 2157 } 2158 rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x07); 2159 } else if (sc->sc_sc.chip & RTWN_CHIP_23A) { 2160 for (i = 0; i < nitems(rtl8192cu_mac); i++) { 2161 rtwn_pci_write_1(sc, rtl8192cu_mac[i].reg, 2162 rtl8192cu_mac[i].val); 2163 } 2164 rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x0a); 2165 } else { 2166 for (i = 0; i < nitems(rtl8192ce_mac); i++) 2167 rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg, 2168 rtl8192ce_mac[i].val); 2169 } 2170 } 2171 2172 void 2173 rtwn_bb_init(void *cookie) 2174 { 2175 struct rtwn_pci_softc *sc = cookie; 2176 const struct r92c_bb_prog *prog; 2177 uint32_t reg; 2178 int i; 2179 2180 /* Enable BB and RF. */ 2181 rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN, 2182 rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) | 2183 R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST | 2184 R92C_SYS_FUNC_EN_DIO_RF); 2185 2186 if (!(sc->sc_sc.chip & RTWN_CHIP_88E)) 2187 rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83); 2188 2189 rtwn_pci_write_1(sc, R92C_RF_CTRL, 2190 R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB); 2191 2192 rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, 2193 R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA | 2194 R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST | 2195 R92C_SYS_FUNC_EN_BBRSTB); 2196 2197 if (!(sc->sc_sc.chip & RTWN_CHIP_88E)) { 2198 rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80); 2199 } 2200 2201 rtwn_pci_write_4(sc, R92C_LEDCFG0, 2202 rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000); 2203 2204 /* Select BB programming. */ 2205 if (sc->sc_sc.chip & RTWN_CHIP_88E) 2206 prog = &rtl8188eu_bb_prog; 2207 else if (sc->sc_sc.chip & RTWN_CHIP_23A) 2208 prog = &rtl8723a_bb_prog; 2209 else if (!(sc->sc_sc.chip & RTWN_CHIP_92C)) 2210 prog = &rtl8192ce_bb_prog_1t; 2211 else 2212 prog = &rtl8192ce_bb_prog_2t; 2213 2214 /* Write BB initialization values. */ 2215 for (i = 0; i < prog->count; i++) { 2216 rtwn_bb_write(sc, prog->regs[i], prog->vals[i]); 2217 DELAY(1); 2218 } 2219 2220 if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) { 2221 /* 8192C 1T only configuration. 
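 *
 * Each write below reads a baseband register, clears a field and merges
 * in the single-TX-chain value.  A helper capturing that read/mask/merge
 * pattern could look like this sketch (hypothetical, not part of this
 * driver):
 *
 *	static void
 *	rtwn_bb_update(struct rtwn_pci_softc *sc, uint16_t addr,
 *	    uint32_t mask, uint32_t val)
 *	{
 *		uint32_t reg;
 *
 *		reg = rtwn_bb_read(sc, addr);
 *		reg = (reg & ~mask) | val;
 *		rtwn_bb_write(sc, addr, reg);
 *	}
 *
 * e.g. rtwn_bb_update(sc, R92C_OFDM0_TRXPATHENA, 0x000000ff, 0x23) for
 * the TRX path enable update below.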
*/ 2222 reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO); 2223 reg = (reg & ~0x00000003) | 0x2; 2224 rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg); 2225 2226 reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO); 2227 reg = (reg & ~0x00300033) | 0x00200022; 2228 rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg); 2229 2230 reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING); 2231 reg = (reg & ~0xff000000) | 0x45 << 24; 2232 rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg); 2233 2234 reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA); 2235 reg = (reg & ~0x000000ff) | 0x23; 2236 rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg); 2237 2238 reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1); 2239 reg = (reg & ~0x00000030) | 1 << 4; 2240 rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg); 2241 2242 reg = rtwn_bb_read(sc, 0xe74); 2243 reg = (reg & ~0x0c000000) | 2 << 26; 2244 rtwn_bb_write(sc, 0xe74, reg); 2245 reg = rtwn_bb_read(sc, 0xe78); 2246 reg = (reg & ~0x0c000000) | 2 << 26; 2247 rtwn_bb_write(sc, 0xe78, reg); 2248 reg = rtwn_bb_read(sc, 0xe7c); 2249 reg = (reg & ~0x0c000000) | 2 << 26; 2250 rtwn_bb_write(sc, 0xe7c, reg); 2251 reg = rtwn_bb_read(sc, 0xe80); 2252 reg = (reg & ~0x0c000000) | 2 << 26; 2253 rtwn_bb_write(sc, 0xe80, reg); 2254 reg = rtwn_bb_read(sc, 0xe88); 2255 reg = (reg & ~0x0c000000) | 2 << 26; 2256 rtwn_bb_write(sc, 0xe88, reg); 2257 } 2258 2259 /* Write AGC values. */ 2260 for (i = 0; i < prog->agccount; i++) { 2261 rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, 2262 prog->agcvals[i]); 2263 DELAY(1); 2264 } 2265 2266 if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR) 2267 sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR; 2268 } 2269 2270 void 2271 rtwn_calib_to(void *arg) 2272 { 2273 struct rtwn_pci_softc *sc = arg; 2274 struct ieee80211com *ic = &sc->sc_sc.sc_ic; 2275 int s; 2276 2277 s = splnet(); 2278 ieee80211_amrr_choose(&sc->amrr, ic->ic_bss, &sc->amn); 2279 splx(s); 2280 2281 rtwn_calib(&sc->sc_sc); 2282 } 2283 2284 void 2285 rtwn_next_calib(void *cookie) 2286 { 2287 struct rtwn_pci_softc *sc = cookie; 2288 2289 timeout_add_sec(&sc->calib_to, 2); 2290 } 2291 2292 void 2293 rtwn_cancel_calib(void *cookie) 2294 { 2295 struct rtwn_pci_softc *sc = cookie; 2296 2297 if (timeout_initialized(&sc->calib_to)) 2298 timeout_del(&sc->calib_to); 2299 } 2300 2301 void 2302 rtwn_scan_to(void *arg) 2303 { 2304 struct rtwn_pci_softc *sc = arg; 2305 2306 rtwn_next_scan(&sc->sc_sc); 2307 } 2308 2309 void 2310 rtwn_pci_next_scan(void *cookie) 2311 { 2312 struct rtwn_pci_softc *sc = cookie; 2313 2314 timeout_add_msec(&sc->scan_to, 200); 2315 } 2316 2317 void 2318 rtwn_cancel_scan(void *cookie) 2319 { 2320 struct rtwn_pci_softc *sc = cookie; 2321 2322 if (timeout_initialized(&sc->scan_to)) 2323 timeout_del(&sc->scan_to); 2324 } 2325 2326 void 2327 rtwn_wait_async(void *cookie) 2328 { 2329 /* nothing to do */ 2330 } 2331 2332 void 2333 rtwn_tx_report(struct rtwn_pci_softc *sc, uint8_t *buf, int len) 2334 { 2335 struct r92c_c2h_tx_rpt *rpt = (struct r92c_c2h_tx_rpt *)buf; 2336 int packets, tries, tx_ok, drop, expire, over; 2337 2338 if (len != sizeof(*rpt)) 2339 return; 2340 2341 if (sc->sc_sc.chip & RTWN_CHIP_23A) { 2342 struct r88e_tx_rpt_ccx *rxstat = (struct r88e_tx_rpt_ccx *)buf; 2343 2344 /* 2345 * we seem to get some garbage reports, so check macid makes 2346 * sense. 2347 */ 2348 if (MS(rxstat->rptb1, R88E_RPTB1_MACID) != R92C_MACID_BSS) { 2349 return; 2350 } 2351 2352 packets = 1; 2353 tx_ok = (rxstat->rptb1 & R88E_RPTB1_PKT_OK) ? 
1 : 0; 2354 tries = MS(rxstat->rptb2, R88E_RPTB2_RETRY_CNT); 2355 expire = (rxstat->rptb2 & R88E_RPTB2_LIFE_EXPIRE); 2356 over = (rxstat->rptb2 & R88E_RPTB2_RETRY_OVER); 2357 drop = 0; 2358 } else { 2359 packets = MS(rpt->rptb6, R92C_RPTB6_RPT_PKT_NUM); 2360 tries = MS(rpt->rptb0, R92C_RPTB0_RETRY_CNT); 2361 tx_ok = (rpt->rptb7 & R92C_RPTB7_PKT_OK); 2362 drop = (rpt->rptb6 & R92C_RPTB6_PKT_DROP); 2363 expire = (rpt->rptb6 & R92C_RPTB6_LIFE_EXPIRE); 2364 over = (rpt->rptb6 & R92C_RPTB6_RETRY_OVER); 2365 } 2366 2367 if (packets > 0) { 2368 sc->amn.amn_txcnt += packets; 2369 if (!tx_ok || tries > 1 || drop || expire || over) 2370 sc->amn.amn_retrycnt++; 2371 } 2372 } 2373 2374 void 2375 rtwn_poll_c2h_events(struct rtwn_pci_softc *sc) 2376 { 2377 const uint16_t off = R92C_C2HEVT_MSG + sizeof(struct r92c_c2h_evt); 2378 uint8_t buf[R92C_C2H_MSG_MAX_LEN]; 2379 uint8_t id, len, status; 2380 int i; 2381 2382 /* Read current status. */ 2383 status = rtwn_pci_read_1(sc, R92C_C2HEVT_CLEAR); 2384 if (status == R92C_C2HEVT_HOST_CLOSE) 2385 return; /* nothing to do */ 2386 2387 if (status == R92C_C2HEVT_FW_CLOSE) { 2388 len = rtwn_pci_read_1(sc, R92C_C2HEVT_MSG); 2389 id = MS(len, R92C_C2H_EVTB0_ID); 2390 len = MS(len, R92C_C2H_EVTB0_LEN); 2391 2392 if (id == R92C_C2HEVT_TX_REPORT && len <= sizeof(buf)) { 2393 memset(buf, 0, sizeof(buf)); 2394 for (i = 0; i < len; i++) 2395 buf[i] = rtwn_pci_read_1(sc, off + i); 2396 rtwn_tx_report(sc, buf, len); 2397 } else 2398 DPRINTF(("unhandled C2H event %d (%d bytes)\n", 2399 id, len)); 2400 } 2401 2402 /* Prepare for next event. */ 2403 rtwn_pci_write_1(sc, R92C_C2HEVT_CLEAR, R92C_C2HEVT_HOST_CLOSE); 2404 } 2405
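
/*
 * Note on the C2H/rate-control path above: rtwn_poll_c2h_events() pulls
 * one firmware-to-host message at a time out of the C2H event area and
 * currently only dispatches TX reports.  rtwn_tx_report() accumulates
 * the per-report packet and retry statistics in sc->amn, and the
 * periodic calibration timeout (rtwn_calib_to) hands those counters to
 * ieee80211_amrr_choose() to adapt the transmit rate.  If more event
 * ids ever need handling, the dispatch could grow into a switch along
 * these lines (illustrative sketch only):
 *
 *	switch (id) {
 *	case R92C_C2HEVT_TX_REPORT:
 *		rtwn_tx_report(sc, buf, len);
 *		break;
 *	default:
 *		DPRINTF(("unhandled C2H event %d (%d bytes)\n", id, len));
 *		break;
 *	}
 */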