1 /* $OpenBSD: if_ral.c,v 1.144 2017/10/26 15:00:28 mpi Exp $ */ 2 3 /*- 4 * Copyright (c) 2005, 2006 5 * Damien Bergamini <damien.bergamini@free.fr> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /*- 21 * Ralink Technology RT2500USB chipset driver 22 * http://www.ralinktech.com.tw/ 23 */ 24 25 #include "bpfilter.h" 26 27 #include <sys/param.h> 28 #include <sys/sockio.h> 29 #include <sys/mbuf.h> 30 #include <sys/kernel.h> 31 #include <sys/socket.h> 32 #include <sys/systm.h> 33 #include <sys/timeout.h> 34 #include <sys/conf.h> 35 #include <sys/device.h> 36 #include <sys/endian.h> 37 38 #include <machine/intr.h> 39 40 #if NBPFILTER > 0 41 #include <net/bpf.h> 42 #endif 43 #include <net/if.h> 44 #include <net/if_dl.h> 45 #include <net/if_media.h> 46 47 #include <netinet/in.h> 48 #include <netinet/if_ether.h> 49 50 #include <net80211/ieee80211_var.h> 51 #include <net80211/ieee80211_amrr.h> 52 #include <net80211/ieee80211_radiotap.h> 53 54 #include <dev/usb/usb.h> 55 #include <dev/usb/usbdi.h> 56 #include <dev/usb/usbdi_util.h> 57 #include <dev/usb/usbdevs.h> 58 59 #include <dev/usb/if_ralreg.h> 60 #include <dev/usb/if_ralvar.h> 61 62 #ifdef URAL_DEBUG 63 #define DPRINTF(x) do { if (ural_debug) printf x; } while (0) 64 #define DPRINTFN(n, x) do { if (ural_debug >= (n)) 
printf x; } while (0) 65 int ural_debug = 0; 66 #else 67 #define DPRINTF(x) 68 #define DPRINTFN(n, x) 69 #endif 70 71 /* various supported device vendors/products */ 72 static const struct usb_devno ural_devs[] = { 73 { USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2570 }, 74 { USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2570_2 }, 75 { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050 }, 76 { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54G }, 77 { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GP }, 78 { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_HU200TS }, 79 { USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_C54RU }, 80 { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_RT2570 }, 81 { USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWBKG }, 82 { USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254 }, 83 { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54 }, 84 { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54AI }, 85 { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54YB }, 86 { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_NINWIFI }, 87 { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570 }, 88 { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_2 }, 89 { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_3 }, 90 { USB_VENDOR_NOVATECH, USB_PRODUCT_NOVATECH_NV902W }, 91 { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570 }, 92 { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_2 }, 93 { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_3 }, 94 { USB_VENDOR_SPHAIRON, USB_PRODUCT_SPHAIRON_UB801R }, 95 { USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_RT2570 }, 96 { USB_VENDOR_VTECH, USB_PRODUCT_VTECH_RT2570 }, 97 { USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT2570 } 98 }; 99 100 int ural_alloc_tx_list(struct ural_softc *); 101 void ural_free_tx_list(struct ural_softc *); 102 int ural_alloc_rx_list(struct ural_softc *); 103 void ural_free_rx_list(struct ural_softc *); 104 int ural_media_change(struct ifnet *); 105 void ural_next_scan(void *); 106 void ural_task(void *); 107 int ural_newstate(struct ieee80211com *, enum ieee80211_state, 108 int); 109 void 
ural_txeof(struct usbd_xfer *, void *, usbd_status); 110 void ural_rxeof(struct usbd_xfer *, void *, usbd_status); 111 #if NBPFILTER > 0 112 uint8_t ural_rxrate(const struct ural_rx_desc *); 113 #endif 114 int ural_ack_rate(struct ieee80211com *, int); 115 uint16_t ural_txtime(int, int, uint32_t); 116 uint8_t ural_plcp_signal(int); 117 void ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *, 118 uint32_t, int, int); 119 #ifndef IEEE80211_STA_ONLY 120 int ural_tx_bcn(struct ural_softc *, struct mbuf *, 121 struct ieee80211_node *); 122 #endif 123 int ural_tx_data(struct ural_softc *, struct mbuf *, 124 struct ieee80211_node *); 125 void ural_start(struct ifnet *); 126 void ural_watchdog(struct ifnet *); 127 int ural_ioctl(struct ifnet *, u_long, caddr_t); 128 void ural_eeprom_read(struct ural_softc *, uint16_t, void *, int); 129 uint16_t ural_read(struct ural_softc *, uint16_t); 130 void ural_read_multi(struct ural_softc *, uint16_t, void *, int); 131 void ural_write(struct ural_softc *, uint16_t, uint16_t); 132 void ural_write_multi(struct ural_softc *, uint16_t, void *, int); 133 void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t); 134 uint8_t ural_bbp_read(struct ural_softc *, uint8_t); 135 void ural_rf_write(struct ural_softc *, uint8_t, uint32_t); 136 void ural_set_chan(struct ural_softc *, struct ieee80211_channel *); 137 void ural_disable_rf_tune(struct ural_softc *); 138 void ural_enable_tsf_sync(struct ural_softc *); 139 void ural_update_slot(struct ural_softc *); 140 void ural_set_txpreamble(struct ural_softc *); 141 void ural_set_basicrates(struct ural_softc *); 142 void ural_set_bssid(struct ural_softc *, const uint8_t *); 143 void ural_set_macaddr(struct ural_softc *, const uint8_t *); 144 void ural_update_promisc(struct ural_softc *); 145 const char *ural_get_rf(int); 146 void ural_read_eeprom(struct ural_softc *); 147 int ural_bbp_init(struct ural_softc *); 148 void ural_set_txantenna(struct ural_softc *, int); 149 void 
ural_set_rxantenna(struct ural_softc *, int);
int		ural_init(struct ifnet *);
void		ural_stop(struct ifnet *, int);
void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
		    int);
void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
void		ural_amrr_timeout(void *);
void		ural_amrr_update(struct usbd_xfer *, void *,
		    usbd_status status);

/* default MAC register values, expanded from if_ralreg.h */
static const struct {
	uint16_t	reg;
	uint16_t	val;
} ural_def_mac[] = {
	RAL_DEF_MAC
};

/* default baseband-processor register values */
static const struct {
	uint8_t	reg;
	uint8_t	val;
} ural_def_bbp[] = {
	RAL_DEF_BBP
};

/* per-RF-chip register 2 values, indexed by channel (see ural_set_chan) */
static const uint32_t ural_rf2522_r2[]    = RAL_RF2522_R2;
static const uint32_t ural_rf2523_r2[]    = RAL_RF2523_R2;
static const uint32_t ural_rf2524_r2[]    = RAL_RF2524_R2;
static const uint32_t ural_rf2525_r2[]    = RAL_RF2525_R2;
static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
static const uint32_t ural_rf2525e_r2[]   = RAL_RF2525E_R2;
static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
static const uint32_t ural_rf2526_r2[]    = RAL_RF2526_R2;

int ural_match(struct device *, void *, void *);
void ural_attach(struct device *, struct device *, void *);
int ural_detach(struct device *, int);

struct cfdriver ural_cd = {
	NULL, "ural", DV_IFNET
};

const struct cfattach ural_ca = {
	sizeof(struct ural_softc), ural_match, ural_attach, ural_detach
};

/*
 * Autoconf match: accept a device when its active configuration is
 * RAL_CONFIG_NO and its vendor/product pair is in the ural_devs table.
 */
int
ural_match(struct device *parent, void *match, void *aux)
{
	struct usb_attach_arg *uaa = aux;

	if (uaa->iface == NULL || uaa->configno != RAL_CONFIG_NO)
		return UMATCH_NONE;

	return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
}

/*
 * Autoconf attach: locate the bulk IN/OUT endpoints, read chip revision
 * and EEPROM contents, then register the net80211 interface.  On any
 * early failure we simply return without attaching the ifnet;
 * ural_detach() checks ifp->if_softc before detaching for that reason.
 */
void
ural_attach(struct device *parent, struct device *self, void *aux)
{
	struct ural_softc *sc = (struct ural_softc *)self;
	struct usb_attach_arg *uaa = aux;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	usbd_status error;
	int i;

	sc->sc_udev = uaa->device;

	/* get the first interface handle */
	error = usbd_device2interface_handle(sc->sc_udev, RAL_IFACE_INDEX,
	    &sc->sc_iface);
	if (error != 0) {
		printf("%s: could not get interface handle\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Find endpoints.  The device must expose one bulk IN (Rx) and one
	 * bulk OUT (Tx) endpoint; -1 marks "not found".
	 */
	id = usbd_get_interface_descriptor(sc->sc_iface);

	sc->sc_rx_no = sc->sc_tx_no = -1;
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
		if (ed == NULL) {
			printf("%s: no endpoint descriptor for iface %d\n",
			    sc->sc_dev.dv_xname, i);
			return;
		}

		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
			sc->sc_rx_no = ed->bEndpointAddress;
		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
			sc->sc_tx_no = ed->bEndpointAddress;
	}
	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
		return;
	}

	/* state transitions are handled in process context via sc_task */
	usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC);
	timeout_set(&sc->scan_to, ural_next_scan, sc);

	sc->amrr.amrr_min_success_threshold = 1;
	sc->amrr.amrr_max_success_threshold = 10;
	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);

	/* retrieve RT2570 rev. no */
	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);

	/* retrieve MAC address and various other things from EEPROM */
	ural_read_eeprom(sc);

	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* set device capabilities */
	ic->ic_caps =
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
#ifndef IEEE80211_STA_ONLY
	    IEEE80211_C_IBSS |		/* IBSS mode supported */
	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
#endif
	    IEEE80211_C_TXPMGT |	/* tx power management */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_WEP |		/* s/w WEP */
	    IEEE80211_C_RSN;		/* WPA/RSN */

	/* set supported .11b and .11g rates */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* set supported .11b and .11g channels (1 through 14) */
	for (i = 1; i <= 14; i++) {
		ic->ic_channels[i].ic_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_channels[i].ic_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ural_ioctl;
	ifp->if_start = ural_start;
	ifp->if_watchdog = ural_watchdog;
	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ic->ic_newassoc = ural_newassoc;

	/* override state transition machine */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ural_newstate;
	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + 64);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
#endif
}

/*
 * Autoconf detach: cancel timeouts and the deferred task, wait for
 * pending device references, detach the ifnet (if attach completed),
 * then tear down pipes and Tx/Rx buffer lists.
 */
int
ural_detach(struct device *self, int flags)
{
	struct ural_softc *sc = (struct ural_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splusb();

	if (timeout_initialized(&sc->scan_to))
		timeout_del(&sc->scan_to);
	if (timeout_initialized(&sc->amrr_to))
		timeout_del(&sc->amrr_to);

	usb_rem_wait_task(sc->sc_udev, &sc->sc_task);

	usbd_ref_wait(sc->sc_udev);

	/* if_softc is only set once attach got far enough to if_attach() */
	if (ifp->if_softc != NULL) {
		ieee80211_ifdetach(ifp);	/* free all nodes */
		if_detach(ifp);
	}

	if (sc->amrr_xfer != NULL) {
		usbd_free_xfer(sc->amrr_xfer);
		sc->amrr_xfer = NULL;
	}

	/* abort before close so no completion fires on a freed pipe */
	if (sc->sc_rx_pipeh != NULL) {
		usbd_abort_pipe(sc->sc_rx_pipeh);
		usbd_close_pipe(sc->sc_rx_pipeh);
	}

	if (sc->sc_tx_pipeh != NULL) {
		usbd_abort_pipe(sc->sc_tx_pipeh);
		usbd_close_pipe(sc->sc_tx_pipeh);
	}

	ural_free_rx_list(sc);
	ural_free_tx_list(sc);

	splx(s);

	return 0;
}

/*
 * Allocate the Tx ring: one USB xfer plus a DMA-able buffer (descriptor +
 * max frame) per slot.  Returns 0 or ENOMEM; on failure everything
 * allocated so far is released.
 */
int
ural_alloc_tx_list(struct ural_softc *sc)
{
	int i, error;

	sc->tx_cur = sc->tx_queued = 0;

	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
		struct ural_tx_data *data = &sc->tx_data[i];

		data->sc = sc;

		data->xfer = usbd_alloc_xfer(sc->sc_udev);
		if (data->xfer == NULL) {
			printf("%s: could not allocate tx xfer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto
fail;
		}
		data->buf = usbd_alloc_buffer(data->xfer,
		    RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
		if (data->buf == NULL) {
			printf("%s: could not allocate tx buffer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
	}

	return 0;

fail:	ural_free_tx_list(sc);
	return error;
}

/*
 * Release all Tx ring resources.  Safe to call on a partially
 * allocated ring (slots with NULL xfer are skipped).
 */
void
ural_free_tx_list(struct ural_softc *sc)
{
	int i;

	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
		struct ural_tx_data *data = &sc->tx_data[i];

		if (data->xfer != NULL) {
			usbd_free_xfer(data->xfer);
			data->xfer = NULL;
		}
		/*
		 * The node has already been freed at that point so don't call
		 * ieee80211_release_node() here.
		 */
		data->ni = NULL;
	}
}

/*
 * Allocate the Rx ring: per slot, a USB xfer, a cluster-sized transfer
 * buffer, and an mbuf cluster that the received frame is swapped into.
 * Returns 0 or ENOMEM; on failure the partial ring is released.
 */
int
ural_alloc_rx_list(struct ural_softc *sc)
{
	int i, error;

	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
		struct ural_rx_data *data = &sc->rx_data[i];

		data->sc = sc;

		data->xfer = usbd_alloc_xfer(sc->sc_udev);
		if (data->xfer == NULL) {
			printf("%s: could not allocate rx xfer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
			printf("%s: could not allocate rx buffer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		/* receive directly into the mbuf cluster */
		data->buf = mtod(data->m, uint8_t *);
	}

	return 0;

fail:	ural_free_rx_list(sc);
	return error;
}

/*
 * Release all Rx ring resources (xfers and mbufs); safe on a
 * partially allocated ring.
 */
void
ural_free_rx_list(struct ural_softc *sc)
{
	int i;

	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
		struct ural_rx_data *data =
&sc->rx_data[i];

		if (data->xfer != NULL) {
			usbd_free_xfer(data->xfer);
			data->xfer = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
}

/*
 * ifmedia callback: apply a media change by reinitializing the
 * device, but only when the interface is up and running.
 */
int
ural_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
		ural_init(ifp);

	return 0;
}

/*
 * This function is called periodically (every 200ms) during scanning to
 * switch from one channel to another.
 */
void
ural_next_scan(void *arg)
{
	struct ural_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;

	if (usbd_is_dying(sc->sc_udev))
		return;

	/* hold a device reference across the scan step */
	usbd_ref_incr(sc->sc_udev);

	if (ic->ic_state == IEEE80211_S_SCAN)
		ieee80211_next_scan(ifp);

	usbd_ref_decr(sc->sc_udev);
}

/*
 * USB process-context task: perform the 802.11 state transition that
 * ural_newstate() recorded in sc_state/sc_arg.  Register accesses go
 * over USB and may sleep, hence the deferral.
 */
void
ural_task(void *arg)
{
	struct ural_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state ostate;
	struct ieee80211_node *ni;

	if (usbd_is_dying(sc->sc_udev))
		return;

	ostate = ic->ic_state;

	switch (sc->sc_state) {
	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_RUN) {
			/* abort TSF synchronization */
			ural_write(sc, RAL_TXRX_CSR19, 0);

			/* force tx led to stop blinking */
			ural_write(sc, RAL_MAC_CSR20, 0);
		}
		break;

	case IEEE80211_S_SCAN:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		/* rearm the 200ms channel-hop timeout (see ural_next_scan) */
		if (!usbd_is_dying(sc->sc_udev))
			timeout_add_msec(&sc->scan_to, 200);
		break;

	case IEEE80211_S_AUTH:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		break;

	case IEEE80211_S_ASSOC:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		break;

	case IEEE80211_S_RUN:
		ural_set_chan(sc, ic->ic_bss->ni_chan);

		ni = ic->ic_bss;

		if (ic->ic_opmode !=
IEEE80211_M_MONITOR) {
			ural_update_slot(sc);
			ural_set_txpreamble(sc);
			ural_set_basicrates(sc);
			ural_set_bssid(sc, ni->ni_bssid);
		}

#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_IBSS) {
			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
			if (m == NULL) {
				printf("%s: could not allocate beacon\n",
				    sc->sc_dev.dv_xname);
				return;
			}

			if (ural_tx_bcn(sc, m, ni) != 0) {
				m_freem(m);
				printf("%s: could not transmit beacon\n",
				    sc->sc_dev.dv_xname);
				return;
			}

			/* beacon is no longer needed */
			m_freem(m);
		}
#endif

		/* make tx led blink on tx (controlled by ASIC) */
		ural_write(sc, RAL_MAC_CSR20, 1);

		if (ic->ic_opmode != IEEE80211_M_MONITOR)
			ural_enable_tsf_sync(sc);

		if (ic->ic_opmode == IEEE80211_M_STA) {
			/* fake a join to init the tx rate */
			ural_newassoc(ic, ic->ic_bss, 1);

			/* enable automatic rate control in STA mode */
			if (ic->ic_fixed_rate == -1)
				ural_amrr_start(sc, ic->ic_bss);
		}

		break;
	}

	/* chain to the net80211 state machine saved in ural_attach() */
	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
}

/*
 * net80211 state-change hook.  Cancels pending work, records the
 * requested state, and defers the actual transition to ural_task().
 */
int
ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ural_softc *sc = ic->ic_if.if_softc;

	usb_rem_task(sc->sc_udev, &sc->sc_task);
	timeout_del(&sc->scan_to);
	timeout_del(&sc->amrr_to);

	/* do it in a process context */
	sc->sc_state = nstate;
	sc->sc_arg = arg;
	usb_add_task(sc->sc_udev, &sc->sc_task);
	return 0;
}

/* quickly determine if a given rate is CCK or OFDM */
#define RAL_RATE_IS_OFDM(rate)	((rate) >= 12 && (rate) != 22)

#define RAL_ACK_SIZE	14	/* 10 + 4(FCS) */
#define RAL_CTS_SIZE	14	/* 10 + 4(FCS) */

#define RAL_SIFS	10	/* us */

#define RAL_RXTX_TURNAROUND	5	/* us */

/*
 * Tx bulk-transfer completion: release the node reference, free the
 * ring slot, and restart output.
 */
void
ural_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct ural_tx_data *data = priv;
	struct ural_softc *sc = data->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int s;

	if (status != USBD_NORMAL_COMPLETION) {
		/* silently ignore cancellation (e.g. on detach/stop) */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;

		printf("%s: could not transmit buffer: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(status));

		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);

		ifp->if_oerrors++;
		return;
	}

	s = splnet();

	ieee80211_release_node(ic, data->ni);
	data->ni = NULL;

	sc->tx_queued--;

	DPRINTFN(10, ("tx done\n"));

	/* a slot freed up: clear the watchdog and kick the output queue */
	sc->sc_tx_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);
	ural_start(ifp);

	splx(s);
}

/*
 * Rx bulk-transfer completion: validate the frame, swap in a fresh
 * mbuf cluster so the ring slot can be resubmitted immediately, and
 * hand the received frame to net80211.  The transfer is always
 * requeued, even on error (via the `skip' label).
 */
void
ural_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct ural_rx_data *data = priv;
	struct ural_softc *sc = data->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	const struct ural_rx_desc *desc;
	struct ieee80211_frame *wh;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct mbuf *mnew, *m;
	int s, len;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;

		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
		goto skip;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);

	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
		    len));
		ifp->if_ierrors++;
		goto skip;
	}

	/* rx descriptor is located at the end */
	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);

	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
		/*
		 * This should not happen since we did not request to receive
		 * those frames when we filled RAL_TXRX_CSR2.
		 */
		DPRINTFN(5, ("PHY or CRC error\n"));
		ifp->if_ierrors++;
		goto skip;
	}

	/*
	 * Allocate the replacement cluster before committing to hand the
	 * current one up; on failure the frame is dropped and the old
	 * buffer is reused.
	 */
	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
	if (mnew == NULL) {
		printf("%s: could not allocate rx mbuf\n",
		    sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		goto skip;
	}
	MCLGET(mnew, M_DONTWAIT);
	if (!(mnew->m_flags & M_EXT)) {
		printf("%s: could not allocate rx mbuf cluster\n",
		    sc->sc_dev.dv_xname);
		m_freem(mnew);
		ifp->if_ierrors++;
		goto skip;
	}
	m = data->m;
	data->m = mnew;
	data->buf = mtod(data->m, uint8_t *);

	/* finalize mbuf; frame length lives in bits 16-27 of desc->flags */
	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;

	s = splnet();

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
		tap->wr_rate = ural_rxrate(desc);
		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
		tap->wr_antenna = sc->rx_ant;
		tap->wr_antsignal = desc->rssi;

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_rxtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
	}
#endif
	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);

	/* send the frame to the 802.11 layer */
	rxi.rxi_flags = 0;
	rxi.rxi_rssi = desc->rssi;
	rxi.rxi_tstamp = 0;	/* unused */
	ieee80211_input(ifp, m, ni, &rxi);

	/* node is no longer needed */
	ieee80211_release_node(ic, ni);

	splx(s);

	DPRINTFN(15, ("rx done\n"));

skip:	/* setup a new transfer */
	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
	(void)usbd_transfer(xfer);
}

/*
 * This
function is only used by the Rx radiotap code.  It returns the rate at
 * which a given frame was received.
 */
#if NBPFILTER > 0
uint8_t
ural_rxrate(const struct ural_rx_desc *desc)
{
	if (letoh32(desc->flags) & RAL_RX_OFDM) {
		/* reverse function of ural_plcp_signal */
		switch (desc->rate) {
		case 0xb:	return 12;
		case 0xf:	return 18;
		case 0xa:	return 24;
		case 0xe:	return 36;
		case 0x9:	return 48;
		case 0xd:	return 72;
		case 0x8:	return 96;
		case 0xc:	return 108;
		}
	} else {
		/* CCK: the hardware reports the rate in 100Kbps units */
		if (desc->rate == 10)
			return 2;
		if (desc->rate == 20)
			return 4;
		if (desc->rate == 55)
			return 11;
		if (desc->rate == 110)
			return 22;
	}
	return 2;	/* should not get there */
}
#endif

/*
 * Return the expected ack rate for a frame transmitted at rate `rate'.
 * Rates are in net80211's 500Kbps units throughout.
 */
int
ural_ack_rate(struct ieee80211com *ic, int rate)
{
	switch (rate) {
	/* CCK rates */
	case 2:
		return 2;
	case 4:
	case 11:
	case 22:
		return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;

	/* OFDM rates */
	case 12:
	case 18:
		return 12;
	case 24:
	case 36:
		return 24;
	case 48:
	case 72:
	case 96:
	case 108:
		return 48;
	}

	/* default to 1Mbps */
	return 2;
}

/*
 * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
 * The function automatically determines the operating mode depending on the
 * given rate. `flags' indicates whether short preamble is in use or not.
 */
uint16_t
ural_txtime(int len, int rate, uint32_t flags)
{
	uint16_t txtime;

	if (RAL_RATE_IS_OFDM(rate)) {
		/* IEEE Std 802.11g-2003, pp. 44 */
		txtime = (8 + 4 * len + 3 + rate - 1) / rate;
		txtime = 16 + 4 + 4 * txtime + 6;
	} else {
		/* IEEE Std 802.11b-1999, pp.
28 */
		txtime = (16 * len + rate - 1) / rate;
		if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
			txtime += 72 + 24;	/* short preamble + PLCP hdr */
		else
			txtime += 144 + 48;	/* long preamble + PLCP hdr */
	}
	return txtime;
}

/*
 * Map a net80211 rate (500Kbps units) to the PLCP SIGNAL value
 * programmed into the Tx descriptor.
 */
uint8_t
ural_plcp_signal(int rate)
{
	switch (rate) {
	/* CCK rates (returned values are device-dependent) */
	case 2:		return 0x0;
	case 4:		return 0x1;
	case 11:	return 0x2;
	case 22:	return 0x3;

	/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
	case 12:	return 0xb;
	case 18:	return 0xf;
	case 24:	return 0xa;
	case 36:	return 0xe;
	case 48:	return 0x9;
	case 72:	return 0xd;
	case 96:	return 0x8;
	case 108:	return 0xc;

	/* unsupported rates (should not get there) */
	default:	return 0xff;
	}
}

/*
 * Fill in the software Tx descriptor that precedes each outgoing frame:
 * control flags, frame length, WME parameters and the PLCP SIGNAL/
 * SERVICE/LENGTH fields for the chosen rate.  `len' excludes the FCS
 * (it is added here for the PLCP length computation).
 */
void
ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
    uint32_t flags, int len, int rate)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t plcp_length;
	int remainder;

	desc->flags = htole32(flags);
	desc->flags |= htole32(len << 16);

	desc->wme = htole16(
	    RAL_AIFSN(2) |
	    RAL_LOGCWMIN(3) |
	    RAL_LOGCWMAX(5));

	/* setup PLCP fields */
	desc->plcp_signal = ural_plcp_signal(rate);
	desc->plcp_service = 4;

	len += IEEE80211_CRC_LEN;
	if (RAL_RATE_IS_OFDM(rate)) {
		desc->flags |= htole32(RAL_TX_OFDM);

		plcp_length = len & 0xfff;
		desc->plcp_length_hi = plcp_length >> 6;
		desc->plcp_length_lo = plcp_length & 0x3f;
	} else {
		/* CCK: PLCP LENGTH is the duration in us, rounded up */
		plcp_length = (16 * len + rate - 1) / rate;
		if (rate == 22) {
			/* 11Mbps: length-extension bit (802.11b) */
			remainder = (16 * len) % 22;
			if (remainder != 0 && remainder < 7)
				desc->plcp_service |= RAL_PLCP_LENGEXT;
		}
		desc->plcp_length_hi = plcp_length >> 8;
		desc->plcp_length_lo = plcp_length & 0xff;

		/* bit 3 of SIGNAL selects short preamble (not for 1Mbps) */
		if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
			desc->plcp_signal |= 0x08;
	}

	desc->iv = 0;
	desc->eiv = 0;
}

#define RAL_TX_TIMEOUT	5000

#ifndef IEEE80211_STA_ONLY
/*
 * Upload a beacon frame to the device (HostAP/IBSS).  Uses a private,
 * synchronous xfer rather than the Tx ring; a one-byte transfer is sent
 * first, then the descriptor + beacon.  Does not consume `m0' (the
 * caller frees it).  Returns 0 or a usbd_status error.
 */
int
ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
	struct ural_tx_desc *desc;
	struct usbd_xfer *xfer;
	usbd_status error;
	uint8_t cmd = 0;
	uint8_t *buf;
	int xferlen, rate = 2;

	xfer = usbd_alloc_xfer(sc->sc_udev);
	if (xfer == NULL)
		return ENOMEM;

	/* xfer length needs to be a multiple of two! */
	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;

	buf = usbd_alloc_buffer(xfer, xferlen);
	if (buf == NULL) {
		usbd_free_xfer(xfer);
		return ENOMEM;
	}

	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd,
	    USBD_FORCE_SHORT_XFER | USBD_SYNCHRONOUS, RAL_TX_TIMEOUT, NULL);

	error = usbd_transfer(xfer);
	if (error != 0) {
		usbd_free_xfer(xfer);
		return error;
	}

	desc = (struct ural_tx_desc *)buf;

	m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE);
	ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP,
	    m0->m_pkthdr.len, rate);

	DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n",
	    m0->m_pkthdr.len, rate, xferlen));

	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen,
	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY | USBD_SYNCHRONOUS,
	    RAL_TX_TIMEOUT, NULL);

	error = usbd_transfer(xfer);
	usbd_free_xfer(xfer);

	return error;
}
#endif

/*
 * Encrypt (if needed), pick a Tx rate, optionally queue an RTS/CTS
 * protection frame, then queue `m0' on the Tx ring.  Consumes `m0'
 * on all paths; the caller keeps ownership of `ni' on failure.
 */
int
ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ural_tx_desc *desc;
	struct ural_tx_data *data;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k;
	uint32_t flags = RAL_TX_NEWSEQ;
	uint16_t dur;
	usbd_status error;
	int rate, xferlen, pktlen, needrts = 0, needcts = 0;

	wh = mtod(m0, struct ieee80211_frame *);

	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		k =
ieee80211_get_txkey(ic, wh, ni);

		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
			return ENOBUFS;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	/* compute actual packet length (including CRC and crypto overhead) */
	pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;

	/* pickup a rate */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT)) {
		/* mgmt/multicast frames are sent at the lowest avail. rate */
		rate = ni->ni_rates.rs_rates[0];
	} else if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate];
	} else
		rate = ni->ni_rates.rs_rates[ni->ni_txrate];
	if (rate == 0)
		rate = 2;	/* XXX should not happen */
	rate &= IEEE80211_RATE_VAL;

	/* check if RTS/CTS or CTS-to-self protection must be used */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* multicast frames are not sent at OFDM rates in 802.11b/g */
		if (pktlen > ic->ic_rtsthreshold) {
			needrts = 1;	/* RTS/CTS based on frame length */
		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
		    RAL_RATE_IS_OFDM(rate)) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				needcts = 1;	/* CTS-to-self */
			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
				needrts = 1;	/* RTS/CTS */
		}
	}
	if (needrts || needcts) {
		struct mbuf *mprot;
		int protrate, ackrate;
		/* NOTE(review): this `dur' shadows the function-level one */
		uint16_t dur;

		protrate = 2;
		ackrate = ural_ack_rate(ic, rate);

		/* NAV: data frame + its ack, plus inter-frame spaces */
		dur = ural_txtime(pktlen, rate, ic->ic_flags) +
		      ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) +
		      2 * RAL_SIFS;
		if (needrts) {
			dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic,
			    protrate), ic->ic_flags) + RAL_SIFS;
			mprot = ieee80211_get_rts(ic, wh, dur);
		} else {
			mprot = ieee80211_get_cts_to_self(ic, dur);
		}
		if (mprot == NULL) {
			printf("%s: could not allocate protection frame\n",
			    sc->sc_dev.dv_xname);
			m_freem(m0);
			return ENOBUFS;
		}

		data = &sc->tx_data[sc->tx_cur];
		desc = (struct ural_tx_desc *)data->buf;

		/* avoid multiple free() of the same node for each fragment */
		data->ni = ieee80211_ref_node(ni);

		m_copydata(mprot, 0, mprot->m_pkthdr.len,
		    data->buf + RAL_TX_DESC_SIZE);
		ural_setup_tx_desc(sc, desc,
		    (needrts ? RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7),
		    mprot->m_pkthdr.len, protrate);

		/* no roundup necessary here */
		xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len;

		/* XXX may want to pass the protection frame to BPF */

		/* mbuf is no longer needed */
		m_freem(mprot);

		usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf,
		    xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
		    RAL_TX_TIMEOUT, ural_txeof);
		error = usbd_transfer(data->xfer);
		if (error != 0 && error != USBD_IN_PROGRESS) {
			m_freem(m0);
			return error;
		}

		/* protection frame occupies its own Tx ring slot */
		sc->tx_queued++;
		sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;

		/* data frame follows the protection frame after one SIFS */
		flags |= RAL_TX_IFS_SIFS;
	}

	data = &sc->tx_data[sc->tx_cur];
	desc = (struct ural_tx_desc *)data->buf;

	data->ni = ni;

	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= RAL_TX_NEED_ACK;
		flags |= RAL_TX_RETRY(7);

		dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate),
		    ic->ic_flags) + RAL_SIFS;
		*(uint16_t *)wh->i_dur = htole16(dur);

#ifndef IEEE80211_STA_ONLY
		/* tell hardware to set timestamp in probe responses */
		if ((wh->i_fc[0] &
		    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
		    (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
			flags |= RAL_TX_TIMESTAMP;
#endif
	}

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct ural_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;
		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
		tap->wt_antenna = sc->tx_ant;

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_txtap_len;
		mb.m_next = m0;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
	}
#endif

	m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE);
	ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate);

	/* align end on a 2-bytes boundary */
	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;

	/*
	 * No space left in the last URB to store the extra 2 bytes, force
	 * sending of another URB.
	 */
	if ((xferlen % 64) == 0)
		xferlen += 2;

	DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n",
	    m0->m_pkthdr.len, rate, xferlen));

	/* mbuf is no longer needed */
	m_freem(m0);

	usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen,
	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof);
	error = usbd_transfer(data->xfer);
	if (error != 0 && error != USBD_IN_PROGRESS)
		return error;

	sc->tx_queued++;
	sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;

	return 0;
}

/*
 * ifnet start routine: drain the management queue and the interface
 * send queue into the Tx ring until the ring is (nearly) full.
 */
void
ural_start(struct ifnet *ifp)
{
	struct ural_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct mbuf *m0;

	/*
	 * net80211 may still try to send management frames even if the
	 * IFF_RUNNING flag is not set...
	 */
	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* keep one slot free; stall output when the ring is full */
		if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* management frames have priority over data frames */
		m0 = mq_dequeue(&ic->ic_mgtq);
		if (m0 != NULL) {
			/* destination node was stashed by net80211 */
			ni = m0->m_pkthdr.ph_cookie;
#if NBPFILTER > 0
			if (ic->ic_rawbpf != NULL)
				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
#endif
			if (ural_tx_data(sc, m0, ni) != 0)
				break;

		} else {
			/* data frames only once associated */
			if (ic->ic_state != IEEE80211_S_RUN)
				break;

			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				break;
#if NBPFILTER > 0
			if (ifp->if_bpf != NULL)
				bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
			/* encapsulate into an 802.11 frame; consumes m0 */
			m0 = ieee80211_encap(ifp, m0, &ni);
			if (m0 == NULL)
				continue;
#if NBPFILTER > 0
			if (ic->ic_rawbpf != NULL)
				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
#endif
			if (ural_tx_data(sc, m0, ni) != 0) {
				if (ni != NULL)
					ieee80211_release_node(ic, ni);
				ifp->if_oerrors++;
				break;
			}
		}

		/* arm the 5-second Tx watchdog */
		sc->sc_tx_timer = 5;
		ifp->if_timer = 1;
	}
}

/*
 * Per-second interface watchdog: detect a stuck transmitter and chain to
 * the net80211 watchdog.  Runs from softclock (interrupt) context.
 */
void
ural_watchdog(struct ifnet *ifp)
{
	struct ural_softc *sc = ifp->if_softc;

	ifp->if_timer = 0;

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
			/*ural_init(ifp); XXX needs a process context!
1332 */ 1333 error = ieee80211_ioctl(ifp, cmd, data); 1334 if (error == ENETRESET && 1335 ic->ic_opmode == IEEE80211_M_MONITOR) { 1336 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1337 (IFF_UP | IFF_RUNNING)) 1338 ural_set_chan(sc, ic->ic_ibss_chan); 1339 error = 0; 1340 } 1341 break; 1342 1343 default: 1344 error = ieee80211_ioctl(ifp, cmd, data); 1345 } 1346 1347 if (error == ENETRESET) { 1348 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1349 (IFF_UP | IFF_RUNNING)) 1350 ural_init(ifp); 1351 error = 0; 1352 } 1353 1354 splx(s); 1355 1356 usbd_ref_decr(sc->sc_udev); 1357 1358 return error; 1359 } 1360 1361 void 1362 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) 1363 { 1364 usb_device_request_t req; 1365 usbd_status error; 1366 1367 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1368 req.bRequest = RAL_READ_EEPROM; 1369 USETW(req.wValue, 0); 1370 USETW(req.wIndex, addr); 1371 USETW(req.wLength, len); 1372 1373 error = usbd_do_request(sc->sc_udev, &req, buf); 1374 if (error != 0) { 1375 printf("%s: could not read EEPROM: %s\n", 1376 sc->sc_dev.dv_xname, usbd_errstr(error)); 1377 } 1378 } 1379 1380 uint16_t 1381 ural_read(struct ural_softc *sc, uint16_t reg) 1382 { 1383 usb_device_request_t req; 1384 usbd_status error; 1385 uint16_t val; 1386 1387 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1388 req.bRequest = RAL_READ_MAC; 1389 USETW(req.wValue, 0); 1390 USETW(req.wIndex, reg); 1391 USETW(req.wLength, sizeof (uint16_t)); 1392 1393 error = usbd_do_request(sc->sc_udev, &req, &val); 1394 if (error != 0) { 1395 printf("%s: could not read MAC register: %s\n", 1396 sc->sc_dev.dv_xname, usbd_errstr(error)); 1397 return 0; 1398 } 1399 return letoh16(val); 1400 } 1401 1402 void 1403 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1404 { 1405 usb_device_request_t req; 1406 usbd_status error; 1407 1408 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1409 req.bRequest = RAL_READ_MULTI_MAC; 1410 USETW(req.wValue, 0); 1411 
USETW(req.wIndex, reg); 1412 USETW(req.wLength, len); 1413 1414 error = usbd_do_request(sc->sc_udev, &req, buf); 1415 if (error != 0) { 1416 printf("%s: could not read MAC register: %s\n", 1417 sc->sc_dev.dv_xname, usbd_errstr(error)); 1418 } 1419 } 1420 1421 void 1422 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) 1423 { 1424 usb_device_request_t req; 1425 usbd_status error; 1426 1427 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1428 req.bRequest = RAL_WRITE_MAC; 1429 USETW(req.wValue, val); 1430 USETW(req.wIndex, reg); 1431 USETW(req.wLength, 0); 1432 1433 error = usbd_do_request(sc->sc_udev, &req, NULL); 1434 if (error != 0) { 1435 printf("%s: could not write MAC register: %s\n", 1436 sc->sc_dev.dv_xname, usbd_errstr(error)); 1437 } 1438 } 1439 1440 void 1441 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1442 { 1443 usb_device_request_t req; 1444 usbd_status error; 1445 1446 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1447 req.bRequest = RAL_WRITE_MULTI_MAC; 1448 USETW(req.wValue, 0); 1449 USETW(req.wIndex, reg); 1450 USETW(req.wLength, len); 1451 1452 error = usbd_do_request(sc->sc_udev, &req, buf); 1453 if (error != 0) { 1454 printf("%s: could not write MAC register: %s\n", 1455 sc->sc_dev.dv_xname, usbd_errstr(error)); 1456 } 1457 } 1458 1459 void 1460 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) 1461 { 1462 uint16_t tmp; 1463 int ntries; 1464 1465 for (ntries = 0; ntries < 5; ntries++) { 1466 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) 1467 break; 1468 } 1469 if (ntries == 5) { 1470 printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname); 1471 return; 1472 } 1473 1474 tmp = reg << 8 | val; 1475 ural_write(sc, RAL_PHY_CSR7, tmp); 1476 } 1477 1478 uint8_t 1479 ural_bbp_read(struct ural_softc *sc, uint8_t reg) 1480 { 1481 uint16_t val; 1482 int ntries; 1483 1484 val = RAL_BBP_WRITE | reg << 8; 1485 ural_write(sc, RAL_PHY_CSR7, val); 1486 1487 for (ntries = 0; ntries < 5; ntries++) { 1488 if 
(!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
			break;
	}
	if (ntries == 5) {
		printf("%s: could not read BBP\n", sc->sc_dev.dv_xname);
		return 0;
	}
	return ural_read(sc, RAL_PHY_CSR7) & 0xff;
}

/*
 * Write a 20-bit value to one of the four RF registers.  The 24-bit
 * command word (busy flag, width, value, register index) is split across
 * PHY_CSR9 (low 16 bits) and PHY_CSR10 (high bits).  The RF chip cannot
 * be read back, so the value is shadowed in sc->rf_regs[].
 */
void
ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
{
	uint32_t tmp;
	int ntries;

	for (ntries = 0; ntries < 5; ntries++) {
		if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
			break;
	}
	if (ntries == 5) {
		printf("%s: could not write to RF\n", sc->sc_dev.dv_xname);
		return;
	}

	tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
	ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff);
	ural_write(sc, RAL_PHY_CSR10, tmp >> 16);

	/* remember last written value in sc */
	sc->rf_regs[reg] = val;

	DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff));
}

/*
 * Tune the radio to channel 'c' and program the per-channel Tx power.
 * The RF register sequences (and their magic constants) differ per RF
 * chip revision and follow the vendor reference driver; do not reorder.
 */
void
ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t power, tmp;
	u_int chan;

	chan = ieee80211_chan2ieee(ic, c);
	if (chan == 0 || chan == IEEE80211_CHAN_ANY)
		return;

	/* EEPROM Tx power is per channel; clamp to the 5-bit field */
	power = min(sc->txpow[chan - 1], 31);

	DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power));

	switch (sc->rf_rev) {
	case RAL_RF_2522:
		ural_rf_write(sc, RAL_RF1, 0x00814);
		ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
		break;

	case RAL_RF_2523:
		ural_rf_write(sc, RAL_RF1, 0x08804);
		ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
		break;

	case RAL_RF_2524:
		ural_rf_write(sc, RAL_RF1, 0x0c808);
		ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
		break;

	case RAL_RF_2525:
		/* first pass with the "hi" R2 table, then the final tune */
		ural_rf_write(sc, RAL_RF1, 0x08808);
		ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);

		ural_rf_write(sc, RAL_RF1, 0x08808);
		ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
		break;

	case RAL_RF_2525E:
		ural_rf_write(sc, RAL_RF1, 0x08808);
		ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
		break;

	case RAL_RF_2526:
		/* two-pass tune as well, with an odd/even channel R4 value */
		ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
		ural_rf_write(sc, RAL_RF1, 0x08804);

		ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
		break;
	}

	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
	    ic->ic_state != IEEE80211_S_SCAN) {
		/* set Japan filter bit for channel 14 */
		tmp = ural_bbp_read(sc, 70);

		tmp &= ~RAL_JAPAN_FILTER;
		if (chan == 14)
			tmp |= RAL_JAPAN_FILTER;

		ural_bbp_write(sc, 70, tmp);

		/* clear CRC errors */
		ural_read(sc, RAL_STA_CSR0);

		DELAY(1000); /* RF needs a 1ms delay here */
		ural_disable_rf_tune(sc);
	}
}

/*
 * Disable RF auto-tuning.
1610 */ 1611 void 1612 ural_disable_rf_tune(struct ural_softc *sc) 1613 { 1614 uint32_t tmp; 1615 1616 if (sc->rf_rev != RAL_RF_2523) { 1617 tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; 1618 ural_rf_write(sc, RAL_RF1, tmp); 1619 } 1620 1621 tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; 1622 ural_rf_write(sc, RAL_RF3, tmp); 1623 1624 DPRINTFN(2, ("disabling RF autotune\n")); 1625 } 1626 1627 /* 1628 * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF 1629 * synchronization. 1630 */ 1631 void 1632 ural_enable_tsf_sync(struct ural_softc *sc) 1633 { 1634 struct ieee80211com *ic = &sc->sc_ic; 1635 uint16_t logcwmin, preload, tmp; 1636 1637 /* first, disable TSF synchronization */ 1638 ural_write(sc, RAL_TXRX_CSR19, 0); 1639 1640 tmp = (16 * ic->ic_bss->ni_intval) << 4; 1641 ural_write(sc, RAL_TXRX_CSR18, tmp); 1642 1643 #ifndef IEEE80211_STA_ONLY 1644 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1645 logcwmin = 2; 1646 preload = 320; 1647 } else 1648 #endif 1649 { 1650 logcwmin = 0; 1651 preload = 6; 1652 } 1653 tmp = logcwmin << 12 | preload; 1654 ural_write(sc, RAL_TXRX_CSR20, tmp); 1655 1656 /* finally, enable TSF synchronization */ 1657 tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; 1658 if (ic->ic_opmode == IEEE80211_M_STA) 1659 tmp |= RAL_ENABLE_TSF_SYNC(1); 1660 #ifndef IEEE80211_STA_ONLY 1661 else 1662 tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; 1663 #endif 1664 ural_write(sc, RAL_TXRX_CSR19, tmp); 1665 1666 DPRINTF(("enabling TSF synchronization\n")); 1667 } 1668 1669 void 1670 ural_update_slot(struct ural_softc *sc) 1671 { 1672 struct ieee80211com *ic = &sc->sc_ic; 1673 uint16_t slottime, sifs, eifs; 1674 1675 slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 1676 IEEE80211_DUR_DS_SHSLOT : IEEE80211_DUR_DS_SLOT; 1677 1678 /* 1679 * These settings may sound a bit inconsistent but this is what the 1680 * reference driver does. 
1681 */ 1682 if (ic->ic_curmode == IEEE80211_MODE_11B) { 1683 sifs = 16 - RAL_RXTX_TURNAROUND; 1684 eifs = 364; 1685 } else { 1686 sifs = 10 - RAL_RXTX_TURNAROUND; 1687 eifs = 64; 1688 } 1689 1690 ural_write(sc, RAL_MAC_CSR10, slottime); 1691 ural_write(sc, RAL_MAC_CSR11, sifs); 1692 ural_write(sc, RAL_MAC_CSR12, eifs); 1693 } 1694 1695 void 1696 ural_set_txpreamble(struct ural_softc *sc) 1697 { 1698 uint16_t tmp; 1699 1700 tmp = ural_read(sc, RAL_TXRX_CSR10); 1701 1702 tmp &= ~RAL_SHORT_PREAMBLE; 1703 if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE) 1704 tmp |= RAL_SHORT_PREAMBLE; 1705 1706 ural_write(sc, RAL_TXRX_CSR10, tmp); 1707 } 1708 1709 void 1710 ural_set_basicrates(struct ural_softc *sc) 1711 { 1712 struct ieee80211com *ic = &sc->sc_ic; 1713 1714 /* update basic rate set */ 1715 if (ic->ic_curmode == IEEE80211_MODE_11B) { 1716 /* 11b basic rates: 1, 2Mbps */ 1717 ural_write(sc, RAL_TXRX_CSR11, 0x3); 1718 } else { 1719 /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ 1720 ural_write(sc, RAL_TXRX_CSR11, 0xf); 1721 } 1722 } 1723 1724 void 1725 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) 1726 { 1727 uint16_t tmp; 1728 1729 tmp = bssid[0] | bssid[1] << 8; 1730 ural_write(sc, RAL_MAC_CSR5, tmp); 1731 1732 tmp = bssid[2] | bssid[3] << 8; 1733 ural_write(sc, RAL_MAC_CSR6, tmp); 1734 1735 tmp = bssid[4] | bssid[5] << 8; 1736 ural_write(sc, RAL_MAC_CSR7, tmp); 1737 1738 DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid))); 1739 } 1740 1741 void 1742 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr) 1743 { 1744 uint16_t tmp; 1745 1746 tmp = addr[0] | addr[1] << 8; 1747 ural_write(sc, RAL_MAC_CSR2, tmp); 1748 1749 tmp = addr[2] | addr[3] << 8; 1750 ural_write(sc, RAL_MAC_CSR3, tmp); 1751 1752 tmp = addr[4] | addr[5] << 8; 1753 ural_write(sc, RAL_MAC_CSR4, tmp); 1754 1755 DPRINTF(("setting MAC address to %s\n", 1756 ether_sprintf((uint8_t *)addr))); 1757 } 1758 1759 void 1760 ural_update_promisc(struct ural_softc *sc) 1761 { 1762 
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	uint16_t tmp;

	tmp = ural_read(sc, RAL_TXRX_CSR2);

	/* drop-not-to-me is the inverse of promiscuous mode */
	tmp &= ~RAL_DROP_NOT_TO_ME;
	if (!(ifp->if_flags & IFF_PROMISC))
		tmp |= RAL_DROP_NOT_TO_ME;

	ural_write(sc, RAL_TXRX_CSR2, tmp);

	DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
	    "entering" : "leaving"));
}

/*
 * Map an RF chip revision code to a printable name (for attach output).
 */
const char *
ural_get_rf(int rev)
{
	switch (rev) {
	case RAL_RF_2522:	return "RT2522";
	case RAL_RF_2523:	return "RT2523";
	case RAL_RF_2524:	return "RT2524";
	case RAL_RF_2525:	return "RT2525";
	case RAL_RF_2525E:	return "RT2525e";
	case RAL_RF_2526:	return "RT2526";
	case RAL_RF_5222:	return "RT5222";
	default:		return "unknown";
	}
}

/*
 * Pull the adapter's identity and calibration data out of the EEPROM:
 * MAC/BBP revision, RF revision and antenna configuration, MAC address,
 * default BBP register values and per-channel Tx power.
 */
void
ural_read_eeprom(struct ural_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;

	/* retrieve MAC/BBP type */
	ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2);
	sc->macbbp_rev = letoh16(val);

	/* unpack the CONFIG0 bitfields */
	ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
	val = letoh16(val);
	sc->rf_rev =   (val >> 11) & 0x7;
	sc->hw_radio = (val >> 10) & 0x1;
	sc->led_mode = (val >> 6)  & 0x7;
	sc->rx_ant =   (val >> 4)  & 0x3;
	sc->tx_ant =   (val >> 2)  & 0x3;
	sc->nb_ant =   val & 0x3;

	/* read MAC address */
	ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6);

	/* read default values for BBP registers */
	ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);

	/* read Tx power for all b/g channels */
	ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
}

/*
 * Wait for the baseband processor to come up (up to 100ms), then load
 * the default BBP register table.  Returns 0 on success, EIO on timeout.
 */
int
ural_bbp_init(struct ural_softc *sc)
{
	int i, ntries;

	/* wait for BBP to be ready */
	for (ntries = 0; ntries < 100; ntries++) {
		if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
			break;
		DELAY(1000);
	}
	if (ntries == 100) {
		printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname);
		return EIO;
	}

	/* initialize BBP registers to default values */
	for (i = 0; i < nitems(ural_def_bbp); i++)
		ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);

#if 0
	/* initialize BBP registers to values stored in EEPROM */
	for (i = 0; i < 16; i++) {
		if (sc->bbp_prom[i].reg == 0xff)
			continue;
		ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
	}
#endif

	return 0;
}

/*
 * Select the Tx antenna (1 = A, 2 = B, other = diversity) in the BBP and
 * mirror the selection into PHY_CSR5/PHY_CSR6.
 */
void
ural_set_txantenna(struct ural_softc *sc, int antenna)
{
	uint16_t tmp;
	uint8_t tx;

	tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
	if (antenna == 1)
		tx |= RAL_BBP_ANTA;
	else if (antenna == 2)
		tx |= RAL_BBP_ANTB;
	else
		tx |= RAL_BBP_DIVERSITY;

	/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
	    sc->rf_rev == RAL_RF_5222)
		tx |= RAL_BBP_FLIPIQ;

	ural_bbp_write(sc, RAL_BBP_TX, tx);

	/* update flags in PHY_CSR5 and PHY_CSR6 too */
	tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
	ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));

	tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
	ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
}

/*
 * Select the Rx antenna (1 = A, 2 = B, other = diversity) in the BBP.
 */
void
ural_set_rxantenna(struct ural_softc *sc, int antenna)
{
	uint8_t rx;

	rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
	if (antenna == 1)
		rx |= RAL_BBP_ANTA;
	else if (antenna == 2)
		rx |= RAL_BBP_ANTB;
	else
		rx |= RAL_BBP_DIVERSITY;

	/* need to force no I/Q flip for RF 2525e and 2526 */
	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
		rx &= ~RAL_BBP_FLIPIQ;

	ural_bbp_write(sc, RAL_BBP_RX, rx);
}

/*
 * Bring the interface up: reset and program the MAC/BBP/RF, open the USB
 * bulk pipes, allocate the Tx/Rx rings, prime the Rx pipe and kick the
 * receiver.  On any failure everything is torn down via ural_stop().
 */
int
ural_init(struct ifnet *ifp)
{
	struct ural_softc *sc = ifp->if_softc;
	struct ieee80211com *ic =
&sc->sc_ic;
	uint16_t tmp;
	usbd_status error;
	int i, ntries;

	/* start from a clean state (no teardown of pipes yet: disable=0) */
	ural_stop(ifp, 0);

	/* initialize MAC registers to default values */
	for (i = 0; i < nitems(ural_def_mac); i++)
		ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);

	/* wait for BBP and RF to wake up (this can take a long time!) */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = ural_read(sc, RAL_MAC_CSR17);
		if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
		    (RAL_BBP_AWAKE | RAL_RF_AWAKE))
			break;
		DELAY(1000);
	}
	if (ntries == 100) {
		printf("%s: timeout waiting for BBP/RF to wakeup\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto fail;
	}

	/* we're ready! */
	ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);

	/* set basic rate set (will be updated later) */
	ural_write(sc, RAL_TXRX_CSR11, 0x153);

	error = ural_bbp_init(sc);
	if (error != 0)
		goto fail;

	/* set default BSS channel */
	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
	ural_set_chan(sc, ic->ic_bss->ni_chan);

	/* clear statistic registers (STA_CSR0 to STA_CSR10) */
	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);

	/* set default sensitivity */
	ural_bbp_write(sc, 17, 0x48);

	ural_set_txantenna(sc, 1);
	ural_set_rxantenna(sc, 1);

	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
	ural_set_macaddr(sc, ic->ic_myaddr);

	/*
	 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31).
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		struct ieee80211_key *k = &ic->ic_nw_keys[i];
		ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE,
		    k->k_key, IEEE80211_KEYBUF_SIZE);
	}

	/*
	 * Allocate xfer for AMRR statistics requests.
	 */
	sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev);
	if (sc->amrr_xfer == NULL) {
		printf("%s: could not allocate AMRR xfer\n",
		    sc->sc_dev.dv_xname);
		/*
		 * FIXME(review): 'error' is not set here; it still holds 0
		 * from the successful ural_bbp_init() above, so this failure
		 * path returns success to the caller.  Should set an error
		 * code (e.g. ENOMEM) before the goto.
		 */
		goto fail;
	}

	/*
	 * Open Tx and Rx USB bulk pipes.
	 */
	error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE,
	    &sc->sc_tx_pipeh);
	if (error != 0) {
		printf("%s: could not open Tx pipe: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(error));
		goto fail;
	}
	error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE,
	    &sc->sc_rx_pipeh);
	if (error != 0) {
		printf("%s: could not open Rx pipe: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(error));
		goto fail;
	}

	/*
	 * Allocate Tx and Rx xfer queues.
	 */
	error = ural_alloc_tx_list(sc);
	if (error != 0) {
		printf("%s: could not allocate Tx list\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	error = ural_alloc_rx_list(sc);
	if (error != 0) {
		printf("%s: could not allocate Rx list\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/*
	 * Start up the receive pipe.
	 */
	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
		struct ural_rx_data *data = &sc->rx_data[i];

		usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf,
		    MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
		error = usbd_transfer(data->xfer);
		if (error != 0 && error != USBD_IN_PROGRESS) {
			printf("%s: could not queue Rx transfer\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/* kick Rx */
	tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR;
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR;
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
#endif
			tmp |= RAL_DROP_TODS;
		if (!(ifp->if_flags & IFF_PROMISC))
			tmp |= RAL_DROP_NOT_TO_ME;
	}
	ural_write(sc, RAL_TXRX_CSR2, tmp);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	else
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);

	return 0;

fail:	ural_stop(ifp, 1);
	return error;
}

/*
 * Bring the interface down: stop timers, drop to INIT state, disable the
 * receiver, reset the chip and release all USB pipes and xfer lists.
 */
void
ural_stop(struct ifnet *ifp, int disable)
{
	struct ural_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->sc_tx_timer = 0;
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */

	/* disable Rx */
	ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);

	/* reset ASIC and BBP (but won't reset MAC registers!)
 */
	ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
	ural_write(sc, RAL_MAC_CSR1, 0);

	if (sc->amrr_xfer != NULL) {
		usbd_free_xfer(sc->amrr_xfer);
		sc->amrr_xfer = NULL;
	}
	/* abort before close so no completion handlers run afterwards */
	if (sc->sc_rx_pipeh != NULL) {
		usbd_abort_pipe(sc->sc_rx_pipeh);
		usbd_close_pipe(sc->sc_rx_pipeh);
		sc->sc_rx_pipeh = NULL;
	}
	if (sc->sc_tx_pipeh != NULL) {
		usbd_abort_pipe(sc->sc_tx_pipeh);
		usbd_close_pipe(sc->sc_tx_pipeh);
		sc->sc_tx_pipeh = NULL;
	}

	ural_free_rx_list(sc);
	ural_free_tx_list(sc);
}

/*
 * net80211 newassoc hook: reset the node's Tx rate so AMRR ramps up from
 * the lowest rate.
 */
void
ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
{
	/* start with lowest Tx rate */
	ni->ni_txrate = 0;
}

/*
 * Start AMRR (adaptive multi-rate retry) rate control for node 'ni':
 * clear the hardware statistics, init the AMRR state, pick an initial
 * rate and arm the 1-second statistics timeout.
 */
void
ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni)
{
	int i;

	/* clear statistic registers (STA_CSR0 to STA_CSR10) */
	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);

	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);

	/* set rate to some reasonable initial value */
	/* scan down to the highest rate <= 36Mbps (72 * 500kbps) */
	for (i = ni->ni_rates.rs_nrates - 1;
	     i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72;
	     i--);		/* empty loop body is intentional */
	ni->ni_txrate = i;

	if (!usbd_is_dying(sc->sc_udev))
		timeout_add_sec(&sc->amrr_to, 1);
}

/*
 * 1-second AMRR timeout: kick off an asynchronous read of the Tx
 * statistics registers; ural_amrr_update() completes the cycle.
 */
void
ural_amrr_timeout(void *arg)
{
	struct ural_softc *sc = arg;
	usb_device_request_t req;
	int s;

	if (usbd_is_dying(sc->sc_udev))
		return;

	usbd_ref_incr(sc->sc_udev);

	s = splusb();

	/*
	 * Asynchronously read statistic registers (cleared by read).
	 */
	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = RAL_READ_MULTI_MAC;
	USETW(req.wValue, 0);
	USETW(req.wIndex, RAL_STA_CSR0);
	USETW(req.wLength, sizeof sc->sta);

	usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
	    USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
	    ural_amrr_update);
	(void)usbd_transfer(sc->amrr_xfer);

	splx(s);

	usbd_ref_decr(sc->sc_udev);
}

/*
 * Completion handler for the statistics read started in
 * ural_amrr_timeout(): feed the Tx counters to AMRR, let it choose a new
 * rate, and re-arm the timeout.  On USB error, rate control stops (the
 * timeout is not re-armed).
 */
void
ural_amrr_update(struct usbd_xfer *xfer, void *priv,
    usbd_status status)
{
	struct ural_softc *sc = (struct ural_softc *)priv;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	if (status != USBD_NORMAL_COMPLETION) {
		printf("%s: could not retrieve Tx statistics - cancelling "
		    "automatic rate control\n", sc->sc_dev.dv_xname);
		return;
	}

	/* count TX retry-fail as Tx errors */
	ifp->if_oerrors += letoh16(sc->sta[9]);

	sc->amn.amn_retrycnt =
	    letoh16(sc->sta[7]) +	/* TX one-retry ok count */
	    letoh16(sc->sta[8]) +	/* TX more-retry ok count */
	    letoh16(sc->sta[9]);	/* TX retry-fail count */

	sc->amn.amn_txcnt =
	    sc->amn.amn_retrycnt +
	    letoh16(sc->sta[6]);	/* TX no-retry ok count */

	ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);

	if (!usbd_is_dying(sc->sc_udev))
		timeout_add_sec(&sc->amrr_to, 1);
}