/*	$OpenBSD: if_ral.c,v 1.109 2008/10/15 19:12:18 blambert Exp $  */

/*-
 * Copyright (c) 2005, 2006
 *	Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Ralink Technology RT2500USB chipset driver
 * http://www.ralinktech.com.tw/
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/conf.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/intr.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include <dev/usb/usbdevs.h>

#include <dev/usb/if_ralreg.h>
#include <dev/usb/if_ralvar.h>

#ifdef USB_DEBUG
#define URAL_DEBUG
#endif

#ifdef URAL_DEBUG
#define DPRINTF(x)	do { if (ural_debug) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (ural_debug >= (n)) printf x; } while (0)
int ural_debug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

/* various supported device vendors/products */
static const struct usb_devno ural_devs[] = {
	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570 },
	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570_2 },
	{ USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D7050 },
	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54G },
	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_HU200TS },
	{ USB_VENDOR_CONCEPTRONIC2,	USB_PRODUCT_CONCEPTRONIC2_C54RU },
	{ USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_RT2570 },
	{ USB_VENDOR_GIGABYTE,		USB_PRODUCT_GIGABYTE_GNWBKG },
	{ USB_VENDOR_GUILLEMOT,		USB_PRODUCT_GUILLEMOT_HWGUSB254 },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54 },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54AI },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54YB },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_NINWIFI },
	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570 },
	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_2 },
	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_3 },
	{ USB_VENDOR_NOVATECH,		USB_PRODUCT_NOVATECH_NV902W },
	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570 },
	{ USB_VENDOR_RALINK,
	    USB_PRODUCT_RALINK_RT2570_2 },
	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_3 },
	{ USB_VENDOR_SPHAIRON,		USB_PRODUCT_SPHAIRON_UB801R },
	{ USB_VENDOR_SURECOM,		USB_PRODUCT_SURECOM_RT2570 },
	{ USB_VENDOR_VTECH,		USB_PRODUCT_VTECH_RT2570 },
	{ USB_VENDOR_ZINWELL,		USB_PRODUCT_ZINWELL_RT2570 }
};

int		ural_alloc_tx_list(struct ural_softc *);
void		ural_free_tx_list(struct ural_softc *);
int		ural_alloc_rx_list(struct ural_softc *);
void		ural_free_rx_list(struct ural_softc *);
int		ural_media_change(struct ifnet *);
void		ural_next_scan(void *);
void		ural_task(void *);
int		ural_newstate(struct ieee80211com *, enum ieee80211_state,
		    int);
void		ural_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
void		ural_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
#if NBPFILTER > 0
uint8_t		ural_rxrate(const struct ural_rx_desc *);
#endif
int		ural_ack_rate(struct ieee80211com *, int);
uint16_t	ural_txtime(int, int, uint32_t);
uint8_t		ural_plcp_signal(int);
void		ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
		    uint32_t, int, int);
#ifndef IEEE80211_STA_ONLY
int		ural_tx_bcn(struct ural_softc *, struct mbuf *,
		    struct ieee80211_node *);
#endif
int		ural_tx_data(struct ural_softc *, struct mbuf *,
		    struct ieee80211_node *);
void		ural_start(struct ifnet *);
void		ural_watchdog(struct ifnet *);
int		ural_ioctl(struct ifnet *, u_long, caddr_t);
void		ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
uint16_t	ural_read(struct ural_softc *, uint16_t);
void		ural_read_multi(struct ural_softc *, uint16_t, void *, int);
void		ural_write(struct ural_softc *, uint16_t, uint16_t);
void		ural_write_multi(struct ural_softc *, uint16_t, void *, int);
void		ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
uint8_t		ural_bbp_read(struct ural_softc *, uint8_t);
void		ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
void		ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
void		ural_disable_rf_tune(struct ural_softc *);
void		ural_enable_tsf_sync(struct ural_softc *);
void		ural_update_slot(struct ural_softc *);
void		ural_set_txpreamble(struct ural_softc *);
void		ural_set_basicrates(struct ural_softc *);
void		ural_set_bssid(struct ural_softc *, const uint8_t *);
void		ural_set_macaddr(struct ural_softc *, const uint8_t *);
void		ural_update_promisc(struct ural_softc *);
const char	*ural_get_rf(int);
void		ural_read_eeprom(struct ural_softc *);
int		ural_bbp_init(struct ural_softc *);
void		ural_set_txantenna(struct ural_softc *, int);
void		ural_set_rxantenna(struct ural_softc *, int);
int		ural_init(struct ifnet *);
void		ural_stop(struct ifnet *, int);
void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
		    int);
void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
void		ural_amrr_timeout(void *);
void		ural_amrr_update(usbd_xfer_handle, usbd_private_handle,
		    usbd_status status);

static const struct {
	uint16_t	reg;
	uint16_t	val;
} ural_def_mac[] = {
	RAL_DEF_MAC
};

static const struct {
	uint8_t	reg;
	uint8_t	val;
} ural_def_bbp[] = {
	RAL_DEF_BBP
};

static const uint32_t ural_rf2522_r2[]    = RAL_RF2522_R2;
static const uint32_t ural_rf2523_r2[]    = RAL_RF2523_R2;
static const uint32_t ural_rf2524_r2[]    = RAL_RF2524_R2;
static const uint32_t
    ural_rf2525_r2[]    = RAL_RF2525_R2;
static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
static const uint32_t ural_rf2525e_r2[]   = RAL_RF2525E_R2;
static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
static const uint32_t ural_rf2526_r2[]    = RAL_RF2526_R2;

int ural_match(struct device *, void *, void *);
void ural_attach(struct device *, struct device *, void *);
int ural_detach(struct device *, int);
int ural_activate(struct device *, enum devact);

struct cfdriver ural_cd = {
	NULL, "ural", DV_IFNET
};

const struct cfattach ural_ca = {
	sizeof(struct ural_softc),
	ural_match,
	ural_attach,
	ural_detach,
	ural_activate,
};

int
ural_match(struct device *parent, void *match, void *aux)
{
	struct usb_attach_arg *uaa = aux;

	if (uaa->iface != NULL)
		return UMATCH_NONE;

	return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
}

void
ural_attach(struct device *parent, struct device *self, void *aux)
{
	struct ural_softc *sc = (struct ural_softc *)self;
	struct usb_attach_arg *uaa = aux;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	usbd_status error;
	int i;

	sc->sc_udev = uaa->device;

	if (usbd_set_config_no(sc->sc_udev, RAL_CONFIG_NO, 0) != 0) {
		printf("%s: could not set configuration no\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* get the first interface handle */
	error = usbd_device2interface_handle(sc->sc_udev, RAL_IFACE_INDEX,
	    &sc->sc_iface);
	if (error != 0) {
		printf("%s: could not get interface handle\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Find endpoints.
	 */
	id = usbd_get_interface_descriptor(sc->sc_iface);

	sc->sc_rx_no = sc->sc_tx_no = -1;
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
		if (ed == NULL) {
			printf("%s: no endpoint descriptor for iface %d\n",
			    sc->sc_dev.dv_xname, i);
			return;
		}

		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
			sc->sc_rx_no = ed->bEndpointAddress;
		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
			sc->sc_tx_no = ed->bEndpointAddress;
	}
	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
		return;
	}

	usb_init_task(&sc->sc_task, ural_task, sc);
	timeout_set(&sc->scan_to, ural_next_scan, sc);

	sc->amrr.amrr_min_success_threshold = 1;
	sc->amrr.amrr_max_success_threshold = 10;
	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);

	/* retrieve RT2570 rev.
	   no */
	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);

	/* retrieve MAC address and various other things from EEPROM */
	ural_read_eeprom(sc);

	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* set device capabilities */
	ic->ic_caps =
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
#ifndef IEEE80211_STA_ONLY
	    IEEE80211_C_IBSS |		/* IBSS mode supported */
	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
#endif
	    IEEE80211_C_TXPMGT |	/* tx power management */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_WEP |		/* s/w WEP */
	    IEEE80211_C_RSN;		/* WPA/RSN */

	/* set supported .11b and .11g rates */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* set supported .11b and .11g channels (1 through 14) */
	for (i = 1; i <= 14; i++) {
		ic->ic_channels[i].ic_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_channels[i].ic_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = ural_init;
	ifp->if_ioctl = ural_ioctl;
	ifp->if_start = ural_start;
	ifp->if_watchdog = ural_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ic->ic_newassoc = ural_newassoc;

	/* override state transition machine */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ural_newstate;
	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + 64);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
#endif

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
	    &sc->sc_dev);
}

int
ural_detach(struct device *self, int flags)
{
	struct ural_softc *sc = (struct ural_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splusb();

	ieee80211_ifdetach(ifp);	/* free all nodes */
	if_detach(ifp);

	usb_rem_task(sc->sc_udev, &sc->sc_task);
	timeout_del(&sc->scan_to);
	timeout_del(&sc->amrr_to);

	if (sc->amrr_xfer != NULL) {
		usbd_free_xfer(sc->amrr_xfer);
		sc->amrr_xfer = NULL;
	}

	if (sc->sc_rx_pipeh != NULL) {
		usbd_abort_pipe(sc->sc_rx_pipeh);
		usbd_close_pipe(sc->sc_rx_pipeh);
	}

	if (sc->sc_tx_pipeh != NULL) {
		usbd_abort_pipe(sc->sc_tx_pipeh);
		usbd_close_pipe(sc->sc_tx_pipeh);
	}

	ural_free_rx_list(sc);
	ural_free_tx_list(sc);

	splx(s);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH,
	    sc->sc_udev,
	    &sc->sc_dev);

	return 0;
}

int
ural_alloc_tx_list(struct ural_softc *sc)
{
	int i, error;

	sc->tx_cur = sc->tx_queued = 0;

	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
		struct ural_tx_data *data = &sc->tx_data[i];

		data->sc = sc;

		data->xfer = usbd_alloc_xfer(sc->sc_udev);
		if (data->xfer == NULL) {
			printf("%s: could not allocate tx xfer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		data->buf = usbd_alloc_buffer(data->xfer,
		    RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
		if (data->buf == NULL) {
			printf("%s: could not allocate tx buffer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
	}

	return 0;

fail:	ural_free_tx_list(sc);
	return error;
}

void
ural_free_tx_list(struct ural_softc *sc)
{
	int i;

	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
		struct ural_tx_data *data = &sc->tx_data[i];

		if (data->xfer != NULL) {
			usbd_free_xfer(data->xfer);
			data->xfer = NULL;
		}
		/*
		 * The node has already been freed at that point so don't call
		 * ieee80211_release_node() here.
		 */
		data->ni = NULL;
	}
}

int
ural_alloc_rx_list(struct ural_softc *sc)
{
	int i, error;

	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
		struct ural_rx_data *data = &sc->rx_data[i];

		data->sc = sc;

		data->xfer = usbd_alloc_xfer(sc->sc_udev);
		if (data->xfer == NULL) {
			printf("%s: could not allocate rx xfer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
			printf("%s: could not allocate rx buffer\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		data->buf = mtod(data->m, uint8_t *);
	}

	return 0;

fail:	ural_free_rx_list(sc);
	return error;
}

void
ural_free_rx_list(struct ural_softc *sc)
{
	int i;

	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
		struct ural_rx_data *data = &sc->rx_data[i];

		if (data->xfer != NULL) {
			usbd_free_xfer(data->xfer);
			data->xfer = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
}

int
ural_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
		ural_init(ifp);

	return 0;
}

/*
 * This function is called periodically (every 200ms) during scanning to
 * switch from one channel to another.
 */
void
ural_next_scan(void *arg)
{
	struct ural_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;

	if (ic->ic_state == IEEE80211_S_SCAN)
		ieee80211_next_scan(ifp);
}

void
ural_task(void *arg)
{
	struct ural_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state ostate;
	struct ieee80211_node *ni;

	ostate = ic->ic_state;

	switch (sc->sc_state) {
	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_RUN) {
			/* abort TSF synchronization */
			ural_write(sc, RAL_TXRX_CSR19, 0);

			/* force tx led to stop blinking */
			ural_write(sc, RAL_MAC_CSR20, 0);
		}
		break;

	case IEEE80211_S_SCAN:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		timeout_add(&sc->scan_to, hz / 5);
		break;

	case IEEE80211_S_AUTH:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		break;

	case IEEE80211_S_ASSOC:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		break;

	case IEEE80211_S_RUN:
		ural_set_chan(sc, ic->ic_bss->ni_chan);

		ni = ic->ic_bss;

		if (ic->ic_opmode != IEEE80211_M_MONITOR) {
			ural_update_slot(sc);
			ural_set_txpreamble(sc);
			ural_set_basicrates(sc);
			ural_set_bssid(sc, ni->ni_bssid);
		}

#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_IBSS) {
			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
			if (m == NULL) {
				printf("%s: could not allocate beacon\n",
				    sc->sc_dev.dv_xname);
				return;
			}

			if (ural_tx_bcn(sc, m, ni) != 0) {
				m_freem(m);
				printf("%s: could not transmit beacon\n",
				    sc->sc_dev.dv_xname);
				return;
			}

			/* beacon is no longer needed */
			m_freem(m);
		}
#endif

		/* make tx led blink on tx (controlled by ASIC) */
		ural_write(sc, RAL_MAC_CSR20, 1);

		if (ic->ic_opmode != IEEE80211_M_MONITOR)
			ural_enable_tsf_sync(sc);

		if (ic->ic_opmode == IEEE80211_M_STA) {
			/* fake a join to init the tx rate */
			ural_newassoc(ic, ic->ic_bss, 1);

			/* enable automatic rate control in STA mode */
			if (ic->ic_fixed_rate == -1)
				ural_amrr_start(sc, ic->ic_bss);
		}

		break;
	}

	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
}

int
ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ural_softc *sc = ic->ic_if.if_softc;

	usb_rem_task(sc->sc_udev, &sc->sc_task);
	timeout_del(&sc->scan_to);
	timeout_del(&sc->amrr_to);

	/* do it in a process context */
	sc->sc_state = nstate;
	sc->sc_arg = arg;
	usb_add_task(sc->sc_udev, &sc->sc_task);
	return 0;
}

/* quickly determine if a given rate is CCK or OFDM */
#define RAL_RATE_IS_OFDM(rate)	((rate) >= 12 && (rate) != 22)

#define RAL_ACK_SIZE	14	/* 10 + 4(FCS) */
#define RAL_CTS_SIZE	14	/* 10 + 4(FCS) */

#define RAL_SIFS	10	/* us */

#define RAL_RXTX_TURNAROUND	5	/* us */

void
ural_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct ural_tx_data *data = priv;
	struct ural_softc *sc = data->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int s;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;

		printf("%s: could not transmit buffer: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(status));

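		/*
		 * A stalled bulk pipe must be cleared before transmission can
		 * resume; the asynchronous variant is used here because this
		 * runs from a USB transfer completion callback, where
		 * synchronous requests are not allowed.
		 */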
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);

		ifp->if_oerrors++;
		return;
	}

	s = splnet();

	ieee80211_release_node(ic, data->ni);
	data->ni = NULL;

	sc->tx_queued--;
	ifp->if_opackets++;

	DPRINTFN(10, ("tx done\n"));

	sc->sc_tx_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	ural_start(ifp);

	splx(s);
}

void
ural_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct ural_rx_data *data = priv;
	struct ural_softc *sc = data->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	const struct ural_rx_desc *desc;
	struct ieee80211_frame *wh;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct mbuf *mnew, *m;
	int s, len;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;

		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
		goto skip;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);

	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
		    len));
		ifp->if_ierrors++;
		goto skip;
	}

	/* rx descriptor is located at the end */
	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);

	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
		/*
		 * This should not happen since we did not request to receive
		 * those frames when we filled RAL_TXRX_CSR2.
		 */
		DPRINTFN(5, ("PHY or CRC error\n"));
		ifp->if_ierrors++;
		goto skip;
	}

	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
	if (mnew == NULL) {
		printf("%s: could not allocate rx mbuf\n",
		    sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		goto skip;
	}
	MCLGET(mnew, M_DONTWAIT);
	if (!(mnew->m_flags & M_EXT)) {
		printf("%s: could not allocate rx mbuf cluster\n",
		    sc->sc_dev.dv_xname);
		m_freem(mnew);
		ifp->if_ierrors++;
		goto skip;
	}
	m = data->m;
	data->m = mnew;
	data->buf = mtod(data->m, uint8_t *);

	/* finalize mbuf */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;

	s = splnet();

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
		tap->wr_rate = ural_rxrate(desc);
		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
		tap->wr_antenna = sc->rx_ant;
		tap->wr_antsignal = desc->rssi;

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_rxtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
	}
#endif
	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);

	/* send the frame to the 802.11 layer */
	rxi.rxi_flags = 0;
	rxi.rxi_rssi = desc->rssi;
	rxi.rxi_tstamp = 0;	/* unused */
	ieee80211_input(ifp, m, ni, &rxi);

	/* node is no longer needed */
	ieee80211_release_node(ic, ni);

	splx(s);

	DPRINTFN(15, ("rx done\n"));

skip:	/* setup a new transfer */
	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
	(void)usbd_transfer(xfer);
}

/*
 * This function is only used by the Rx radiotap code. It returns the rate at
 * which a given frame was received.
 */
#if NBPFILTER > 0
uint8_t
ural_rxrate(const struct ural_rx_desc *desc)
{
	if (letoh32(desc->flags) & RAL_RX_OFDM) {
		/* reverse function of ural_plcp_signal */
		switch (desc->rate) {
		case 0xb:	return 12;
		case 0xf:	return 18;
		case 0xa:	return 24;
		case 0xe:	return 36;
		case 0x9:	return 48;
		case 0xd:	return 72;
		case 0x8:	return 96;
		case 0xc:	return 108;
		}
	} else {
		if (desc->rate == 10)
			return 2;
		if (desc->rate == 20)
			return 4;
		if (desc->rate == 55)
			return 11;
		if (desc->rate == 110)
			return 22;
	}
	return 2;	/* should not get here */
}
#endif

/*
 * Return the expected ack rate for a frame transmitted at rate `rate'.
 */
int
ural_ack_rate(struct ieee80211com *ic, int rate)
{
	switch (rate) {
	/* CCK rates */
	case 2:
		return 2;
	case 4:
	case 11:
	case 22:
		return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;

	/* OFDM rates */
	case 12:
	case 18:
		return 12;
	case 24:
	case 36:
		return 24;
	case 48:
	case 72:
	case 96:
	case 108:
		return 48;
	}

	/* default to 1Mbps */
	return 2;
}

/*
 * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
 * The function automatically determines the operating mode depending on the
 * given rate. `flags' indicates whether short preamble is in use or not.
 */
uint16_t
ural_txtime(int len, int rate, uint32_t flags)
{
	uint16_t txtime;

	if (RAL_RATE_IS_OFDM(rate)) {
		/* IEEE Std 802.11g-2003, pp. 44 */
		txtime = (8 + 4 * len + 3 + rate - 1) / rate;
		txtime = 16 + 4 + 4 * txtime + 6;
	} else {
		/* IEEE Std 802.11b-1999, pp. 28 */
		txtime = (16 * len + rate - 1) / rate;
		if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
			txtime += 72 + 24;
		else
			txtime += 144 + 48;
	}
	return txtime;
}

uint8_t
ural_plcp_signal(int rate)
{
	switch (rate) {
	/* CCK rates (returned values are device-dependent) */
	case 2:		return 0x0;
	case 4:		return 0x1;
	case 11:	return 0x2;
	case 22:	return 0x3;

	/* OFDM rates (cf IEEE Std 802.11a-1999, pp.
14 Table 80) */ 925 case 12: return 0xb; 926 case 18: return 0xf; 927 case 24: return 0xa; 928 case 36: return 0xe; 929 case 48: return 0x9; 930 case 72: return 0xd; 931 case 96: return 0x8; 932 case 108: return 0xc; 933 934 /* unsupported rates (should not get there) */ 935 default: return 0xff; 936 } 937 } 938 939 void 940 ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc, 941 uint32_t flags, int len, int rate) 942 { 943 struct ieee80211com *ic = &sc->sc_ic; 944 uint16_t plcp_length; 945 int remainder; 946 947 desc->flags = htole32(flags); 948 desc->flags |= htole32(len << 16); 949 950 desc->wme = htole16( 951 RAL_AIFSN(2) | 952 RAL_LOGCWMIN(3) | 953 RAL_LOGCWMAX(5)); 954 955 /* setup PLCP fields */ 956 desc->plcp_signal = ural_plcp_signal(rate); 957 desc->plcp_service = 4; 958 959 len += IEEE80211_CRC_LEN; 960 if (RAL_RATE_IS_OFDM(rate)) { 961 desc->flags |= htole32(RAL_TX_OFDM); 962 963 plcp_length = len & 0xfff; 964 desc->plcp_length_hi = plcp_length >> 6; 965 desc->plcp_length_lo = plcp_length & 0x3f; 966 } else { 967 plcp_length = (16 * len + rate - 1) / rate; 968 if (rate == 22) { 969 remainder = (16 * len) % 22; 970 if (remainder != 0 && remainder < 7) 971 desc->plcp_service |= RAL_PLCP_LENGEXT; 972 } 973 desc->plcp_length_hi = plcp_length >> 8; 974 desc->plcp_length_lo = plcp_length & 0xff; 975 976 if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) 977 desc->plcp_signal |= 0x08; 978 } 979 980 desc->iv = 0; 981 desc->eiv = 0; 982 } 983 984 #define RAL_TX_TIMEOUT 5000 985 986 #ifndef IEEE80211_STA_ONLY 987 int 988 ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) 989 { 990 struct ural_tx_desc *desc; 991 usbd_xfer_handle xfer; 992 usbd_status error; 993 uint8_t cmd = 0; 994 uint8_t *buf; 995 int xferlen, rate = 2; 996 997 xfer = usbd_alloc_xfer(sc->sc_udev); 998 if (xfer == NULL) 999 return ENOMEM; 1000 1001 /* xfer length needs to be a multiple of two! 
*/ 1002 xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; 1003 1004 buf = usbd_alloc_buffer(xfer, xferlen); 1005 if (buf == NULL) { 1006 usbd_free_xfer(xfer); 1007 return ENOMEM; 1008 } 1009 1010 usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd, 1011 USBD_FORCE_SHORT_XFER, RAL_TX_TIMEOUT, NULL); 1012 1013 error = usbd_sync_transfer(xfer); 1014 if (error != 0) { 1015 usbd_free_xfer(xfer); 1016 return error; 1017 } 1018 1019 desc = (struct ural_tx_desc *)buf; 1020 1021 m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE); 1022 ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, 1023 m0->m_pkthdr.len, rate); 1024 1025 DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n", 1026 m0->m_pkthdr.len, rate, xferlen)); 1027 1028 usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen, 1029 USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, NULL); 1030 1031 error = usbd_sync_transfer(xfer); 1032 usbd_free_xfer(xfer); 1033 1034 return error; 1035 } 1036 #endif 1037 1038 int 1039 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) 1040 { 1041 struct ieee80211com *ic = &sc->sc_ic; 1042 struct ural_tx_desc *desc; 1043 struct ural_tx_data *data; 1044 struct ieee80211_frame *wh; 1045 struct ieee80211_key *k; 1046 uint32_t flags = RAL_TX_NEWSEQ; 1047 uint16_t dur; 1048 usbd_status error; 1049 int rate, xferlen, pktlen, needrts = 0, needcts = 0; 1050 1051 wh = mtod(m0, struct ieee80211_frame *); 1052 1053 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 1054 k = ieee80211_get_txkey(ic, wh, ni); 1055 1056 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL) 1057 return ENOBUFS; 1058 1059 /* packet header may have moved, reset our local pointer */ 1060 wh = mtod(m0, struct ieee80211_frame *); 1061 } 1062 1063 /* compute actual packet length (including CRC and crypto overhead) */ 1064 pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN; 1065 1066 /* pickup a rate */ 1067 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 1068 ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 1069 IEEE80211_FC0_TYPE_MGT)) { 1070 /* mgmt/multicast frames are sent at the lowest avail. rate */ 1071 rate = ni->ni_rates.rs_rates[0]; 1072 } else if (ic->ic_fixed_rate != -1) { 1073 rate = ic->ic_sup_rates[ic->ic_curmode]. 
1074 rs_rates[ic->ic_fixed_rate]; 1075 } else 1076 rate = ni->ni_rates.rs_rates[ni->ni_txrate]; 1077 if (rate == 0) 1078 rate = 2; /* XXX should not happen */ 1079 rate &= IEEE80211_RATE_VAL; 1080 1081 /* check if RTS/CTS or CTS-to-self protection must be used */ 1082 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1083 /* multicast frames are not sent at OFDM rates in 802.11b/g */ 1084 if (pktlen > ic->ic_rtsthreshold) { 1085 needrts = 1; /* RTS/CTS based on frame length */ 1086 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1087 RAL_RATE_IS_OFDM(rate)) { 1088 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 1089 needcts = 1; /* CTS-to-self */ 1090 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 1091 needrts = 1; /* RTS/CTS */ 1092 } 1093 } 1094 if (needrts || needcts) { 1095 struct mbuf *mprot; 1096 int protrate, ackrate; 1097 uint16_t dur; 1098 1099 protrate = 2; 1100 ackrate = ural_ack_rate(ic, rate); 1101 1102 dur = ural_txtime(pktlen, rate, ic->ic_flags) + 1103 ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) + 1104 2 * RAL_SIFS; 1105 if (needrts) { 1106 dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic, 1107 protrate), ic->ic_flags) + RAL_SIFS; 1108 mprot = ieee80211_get_rts(ic, wh, dur); 1109 } else { 1110 mprot = ieee80211_get_cts_to_self(ic, dur); 1111 } 1112 if (mprot == NULL) { 1113 printf("%s: could not allocate protection frame\n", 1114 sc->sc_dev.dv_xname); 1115 m_freem(m0); 1116 return ENOBUFS; 1117 } 1118 1119 data = &sc->tx_data[sc->tx_cur]; 1120 desc = (struct ural_tx_desc *)data->buf; 1121 1122 /* avoid multiple free() of the same node for each fragment */ 1123 data->ni = ieee80211_ref_node(ni); 1124 1125 m_copydata(mprot, 0, mprot->m_pkthdr.len, 1126 data->buf + RAL_TX_DESC_SIZE); 1127 ural_setup_tx_desc(sc, desc, 1128 (needrts ? 
RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7), 1129 mprot->m_pkthdr.len, protrate); 1130 1131 /* no roundup necessary here */ 1132 xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len; 1133 1134 /* XXX may want to pass the protection frame to BPF */ 1135 1136 /* mbuf is no longer needed */ 1137 m_freem(mprot); 1138 1139 usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, 1140 xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, 1141 RAL_TX_TIMEOUT, ural_txeof); 1142 error = usbd_transfer(data->xfer); 1143 if (error != 0 && error != USBD_IN_PROGRESS) { 1144 m_freem(m0); 1145 return error; 1146 } 1147 1148 sc->tx_queued++; 1149 sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; 1150 1151 flags |= RAL_TX_IFS_SIFS; 1152 } 1153 1154 data = &sc->tx_data[sc->tx_cur]; 1155 desc = (struct ural_tx_desc *)data->buf; 1156 1157 data->ni = ni; 1158 1159 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1160 flags |= RAL_TX_NEED_ACK; 1161 flags |= RAL_TX_RETRY(7); 1162 1163 dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate), 1164 ic->ic_flags) + RAL_SIFS; 1165 *(uint16_t *)wh->i_dur = htole16(dur); 1166 1167 #ifndef IEEE80211_STA_ONLY 1168 /* tell hardware to set timestamp in probe responses */ 1169 if ((wh->i_fc[0] & 1170 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 1171 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) 1172 flags |= RAL_TX_TIMESTAMP; 1173 #endif 1174 } 1175 1176 #if NBPFILTER > 0 1177 if (sc->sc_drvbpf != NULL) { 1178 struct mbuf mb; 1179 struct ural_tx_radiotap_header *tap = &sc->sc_txtap; 1180 1181 tap->wt_flags = 0; 1182 tap->wt_rate = rate; 1183 tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq); 1184 tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags); 1185 tap->wt_antenna = sc->tx_ant; 1186 1187 mb.m_data = (caddr_t)tap; 1188 mb.m_len = sc->sc_txtap_len; 1189 mb.m_next = m0; 1190 mb.m_nextpkt = NULL; 1191 mb.m_type = 0; 1192 mb.m_flags = 0; 1193 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 1194 } 1195 #endif 1196 1197 m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE); 1198 ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate); 1199 1200 /* align end on a 2-bytes boundary */ 1201 xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; 1202 1203 /* 1204 * No space left in the last URB to store the extra 2 bytes, force 1205 * sending of another URB. 1206 */ 1207 if ((xferlen % 64) == 0) 1208 xferlen += 2; 1209 1210 DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n", 1211 m0->m_pkthdr.len, rate, xferlen)); 1212 1213 /* mbuf is no longer needed */ 1214 m_freem(m0); 1215 1216 usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, 1217 USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof); 1218 error = usbd_transfer(data->xfer); 1219 if (error != 0 && error != USBD_IN_PROGRESS) 1220 return error; 1221 1222 sc->tx_queued++; 1223 sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; 1224 1225 return 0; 1226 } 1227 1228 void 1229 ural_start(struct ifnet *ifp) 1230 { 1231 struct ural_softc *sc = ifp->if_softc; 1232 struct ieee80211com *ic = &sc->sc_ic; 1233 struct ieee80211_node *ni; 1234 struct mbuf *m0; 1235 1236 /* 1237 * net80211 may still try to send management frames even if the 1238 * IFF_RUNNING flag is not set... 
1239 */ 1240 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1241 return; 1242 1243 for (;;) { 1244 IF_POLL(&ic->ic_mgtq, m0); 1245 if (m0 != NULL) { 1246 if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) { 1247 ifp->if_flags |= IFF_OACTIVE; 1248 break; 1249 } 1250 IF_DEQUEUE(&ic->ic_mgtq, m0); 1251 1252 ni = (struct ieee80211_node *)m0->m_pkthdr.rcvif; 1253 m0->m_pkthdr.rcvif = NULL; 1254 #if NBPFILTER > 0 1255 if (ic->ic_rawbpf != NULL) 1256 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 1257 #endif 1258 if (ural_tx_data(sc, m0, ni) != 0) 1259 break; 1260 1261 } else { 1262 if (ic->ic_state != IEEE80211_S_RUN) 1263 break; 1264 IFQ_POLL(&ifp->if_snd, m0); 1265 if (m0 == NULL) 1266 break; 1267 if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) { 1268 ifp->if_flags |= IFF_OACTIVE; 1269 break; 1270 } 1271 IFQ_DEQUEUE(&ifp->if_snd, m0); 1272 #if NBPFILTER > 0 1273 if (ifp->if_bpf != NULL) 1274 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT); 1275 #endif 1276 m0 = ieee80211_encap(ifp, m0, &ni); 1277 if (m0 == NULL) 1278 continue; 1279 #if NBPFILTER > 0 1280 if (ic->ic_rawbpf != NULL) 1281 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 1282 #endif 1283 if (ural_tx_data(sc, m0, ni) != 0) { 1284 if (ni != NULL) 1285 ieee80211_release_node(ic, ni); 1286 ifp->if_oerrors++; 1287 break; 1288 } 1289 } 1290 1291 sc->sc_tx_timer = 5; 1292 ifp->if_timer = 1; 1293 } 1294 } 1295 1296 void 1297 ural_watchdog(struct ifnet *ifp) 1298 { 1299 struct ural_softc *sc = ifp->if_softc; 1300 1301 ifp->if_timer = 0; 1302 1303 if (sc->sc_tx_timer > 0) { 1304 if (--sc->sc_tx_timer == 0) { 1305 printf("%s: device timeout\n", sc->sc_dev.dv_xname); 1306 /*ural_init(ifp); XXX needs a process context! */ 1307 ifp->if_oerrors++; 1308 return; 1309 } 1310 ifp->if_timer = 1; 1311 } 1312 1313 ieee80211_watchdog(ifp); 1314 } 1315 1316 int 1317 ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1318 { 1319 struct ural_softc *sc = ifp->if_softc; 1320 struct ieee80211com *ic = &sc->sc_ic; 1321 struct ifaddr *ifa; 1322 struct ifreq *ifr; 1323 int s, error = 0; 1324 1325 s = splnet(); 1326 1327 switch (cmd) { 1328 case SIOCSIFADDR: 1329 ifa = (struct ifaddr *)data; 1330 ifp->if_flags |= IFF_UP; 1331 #ifdef INET 1332 if (ifa->ifa_addr->sa_family == AF_INET) 1333 arp_ifinit(&ic->ic_ac, ifa); 1334 #endif 1335 /* FALLTHROUGH */ 1336 case SIOCSIFFLAGS: 1337 if (ifp->if_flags & IFF_UP) { 1338 if (ifp->if_flags & IFF_RUNNING) 1339 ural_update_promisc(sc); 1340 else 1341 ural_init(ifp); 1342 } else { 1343 if (ifp->if_flags & IFF_RUNNING) 1344 ural_stop(ifp, 1); 1345 } 1346 break; 1347 1348 case SIOCADDMULTI: 1349 case SIOCDELMULTI: 1350 ifr = (struct ifreq *)data; 1351 error = (cmd == SIOCADDMULTI) ? 1352 ether_addmulti(ifr, &ic->ic_ac) : 1353 ether_delmulti(ifr, &ic->ic_ac); 1354 1355 if (error == ENETRESET) 1356 error = 0; 1357 break; 1358 1359 case SIOCS80211CHANNEL: 1360 /* 1361 * This allows for fast channel switching in monitor mode 1362 * (used by kismet). In IBSS mode, we must explicitly reset 1363 * the interface to generate a new beacon frame. 
1364 */ 1365 error = ieee80211_ioctl(ifp, cmd, data); 1366 if (error == ENETRESET && 1367 ic->ic_opmode == IEEE80211_M_MONITOR) { 1368 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1369 (IFF_UP | IFF_RUNNING)) 1370 ural_set_chan(sc, ic->ic_ibss_chan); 1371 error = 0; 1372 } 1373 break; 1374 1375 default: 1376 error = ieee80211_ioctl(ifp, cmd, data); 1377 } 1378 1379 if (error == ENETRESET) { 1380 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1381 (IFF_UP | IFF_RUNNING)) 1382 ural_init(ifp); 1383 error = 0; 1384 } 1385 1386 splx(s); 1387 1388 return error; 1389 } 1390 1391 void 1392 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) 1393 { 1394 usb_device_request_t req; 1395 usbd_status error; 1396 1397 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1398 req.bRequest = RAL_READ_EEPROM; 1399 USETW(req.wValue, 0); 1400 USETW(req.wIndex, addr); 1401 USETW(req.wLength, len); 1402 1403 error = usbd_do_request(sc->sc_udev, &req, buf); 1404 if (error != 0) { 1405 printf("%s: could not read EEPROM: %s\n", 1406 sc->sc_dev.dv_xname, usbd_errstr(error)); 1407 } 1408 } 1409 1410 uint16_t 1411 ural_read(struct ural_softc *sc, uint16_t reg) 1412 { 1413 usb_device_request_t req; 1414 usbd_status error; 1415 uint16_t val; 1416 1417 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1418 req.bRequest = RAL_READ_MAC; 1419 USETW(req.wValue, 0); 1420 USETW(req.wIndex, reg); 1421 USETW(req.wLength, sizeof (uint16_t)); 1422 1423 error = usbd_do_request(sc->sc_udev, &req, &val); 1424 if (error != 0) { 1425 printf("%s: could not read MAC register: %s\n", 1426 sc->sc_dev.dv_xname, usbd_errstr(error)); 1427 return 0; 1428 } 1429 return letoh16(val); 1430 } 1431 1432 void 1433 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1434 { 1435 usb_device_request_t req; 1436 usbd_status error; 1437 1438 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1439 req.bRequest = RAL_READ_MULTI_MAC; 1440 USETW(req.wValue, 0); 1441 USETW(req.wIndex, reg); 1442 USETW(req.wLength, len); 1443 1444 error = usbd_do_request(sc->sc_udev, &req, buf); 1445 if (error != 0) { 1446 printf("%s: could not read MAC register: %s\n", 1447 sc->sc_dev.dv_xname, usbd_errstr(error)); 1448 } 1449 } 1450 1451 void 1452 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) 1453 { 1454 usb_device_request_t req; 1455 usbd_status error; 1456 1457 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1458 req.bRequest = RAL_WRITE_MAC; 1459 USETW(req.wValue, val); 1460 USETW(req.wIndex, reg); 1461 USETW(req.wLength, 0); 1462 1463 error = usbd_do_request(sc->sc_udev, &req, NULL); 1464 if (error != 0) { 1465 printf("%s: could not write MAC register: %s\n", 1466 sc->sc_dev.dv_xname, usbd_errstr(error)); 1467 } 1468 } 1469 1470 void 1471 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1472 { 1473 usb_device_request_t req; 1474 usbd_status error; 1475 1476 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1477 req.bRequest = RAL_WRITE_MULTI_MAC; 1478 USETW(req.wValue, 0); 1479 USETW(req.wIndex, reg); 1480 USETW(req.wLength, len); 1481 1482 error = usbd_do_request(sc->sc_udev, &req, buf); 1483 if (error != 0) { 1484 printf("%s: could not write MAC register: %s\n", 1485 sc->sc_dev.dv_xname, usbd_errstr(error)); 1486 } 1487 } 1488 1489 void 1490 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) 1491 { 1492 uint16_t tmp; 1493 int ntries; 1494 1495 for (ntries = 0; ntries < 5; ntries++) { 1496 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) 1497 break; 1498 } 1499 if (ntries == 5) { 1500 printf("%s: could 
not write to BBP\n", sc->sc_dev.dv_xname); 1501 return; 1502 } 1503 1504 tmp = reg << 8 | val; 1505 ural_write(sc, RAL_PHY_CSR7, tmp); 1506 } 1507 1508 uint8_t 1509 ural_bbp_read(struct ural_softc *sc, uint8_t reg) 1510 { 1511 uint16_t val; 1512 int ntries; 1513 1514 val = RAL_BBP_WRITE | reg << 8; 1515 ural_write(sc, RAL_PHY_CSR7, val); 1516 1517 for (ntries = 0; ntries < 5; ntries++) { 1518 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) 1519 break; 1520 } 1521 if (ntries == 5) { 1522 printf("%s: could not read BBP\n", sc->sc_dev.dv_xname); 1523 return 0; 1524 } 1525 return ural_read(sc, RAL_PHY_CSR7) & 0xff; 1526 } 1527 1528 void 1529 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val) 1530 { 1531 uint32_t tmp; 1532 int ntries; 1533 1534 for (ntries = 0; ntries < 5; ntries++) { 1535 if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY)) 1536 break; 1537 } 1538 if (ntries == 5) { 1539 printf("%s: could not write to RF\n", sc->sc_dev.dv_xname); 1540 return; 1541 } 1542 1543 tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); 1544 ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff); 1545 ural_write(sc, RAL_PHY_CSR10, tmp >> 16); 1546 1547 /* remember last written value in sc */ 1548 sc->rf_regs[reg] = val; 1549 1550 DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff)); 1551 } 1552 1553 void 1554 ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c) 1555 { 1556 struct ieee80211com *ic = &sc->sc_ic; 1557 uint8_t power, tmp; 1558 u_int chan; 1559 1560 chan = ieee80211_chan2ieee(ic, c); 1561 if (chan == 0 || chan == IEEE80211_CHAN_ANY) 1562 return; 1563 1564 power = min(sc->txpow[chan - 1], 31); 1565 1566 DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power)); 1567 1568 switch (sc->rf_rev) { 1569 case RAL_RF_2522: 1570 ural_rf_write(sc, RAL_RF1, 0x00814); 1571 ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]); 1572 ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); 1573 break; 1574 1575 case RAL_RF_2523: 1576 ural_rf_write(sc, RAL_RF1, 0x08804); 1577 ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]); 1578 ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044); 1579 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1580 break; 1581 1582 case RAL_RF_2524: 1583 ural_rf_write(sc, RAL_RF1, 0x0c808); 1584 ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]); 1585 ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); 1586 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1587 break; 1588 1589 case RAL_RF_2525: 1590 ural_rf_write(sc, RAL_RF1, 0x08808); 1591 ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]); 1592 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1593 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1594 1595 ural_rf_write(sc, RAL_RF1, 0x08808); 1596 ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]); 1597 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1598 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1599 break; 1600 1601 case RAL_RF_2525E: 1602 ural_rf_write(sc, RAL_RF1, 0x08808); 1603 ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]); 1604 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1605 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); 1606 break; 1607 1608 case RAL_RF_2526: 1609 ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]); 1610 ural_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); 1611 ural_rf_write(sc, RAL_RF1, 0x08804); 1612 1613 ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]); 1614 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1615 ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); 1616 break; 1617 } 1618 1619 if (ic->ic_opmode != IEEE80211_M_MONITOR && 1620 ic->ic_state != IEEE80211_S_SCAN) { 1621 /* set Japan filter bit for channel 14 */ 1622 tmp = ural_bbp_read(sc, 70); 1623 1624 tmp &= ~RAL_JAPAN_FILTER; 1625 if (chan == 14) 1626 tmp |= RAL_JAPAN_FILTER; 1627 1628 ural_bbp_write(sc, 70, tmp); 1629 1630 /* clear CRC errors */ 1631 ural_read(sc, RAL_STA_CSR0); 1632 1633 DELAY(1000); /* RF needs a 1ms delay here */ 1634 ural_disable_rf_tune(sc); 1635 } 1636 } 1637 1638 /* 1639 * Disable RF auto-tuning. 1640 */ 1641 void 1642 ural_disable_rf_tune(struct ural_softc *sc) 1643 { 1644 uint32_t tmp; 1645 1646 if (sc->rf_rev != RAL_RF_2523) { 1647 tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; 1648 ural_rf_write(sc, RAL_RF1, tmp); 1649 } 1650 1651 tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; 1652 ural_rf_write(sc, RAL_RF3, tmp); 1653 1654 DPRINTFN(2, ("disabling RF autotune\n")); 1655 } 1656 1657 /* 1658 * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF 1659 * synchronization. 1660 */ 1661 void 1662 ural_enable_tsf_sync(struct ural_softc *sc) 1663 { 1664 struct ieee80211com *ic = &sc->sc_ic; 1665 uint16_t logcwmin, preload, tmp; 1666 1667 /* first, disable TSF synchronization */ 1668 ural_write(sc, RAL_TXRX_CSR19, 0); 1669 1670 tmp = (16 * ic->ic_bss->ni_intval) << 4; 1671 ural_write(sc, RAL_TXRX_CSR18, tmp); 1672 1673 #ifndef IEEE80211_STA_ONLY 1674 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1675 logcwmin = 2; 1676 preload = 320; 1677 } else 1678 #endif 1679 { 1680 logcwmin = 0; 1681 preload = 6; 1682 } 1683 tmp = logcwmin << 12 | preload; 1684 ural_write(sc, RAL_TXRX_CSR20, tmp); 1685 1686 /* finally, enable TSF synchronization */ 1687 tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; 1688 if (ic->ic_opmode == IEEE80211_M_STA) 1689 tmp |= RAL_ENABLE_TSF_SYNC(1); 1690 #ifndef IEEE80211_STA_ONLY 1691 else 1692 tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; 1693 #endif 1694 ural_write(sc, RAL_TXRX_CSR19, tmp); 1695 1696 DPRINTF(("enabling TSF synchronization\n")); 1697 } 1698 1699 void 1700 ural_update_slot(struct ural_softc *sc) 1701 { 1702 struct ieee80211com *ic = &sc->sc_ic; 1703 uint16_t slottime, sifs, eifs; 1704 1705 slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; 1706 1707 /* 1708 * These settings may sound a bit inconsistent but this is what the 1709 * reference driver does. 
1710 */ 1711 if (ic->ic_curmode == IEEE80211_MODE_11B) { 1712 sifs = 16 - RAL_RXTX_TURNAROUND; 1713 eifs = 364; 1714 } else { 1715 sifs = 10 - RAL_RXTX_TURNAROUND; 1716 eifs = 64; 1717 } 1718 1719 ural_write(sc, RAL_MAC_CSR10, slottime); 1720 ural_write(sc, RAL_MAC_CSR11, sifs); 1721 ural_write(sc, RAL_MAC_CSR12, eifs); 1722 } 1723 1724 void 1725 ural_set_txpreamble(struct ural_softc *sc) 1726 { 1727 uint16_t tmp; 1728 1729 tmp = ural_read(sc, RAL_TXRX_CSR10); 1730 1731 tmp &= ~RAL_SHORT_PREAMBLE; 1732 if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE) 1733 tmp |= RAL_SHORT_PREAMBLE; 1734 1735 ural_write(sc, RAL_TXRX_CSR10, tmp); 1736 } 1737 1738 void 1739 ural_set_basicrates(struct ural_softc *sc) 1740 { 1741 struct ieee80211com *ic = &sc->sc_ic; 1742 1743 /* update basic rate set */ 1744 if (ic->ic_curmode == IEEE80211_MODE_11B) { 1745 /* 11b basic rates: 1, 2Mbps */ 1746 ural_write(sc, RAL_TXRX_CSR11, 0x3); 1747 } else { 1748 /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ 1749 ural_write(sc, RAL_TXRX_CSR11, 0xf); 1750 } 1751 } 1752 1753 void 1754 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) 1755 { 1756 uint16_t tmp; 1757 1758 tmp = bssid[0] | bssid[1] << 8; 1759 ural_write(sc, RAL_MAC_CSR5, tmp); 1760 1761 tmp = bssid[2] | bssid[3] << 8; 1762 ural_write(sc, RAL_MAC_CSR6, tmp); 1763 1764 tmp = bssid[4] | bssid[5] << 8; 1765 ural_write(sc, RAL_MAC_CSR7, tmp); 1766 1767 DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid))); 1768 } 1769 1770 void 1771 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr) 1772 { 1773 uint16_t tmp; 1774 1775 tmp = addr[0] | addr[1] << 8; 1776 ural_write(sc, RAL_MAC_CSR2, tmp); 1777 1778 tmp = addr[2] | addr[3] << 8; 1779 ural_write(sc, RAL_MAC_CSR3, tmp); 1780 1781 tmp = addr[4] | addr[5] << 8; 1782 ural_write(sc, RAL_MAC_CSR4, tmp); 1783 1784 DPRINTF(("setting MAC address to %s\n", 1785 ether_sprintf((uint8_t *)addr))); 1786 } 1787 1788 void 1789 ural_update_promisc(struct ural_softc *sc) 1790 { 1791 struct ifnet *ifp = &sc->sc_ic.ic_if; 1792 uint16_t tmp; 1793 1794 tmp = ural_read(sc, RAL_TXRX_CSR2); 1795 1796 tmp &= ~RAL_DROP_NOT_TO_ME; 1797 if (!(ifp->if_flags & IFF_PROMISC)) 1798 tmp |= RAL_DROP_NOT_TO_ME; 1799 1800 ural_write(sc, RAL_TXRX_CSR2, tmp); 1801 1802 DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? 
1803 "entering" : "leaving")); 1804 } 1805 1806 const char * 1807 ural_get_rf(int rev) 1808 { 1809 switch (rev) { 1810 case RAL_RF_2522: return "RT2522"; 1811 case RAL_RF_2523: return "RT2523"; 1812 case RAL_RF_2524: return "RT2524"; 1813 case RAL_RF_2525: return "RT2525"; 1814 case RAL_RF_2525E: return "RT2525e"; 1815 case RAL_RF_2526: return "RT2526"; 1816 case RAL_RF_5222: return "RT5222"; 1817 default: return "unknown"; 1818 } 1819 } 1820 1821 void 1822 ural_read_eeprom(struct ural_softc *sc) 1823 { 1824 struct ieee80211com *ic = &sc->sc_ic; 1825 uint16_t val; 1826 1827 /* retrieve MAC/BBP type */ 1828 ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2); 1829 sc->macbbp_rev = letoh16(val); 1830 1831 ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2); 1832 val = letoh16(val); 1833 sc->rf_rev = (val >> 11) & 0x7; 1834 sc->hw_radio = (val >> 10) & 0x1; 1835 sc->led_mode = (val >> 6) & 0x7; 1836 sc->rx_ant = (val >> 4) & 0x3; 1837 sc->tx_ant = (val >> 2) & 0x3; 1838 sc->nb_ant = val & 0x3; 1839 1840 /* read MAC address */ 1841 ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6); 1842 1843 /* read default values for BBP registers */ 1844 ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); 1845 1846 /* read Tx power for all b/g channels */ 1847 ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14); 1848 } 1849 1850 int 1851 ural_bbp_init(struct ural_softc *sc) 1852 { 1853 #define N(a) (sizeof (a) / sizeof ((a)[0])) 1854 int i, ntries; 1855 1856 /* wait for BBP to be ready */ 1857 for (ntries = 0; ntries < 100; ntries++) { 1858 if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0) 1859 break; 1860 DELAY(1000); 1861 } 1862 if (ntries == 100) { 1863 printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname); 1864 return EIO; 1865 } 1866 1867 /* initialize BBP registers to default values */ 1868 for (i = 0; i < N(ural_def_bbp); i++) 1869 ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val); 1870 1871 #if 0 1872 /* initialize BBP registers to values stored in EEPROM */ 1873 for (i = 0; i < 16; i++) { 1874 if (sc->bbp_prom[i].reg == 0xff) 1875 continue; 1876 ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); 1877 } 1878 #endif 1879 1880 return 0; 1881 #undef N 1882 } 1883 1884 void 1885 ural_set_txantenna(struct ural_softc *sc, int antenna) 1886 { 1887 uint16_t tmp; 1888 uint8_t tx; 1889 1890 tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK; 1891 if (antenna == 1) 1892 tx |= RAL_BBP_ANTA; 1893 else if (antenna == 2) 1894 tx |= RAL_BBP_ANTB; 1895 else 1896 tx |= RAL_BBP_DIVERSITY; 1897 1898 /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ 1899 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 || 1900 sc->rf_rev == RAL_RF_5222) 1901 tx |= RAL_BBP_FLIPIQ; 1902 1903 ural_bbp_write(sc, RAL_BBP_TX, tx); 1904 1905 /* update flags in PHY_CSR5 and PHY_CSR6 too */ 1906 tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7; 1907 ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7)); 1908 1909 tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7; 1910 ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7)); 1911 } 1912 1913 void 1914 ural_set_rxantenna(struct ural_softc *sc, int antenna) 1915 { 1916 uint8_t rx; 1917 1918 rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK; 1919 if (antenna == 1) 1920 rx |= RAL_BBP_ANTA; 1921 else if (antenna == 2) 1922 rx |= RAL_BBP_ANTB; 1923 else 1924 rx |= RAL_BBP_DIVERSITY; 1925 1926 /* need to force no I/Q flip for RF 2525e and 2526 */ 1927 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526) 1928 rx &= ~RAL_BBP_FLIPIQ; 1929 1930 ural_bbp_write(sc, 
RAL_BBP_RX, rx); 1931 } 1932 1933 int 1934 ural_init(struct ifnet *ifp) 1935 { 1936 #define N(a) (sizeof (a) / sizeof ((a)[0])) 1937 struct ural_softc *sc = ifp->if_softc; 1938 struct ieee80211com *ic = &sc->sc_ic; 1939 uint16_t tmp; 1940 usbd_status error; 1941 int i, ntries; 1942 1943 ural_stop(ifp, 0); 1944 1945 /* initialize MAC registers to default values */ 1946 for (i = 0; i < N(ural_def_mac); i++) 1947 ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val); 1948 1949 /* wait for BBP and RF to wake up (this can take a long time!) */ 1950 for (ntries = 0; ntries < 100; ntries++) { 1951 tmp = ural_read(sc, RAL_MAC_CSR17); 1952 if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) == 1953 (RAL_BBP_AWAKE | RAL_RF_AWAKE)) 1954 break; 1955 DELAY(1000); 1956 } 1957 if (ntries == 100) { 1958 printf("%s: timeout waiting for BBP/RF to wakeup\n", 1959 sc->sc_dev.dv_xname); 1960 error = EIO; 1961 goto fail; 1962 } 1963 1964 /* we're ready! */ 1965 ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY); 1966 1967 /* set basic rate set (will be updated later) */ 1968 ural_write(sc, RAL_TXRX_CSR11, 0x153); 1969 1970 error = ural_bbp_init(sc); 1971 if (error != 0) 1972 goto fail; 1973 1974 /* set default BSS channel */ 1975 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 1976 ural_set_chan(sc, ic->ic_bss->ni_chan); 1977 1978 /* clear statistic registers (STA_CSR0 to STA_CSR10) */ 1979 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); 1980 1981 /* set default sensitivity */ 1982 ural_bbp_write(sc, 17, 0x48); 1983 1984 ural_set_txantenna(sc, 1); 1985 ural_set_rxantenna(sc, 1); 1986 1987 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 1988 ural_set_macaddr(sc, ic->ic_myaddr); 1989 1990 /* 1991 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31). 1992 */ 1993 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1994 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1995 ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE, 1996 k->k_key, IEEE80211_KEYBUF_SIZE); 1997 } 1998 1999 /* 2000 * Allocate xfer for AMRR statistics requests. 2001 */ 2002 sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev); 2003 if (sc->amrr_xfer == NULL) { 2004 printf("%s: could not allocate AMRR xfer\n", 2005 sc->sc_dev.dv_xname); 2006 goto fail; 2007 } 2008 2009 /* 2010 * Open Tx and Rx USB bulk pipes. 2011 */ 2012 error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE, 2013 &sc->sc_tx_pipeh); 2014 if (error != 0) { 2015 printf("%s: could not open Tx pipe: %s\n", 2016 sc->sc_dev.dv_xname, usbd_errstr(error)); 2017 goto fail; 2018 } 2019 error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE, 2020 &sc->sc_rx_pipeh); 2021 if (error != 0) { 2022 printf("%s: could not open Rx pipe: %s\n", 2023 sc->sc_dev.dv_xname, usbd_errstr(error)); 2024 goto fail; 2025 } 2026 2027 /* 2028 * Allocate Tx and Rx xfer queues. 2029 */ 2030 error = ural_alloc_tx_list(sc); 2031 if (error != 0) { 2032 printf("%s: could not allocate Tx list\n", 2033 sc->sc_dev.dv_xname); 2034 goto fail; 2035 } 2036 error = ural_alloc_rx_list(sc); 2037 if (error != 0) { 2038 printf("%s: could not allocate Rx list\n", 2039 sc->sc_dev.dv_xname); 2040 goto fail; 2041 } 2042 2043 /* 2044 * Start up the receive pipe. 
2045 */ 2046 for (i = 0; i < RAL_RX_LIST_COUNT; i++) { 2047 struct ural_rx_data *data = &sc->rx_data[i]; 2048 2049 usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf, 2050 MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof); 2051 error = usbd_transfer(data->xfer); 2052 if (error != 0 && error != USBD_IN_PROGRESS) { 2053 printf("%s: could not queue Rx transfer\n", 2054 sc->sc_dev.dv_xname); 2055 goto fail; 2056 } 2057 } 2058 2059 /* kick Rx */ 2060 tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR; 2061 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 2062 tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR; 2063 #ifndef IEEE80211_STA_ONLY 2064 if (ic->ic_opmode != IEEE80211_M_HOSTAP) 2065 #endif 2066 tmp |= RAL_DROP_TODS; 2067 if (!(ifp->if_flags & IFF_PROMISC)) 2068 tmp |= RAL_DROP_NOT_TO_ME; 2069 } 2070 ural_write(sc, RAL_TXRX_CSR2, tmp); 2071 2072 ifp->if_flags &= ~IFF_OACTIVE; 2073 ifp->if_flags |= IFF_RUNNING; 2074 2075 if (ic->ic_opmode == IEEE80211_M_MONITOR) 2076 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 2077 else 2078 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 2079 2080 return 0; 2081 2082 fail: ural_stop(ifp, 1); 2083 return error; 2084 #undef N 2085 } 2086 2087 void 2088 ural_stop(struct ifnet *ifp, int disable) 2089 { 2090 struct ural_softc *sc = ifp->if_softc; 2091 struct ieee80211com *ic = &sc->sc_ic; 2092 2093 sc->sc_tx_timer = 0; 2094 ifp->if_timer = 0; 2095 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2096 2097 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); /* free all nodes */ 2098 2099 /* disable Rx */ 2100 ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX); 2101 2102 /* reset ASIC and BBP (but won't reset MAC registers!) */ 2103 ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP); 2104 ural_write(sc, RAL_MAC_CSR1, 0); 2105 2106 if (sc->amrr_xfer != NULL) { 2107 usbd_free_xfer(sc->amrr_xfer); 2108 sc->amrr_xfer = NULL; 2109 } 2110 if (sc->sc_rx_pipeh != NULL) { 2111 usbd_abort_pipe(sc->sc_rx_pipeh); 2112 usbd_close_pipe(sc->sc_rx_pipeh); 2113 sc->sc_rx_pipeh = NULL; 2114 } 2115 if (sc->sc_tx_pipeh != NULL) { 2116 usbd_abort_pipe(sc->sc_tx_pipeh); 2117 usbd_close_pipe(sc->sc_tx_pipeh); 2118 sc->sc_tx_pipeh = NULL; 2119 } 2120 2121 ural_free_rx_list(sc); 2122 ural_free_tx_list(sc); 2123 } 2124 2125 void 2126 ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 2127 { 2128 /* start with lowest Tx rate */ 2129 ni->ni_txrate = 0; 2130 } 2131 2132 void 2133 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni) 2134 { 2135 int i; 2136 2137 /* clear statistic registers (STA_CSR0 to STA_CSR10) */ 2138 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); 2139 2140 ieee80211_amrr_node_init(&sc->amrr, &sc->amn); 2141 2142 /* set rate to some reasonable initial value */ 2143 for (i = ni->ni_rates.rs_nrates - 1; 2144 i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72; 2145 i--); 2146 ni->ni_txrate = i; 2147 2148 timeout_add_sec(&sc->amrr_to, 1); 2149 } 2150 2151 void 2152 ural_amrr_timeout(void *arg) 2153 { 2154 struct ural_softc *sc = arg; 2155 usb_device_request_t req; 2156 int s; 2157 2158 s = splusb(); 2159 2160 /* 2161 * Asynchronously read statistic registers (cleared by read). 
2162 */ 2163 req.bmRequestType = UT_READ_VENDOR_DEVICE; 2164 req.bRequest = RAL_READ_MULTI_MAC; 2165 USETW(req.wValue, 0); 2166 USETW(req.wIndex, RAL_STA_CSR0); 2167 USETW(req.wLength, sizeof sc->sta); 2168 2169 usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc, 2170 USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0, 2171 ural_amrr_update); 2172 (void)usbd_transfer(sc->amrr_xfer); 2173 2174 splx(s); 2175 } 2176 2177 void 2178 ural_amrr_update(usbd_xfer_handle xfer, usbd_private_handle priv, 2179 usbd_status status) 2180 { 2181 struct ural_softc *sc = (struct ural_softc *)priv; 2182 struct ifnet *ifp = &sc->sc_ic.ic_if; 2183 2184 if (status != USBD_NORMAL_COMPLETION) { 2185 printf("%s: could not retrieve Tx statistics - cancelling " 2186 "automatic rate control\n", sc->sc_dev.dv_xname); 2187 return; 2188 } 2189 2190 /* count TX retry-fail as Tx errors */ 2191 ifp->if_oerrors += letoh16(sc->sta[9]); 2192 2193 sc->amn.amn_retrycnt = 2194 letoh16(sc->sta[7]) + /* TX one-retry ok count */ 2195 letoh16(sc->sta[8]) + /* TX more-retry ok count */ 2196 letoh16(sc->sta[9]); /* TX retry-fail count */ 2197 2198 sc->amn.amn_txcnt = 2199 sc->amn.amn_retrycnt + 2200 letoh16(sc->sta[6]); /* TX no-retry ok count */ 2201 2202 ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn); 2203 2204 timeout_add_sec(&sc->amrr_to, 1); 2205 } 2206 2207 int 2208 ural_activate(struct device *self, enum devact act) 2209 { 2210 switch (act) { 2211 case DVACT_ACTIVATE: 2212 break; 2213 2214 case DVACT_DEACTIVATE: 2215 break; 2216 } 2217 2218 return 0; 2219 } 2220