/*	$OpenBSD: if_ral.c,v 1.128 2014/07/13 15:52:49 mpi Exp $	*/

/*-
 * Copyright (c) 2005, 2006
 *	Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Ralink Technology RT2500USB chipset driver
 * http://www.ralinktech.com.tw/
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/conf.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/intr.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include <dev/usb/usbdevs.h>

#include <dev/usb/if_ralreg.h>
#include <dev/usb/if_ralvar.h>

#ifdef URAL_DEBUG
#define DPRINTF(x)	do { if (ural_debug) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (ural_debug >= (n)) printf x; } while (0)
int ural_debug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

/* various supported device vendors/products */
static const struct usb_devno ural_devs[] = {
        { USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2570 },
        { USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2570_2 },
        { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050 },
        { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54G },
        { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
        { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_HU200TS },
        { USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_C54RU },
        { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_RT2570 },
        { USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWBKG },
        { USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254 },
        { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54 },
        { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54AI },
        { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54YB },
        { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_NINWIFI },
        { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570 },
        { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_2 },
        { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_3 },
        { USB_VENDOR_NOVATECH, USB_PRODUCT_NOVATECH_NV902W },
        { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570 },
        { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_2 },
        { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_3 },
        { USB_VENDOR_SPHAIRON, USB_PRODUCT_SPHAIRON_UB801R },
        { USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_RT2570 },
        { USB_VENDOR_VTECH, USB_PRODUCT_VTECH_RT2570 },
        { USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT2570 }
};

int		ural_alloc_tx_list(struct ural_softc *);
void		ural_free_tx_list(struct ural_softc *);
int		ural_alloc_rx_list(struct ural_softc *);
void		ural_free_rx_list(struct ural_softc *);
int		ural_media_change(struct ifnet *);
void		ural_next_scan(void *);
void		ural_task(void *);
int		ural_newstate(struct ieee80211com *, enum ieee80211_state,
		    int);
void		ural_txeof(struct usbd_xfer *, void *, usbd_status);
void		ural_rxeof(struct usbd_xfer *, void *, usbd_status);
#if NBPFILTER > 0
uint8_t		ural_rxrate(const struct ural_rx_desc *);
#endif
int		ural_ack_rate(struct ieee80211com *, int);
uint16_t	ural_txtime(int, int, uint32_t);
uint8_t		ural_plcp_signal(int);
void		ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
		    uint32_t, int, int);
#ifndef IEEE80211_STA_ONLY
int		ural_tx_bcn(struct ural_softc *, struct mbuf *,
		    struct ieee80211_node *);
#endif
int		ural_tx_data(struct ural_softc *, struct mbuf *,
		    struct ieee80211_node *);
void		ural_start(struct ifnet *);
void		ural_watchdog(struct ifnet *);
int		ural_ioctl(struct ifnet *, u_long, caddr_t);
void		ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
uint16_t	ural_read(struct ural_softc *, uint16_t);
void		ural_read_multi(struct ural_softc *, uint16_t, void *, int);
void		ural_write(struct ural_softc *, uint16_t, uint16_t);
void		ural_write_multi(struct ural_softc *, uint16_t, void *, int);
void		ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
uint8_t		ural_bbp_read(struct ural_softc *, uint8_t);
void		ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
void		ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
void		ural_disable_rf_tune(struct ural_softc *);
void		ural_enable_tsf_sync(struct ural_softc *);
void		ural_update_slot(struct ural_softc *);
void		ural_set_txpreamble(struct ural_softc *);
void		ural_set_basicrates(struct ural_softc *);
void		ural_set_bssid(struct ural_softc *, const uint8_t *);
void		ural_set_macaddr(struct ural_softc *, const uint8_t *);
void		ural_update_promisc(struct ural_softc *);
const char	*ural_get_rf(int);
void		ural_read_eeprom(struct ural_softc *);
int		ural_bbp_init(struct ural_softc *);
void		ural_set_txantenna(struct ural_softc *, int);
void		ural_set_rxantenna(struct ural_softc *, int);
int		ural_init(struct ifnet *);
void		ural_stop(struct ifnet *, int);
void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
		    int);
void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
void		ural_amrr_timeout(void *);
void		ural_amrr_update(struct usbd_xfer *, void *,
		    usbd_status status);

static const struct {
        uint16_t reg;
        uint16_t val;
} ural_def_mac[] = {
        RAL_DEF_MAC
};

static const struct {
        uint8_t reg;
        uint8_t val;
} ural_def_bbp[] = {
        RAL_DEF_BBP
};

static const uint32_t ural_rf2522_r2[]    = RAL_RF2522_R2;
static const uint32_t ural_rf2523_r2[]    = RAL_RF2523_R2;
static const uint32_t ural_rf2524_r2[]    = RAL_RF2524_R2;
static const uint32_t ural_rf2525_r2[]    = RAL_RF2525_R2;
static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
static const uint32_t ural_rf2525e_r2[]   = RAL_RF2525E_R2;
static const uint32_t ural_rf2526_hi_r2[] =
RAL_RF2526_HI_R2; 183 static const uint32_t ural_rf2526_r2[] = RAL_RF2526_R2; 184 185 int ural_match(struct device *, void *, void *); 186 void ural_attach(struct device *, struct device *, void *); 187 int ural_detach(struct device *, int); 188 189 struct cfdriver ural_cd = { 190 NULL, "ural", DV_IFNET 191 }; 192 193 const struct cfattach ural_ca = { 194 sizeof(struct ural_softc), ural_match, ural_attach, ural_detach 195 }; 196 197 int 198 ural_match(struct device *parent, void *match, void *aux) 199 { 200 struct usb_attach_arg *uaa = aux; 201 202 if (uaa->iface != NULL) 203 return UMATCH_NONE; 204 205 return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ? 206 UMATCH_VENDOR_PRODUCT : UMATCH_NONE; 207 } 208 209 void 210 ural_attach(struct device *parent, struct device *self, void *aux) 211 { 212 struct ural_softc *sc = (struct ural_softc *)self; 213 struct usb_attach_arg *uaa = aux; 214 struct ieee80211com *ic = &sc->sc_ic; 215 struct ifnet *ifp = &ic->ic_if; 216 usb_interface_descriptor_t *id; 217 usb_endpoint_descriptor_t *ed; 218 usbd_status error; 219 int i; 220 221 sc->sc_udev = uaa->device; 222 223 if (usbd_set_config_no(sc->sc_udev, RAL_CONFIG_NO, 0) != 0) { 224 printf("%s: could not set configuration no\n", 225 sc->sc_dev.dv_xname); 226 return; 227 } 228 229 /* get the first interface handle */ 230 error = usbd_device2interface_handle(sc->sc_udev, RAL_IFACE_INDEX, 231 &sc->sc_iface); 232 if (error != 0) { 233 printf("%s: could not get interface handle\n", 234 sc->sc_dev.dv_xname); 235 return; 236 } 237 238 /* 239 * Find endpoints. 240 */ 241 id = usbd_get_interface_descriptor(sc->sc_iface); 242 243 sc->sc_rx_no = sc->sc_tx_no = -1; 244 for (i = 0; i < id->bNumEndpoints; i++) { 245 ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i); 246 if (ed == NULL) { 247 printf("%s: no endpoint descriptor for iface %d\n", 248 sc->sc_dev.dv_xname, i); 249 return; 250 } 251 252 if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && 253 UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) 254 sc->sc_rx_no = ed->bEndpointAddress; 255 else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && 256 UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) 257 sc->sc_tx_no = ed->bEndpointAddress; 258 } 259 if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) { 260 printf("%s: missing endpoint\n", sc->sc_dev.dv_xname); 261 return; 262 } 263 264 usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC); 265 timeout_set(&sc->scan_to, ural_next_scan, sc); 266 267 sc->amrr.amrr_min_success_threshold = 1; 268 sc->amrr.amrr_max_success_threshold = 10; 269 timeout_set(&sc->amrr_to, ural_amrr_timeout, sc); 270 271 /* retrieve RT2570 rev. 
no */ 272 sc->asic_rev = ural_read(sc, RAL_MAC_CSR0); 273 274 /* retrieve MAC address and various other things from EEPROM */ 275 ural_read_eeprom(sc); 276 277 printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n", 278 sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev, 279 ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr)); 280 281 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 282 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 283 ic->ic_state = IEEE80211_S_INIT; 284 285 /* set device capabilities */ 286 ic->ic_caps = 287 IEEE80211_C_MONITOR | /* monitor mode supported */ 288 #ifndef IEEE80211_STA_ONLY 289 IEEE80211_C_IBSS | /* IBSS mode supported */ 290 IEEE80211_C_HOSTAP | /* HostAp mode supported */ 291 #endif 292 IEEE80211_C_TXPMGT | /* tx power management */ 293 IEEE80211_C_SHPREAMBLE | /* short preamble supported */ 294 IEEE80211_C_SHSLOT | /* short slot time supported */ 295 IEEE80211_C_WEP | /* s/w WEP */ 296 IEEE80211_C_RSN; /* WPA/RSN */ 297 298 /* set supported .11b and .11g rates */ 299 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b; 300 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g; 301 302 /* set supported .11b and .11g channels (1 through 14) */ 303 for (i = 1; i <= 14; i++) { 304 ic->ic_channels[i].ic_freq = 305 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ); 306 ic->ic_channels[i].ic_flags = 307 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | 308 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 309 } 310 311 ifp->if_softc = sc; 312 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 313 ifp->if_ioctl = ural_ioctl; 314 ifp->if_start = ural_start; 315 ifp->if_watchdog = ural_watchdog; 316 IFQ_SET_READY(&ifp->if_snd); 317 memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 318 319 if_attach(ifp); 320 ieee80211_ifattach(ifp); 321 ic->ic_newassoc = ural_newassoc; 322 323 /* override state transition machine */ 324 sc->sc_newstate = ic->ic_newstate; 325 ic->ic_newstate = ural_newstate; 326 ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status); 327 328 #if NBPFILTER > 0 329 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO, 330 sizeof (struct ieee80211_frame) + 64); 331 332 sc->sc_rxtap_len = sizeof sc->sc_rxtapu; 333 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 334 sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT); 335 336 sc->sc_txtap_len = sizeof sc->sc_txtapu; 337 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 338 sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT); 339 #endif 340 } 341 342 int 343 ural_detach(struct device *self, int flags) 344 { 345 struct ural_softc *sc = (struct ural_softc *)self; 346 struct ifnet *ifp = &sc->sc_ic.ic_if; 347 int s; 348 349 s = splusb(); 350 351 if (timeout_initialized(&sc->scan_to)) 352 timeout_del(&sc->scan_to); 353 if (timeout_initialized(&sc->amrr_to)) 354 timeout_del(&sc->amrr_to); 355 356 usb_rem_wait_task(sc->sc_udev, &sc->sc_task); 357 358 usbd_ref_wait(sc->sc_udev); 359 360 if (ifp->if_softc != NULL) { 361 ieee80211_ifdetach(ifp); /* free all nodes */ 362 if_detach(ifp); 363 } 364 365 if (sc->amrr_xfer != NULL) { 366 usbd_free_xfer(sc->amrr_xfer); 367 sc->amrr_xfer = NULL; 368 } 369 370 if (sc->sc_rx_pipeh != NULL) { 371 usbd_abort_pipe(sc->sc_rx_pipeh); 372 usbd_close_pipe(sc->sc_rx_pipeh); 373 } 374 375 if (sc->sc_tx_pipeh != NULL) { 376 usbd_abort_pipe(sc->sc_tx_pipeh); 377 usbd_close_pipe(sc->sc_tx_pipeh); 378 } 379 380 ural_free_rx_list(sc); 381 ural_free_tx_list(sc); 382 383 
splx(s); 384 385 return 0; 386 } 387 388 int 389 ural_alloc_tx_list(struct ural_softc *sc) 390 { 391 int i, error; 392 393 sc->tx_cur = sc->tx_queued = 0; 394 395 for (i = 0; i < RAL_TX_LIST_COUNT; i++) { 396 struct ural_tx_data *data = &sc->tx_data[i]; 397 398 data->sc = sc; 399 400 data->xfer = usbd_alloc_xfer(sc->sc_udev); 401 if (data->xfer == NULL) { 402 printf("%s: could not allocate tx xfer\n", 403 sc->sc_dev.dv_xname); 404 error = ENOMEM; 405 goto fail; 406 } 407 data->buf = usbd_alloc_buffer(data->xfer, 408 RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN); 409 if (data->buf == NULL) { 410 printf("%s: could not allocate tx buffer\n", 411 sc->sc_dev.dv_xname); 412 error = ENOMEM; 413 goto fail; 414 } 415 } 416 417 return 0; 418 419 fail: ural_free_tx_list(sc); 420 return error; 421 } 422 423 void 424 ural_free_tx_list(struct ural_softc *sc) 425 { 426 int i; 427 428 for (i = 0; i < RAL_TX_LIST_COUNT; i++) { 429 struct ural_tx_data *data = &sc->tx_data[i]; 430 431 if (data->xfer != NULL) { 432 usbd_free_xfer(data->xfer); 433 data->xfer = NULL; 434 } 435 /* 436 * The node has already been freed at that point so don't call 437 * ieee80211_release_node() here. 438 */ 439 data->ni = NULL; 440 } 441 } 442 443 int 444 ural_alloc_rx_list(struct ural_softc *sc) 445 { 446 int i, error; 447 448 for (i = 0; i < RAL_RX_LIST_COUNT; i++) { 449 struct ural_rx_data *data = &sc->rx_data[i]; 450 451 data->sc = sc; 452 453 data->xfer = usbd_alloc_xfer(sc->sc_udev); 454 if (data->xfer == NULL) { 455 printf("%s: could not allocate rx xfer\n", 456 sc->sc_dev.dv_xname); 457 error = ENOMEM; 458 goto fail; 459 } 460 if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) { 461 printf("%s: could not allocate rx buffer\n", 462 sc->sc_dev.dv_xname); 463 error = ENOMEM; 464 goto fail; 465 } 466 467 MGETHDR(data->m, M_DONTWAIT, MT_DATA); 468 if (data->m == NULL) { 469 printf("%s: could not allocate rx mbuf\n", 470 sc->sc_dev.dv_xname); 471 error = ENOMEM; 472 goto fail; 473 } 474 MCLGET(data->m, M_DONTWAIT); 475 if (!(data->m->m_flags & M_EXT)) { 476 printf("%s: could not allocate rx mbuf cluster\n", 477 sc->sc_dev.dv_xname); 478 error = ENOMEM; 479 goto fail; 480 } 481 data->buf = mtod(data->m, uint8_t *); 482 } 483 484 return 0; 485 486 fail: ural_free_rx_list(sc); 487 return error; 488 } 489 490 void 491 ural_free_rx_list(struct ural_softc *sc) 492 { 493 int i; 494 495 for (i = 0; i < RAL_RX_LIST_COUNT; i++) { 496 struct ural_rx_data *data = &sc->rx_data[i]; 497 498 if (data->xfer != NULL) { 499 usbd_free_xfer(data->xfer); 500 data->xfer = NULL; 501 } 502 if (data->m != NULL) { 503 m_freem(data->m); 504 data->m = NULL; 505 } 506 } 507 } 508 509 int 510 ural_media_change(struct ifnet *ifp) 511 { 512 int error; 513 514 error = ieee80211_media_change(ifp); 515 if (error != ENETRESET) 516 return error; 517 518 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING)) 519 ural_init(ifp); 520 521 return 0; 522 } 523 524 /* 525 * This function is called periodically (every 200ms) during scanning to 526 * switch from one channel to another. 
527 */ 528 void 529 ural_next_scan(void *arg) 530 { 531 struct ural_softc *sc = arg; 532 struct ieee80211com *ic = &sc->sc_ic; 533 struct ifnet *ifp = &ic->ic_if; 534 535 if (usbd_is_dying(sc->sc_udev)) 536 return; 537 538 usbd_ref_incr(sc->sc_udev); 539 540 if (ic->ic_state == IEEE80211_S_SCAN) 541 ieee80211_next_scan(ifp); 542 543 usbd_ref_decr(sc->sc_udev); 544 } 545 546 void 547 ural_task(void *arg) 548 { 549 struct ural_softc *sc = arg; 550 struct ieee80211com *ic = &sc->sc_ic; 551 enum ieee80211_state ostate; 552 struct ieee80211_node *ni; 553 554 if (usbd_is_dying(sc->sc_udev)) 555 return; 556 557 ostate = ic->ic_state; 558 559 switch (sc->sc_state) { 560 case IEEE80211_S_INIT: 561 if (ostate == IEEE80211_S_RUN) { 562 /* abort TSF synchronization */ 563 ural_write(sc, RAL_TXRX_CSR19, 0); 564 565 /* force tx led to stop blinking */ 566 ural_write(sc, RAL_MAC_CSR20, 0); 567 } 568 break; 569 570 case IEEE80211_S_SCAN: 571 ural_set_chan(sc, ic->ic_bss->ni_chan); 572 if (!usbd_is_dying(sc->sc_udev)) 573 timeout_add_msec(&sc->scan_to, 200); 574 break; 575 576 case IEEE80211_S_AUTH: 577 ural_set_chan(sc, ic->ic_bss->ni_chan); 578 break; 579 580 case IEEE80211_S_ASSOC: 581 ural_set_chan(sc, ic->ic_bss->ni_chan); 582 break; 583 584 case IEEE80211_S_RUN: 585 ural_set_chan(sc, ic->ic_bss->ni_chan); 586 587 ni = ic->ic_bss; 588 589 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 590 ural_update_slot(sc); 591 ural_set_txpreamble(sc); 592 ural_set_basicrates(sc); 593 ural_set_bssid(sc, ni->ni_bssid); 594 } 595 596 #ifndef IEEE80211_STA_ONLY 597 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 598 ic->ic_opmode == IEEE80211_M_IBSS) { 599 struct mbuf *m = ieee80211_beacon_alloc(ic, ni); 600 if (m == NULL) { 601 printf("%s: could not allocate beacon\n", 602 sc->sc_dev.dv_xname); 603 return; 604 } 605 606 if (ural_tx_bcn(sc, m, ni) != 0) { 607 m_freem(m); 608 printf("%s: could not transmit beacon\n", 609 sc->sc_dev.dv_xname); 610 return; 611 } 612 613 /* beacon is no longer needed */ 614 m_freem(m); 615 } 616 #endif 617 618 /* make tx led blink on tx (controlled by ASIC) */ 619 ural_write(sc, RAL_MAC_CSR20, 1); 620 621 if (ic->ic_opmode != IEEE80211_M_MONITOR) 622 ural_enable_tsf_sync(sc); 623 624 if (ic->ic_opmode == IEEE80211_M_STA) { 625 /* fake a join to init the tx rate */ 626 ural_newassoc(ic, ic->ic_bss, 1); 627 628 /* enable automatic rate control in STA mode */ 629 if (ic->ic_fixed_rate == -1) 630 ural_amrr_start(sc, ic->ic_bss); 631 } 632 633 break; 634 } 635 636 sc->sc_newstate(ic, sc->sc_state, sc->sc_arg); 637 } 638 639 int 640 ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 641 { 642 struct ural_softc *sc = ic->ic_if.if_softc; 643 644 usb_rem_task(sc->sc_udev, &sc->sc_task); 645 timeout_del(&sc->scan_to); 646 timeout_del(&sc->amrr_to); 647 648 /* do it in a process context */ 649 sc->sc_state = nstate; 650 sc->sc_arg = arg; 651 usb_add_task(sc->sc_udev, &sc->sc_task); 652 return 0; 653 } 654 655 /* quickly determine if a given rate is CCK or OFDM */ 656 #define RAL_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 657 658 #define RAL_ACK_SIZE 14 /* 10 + 4(FCS) */ 659 #define RAL_CTS_SIZE 14 /* 10 + 4(FCS) */ 660 661 #define RAL_SIFS 10 /* us */ 662 663 #define RAL_RXTX_TURNAROUND 5 /* us */ 664 665 void 666 ural_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status) 667 { 668 struct ural_tx_data *data = priv; 669 struct ural_softc *sc = data->sc; 670 struct ieee80211com *ic = &sc->sc_ic; 671 struct ifnet *ifp = &ic->ic_if; 672 int s; 673 674 if (status != 
USBD_NORMAL_COMPLETION) { 675 if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) 676 return; 677 678 printf("%s: could not transmit buffer: %s\n", 679 sc->sc_dev.dv_xname, usbd_errstr(status)); 680 681 if (status == USBD_STALLED) 682 usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh); 683 684 ifp->if_oerrors++; 685 return; 686 } 687 688 s = splnet(); 689 690 ieee80211_release_node(ic, data->ni); 691 data->ni = NULL; 692 693 sc->tx_queued--; 694 ifp->if_opackets++; 695 696 DPRINTFN(10, ("tx done\n")); 697 698 sc->sc_tx_timer = 0; 699 ifp->if_flags &= ~IFF_OACTIVE; 700 ural_start(ifp); 701 702 splx(s); 703 } 704 705 void 706 ural_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status) 707 { 708 struct ural_rx_data *data = priv; 709 struct ural_softc *sc = data->sc; 710 struct ieee80211com *ic = &sc->sc_ic; 711 struct ifnet *ifp = &ic->ic_if; 712 const struct ural_rx_desc *desc; 713 struct ieee80211_frame *wh; 714 struct ieee80211_rxinfo rxi; 715 struct ieee80211_node *ni; 716 struct mbuf *mnew, *m; 717 int s, len; 718 719 if (status != USBD_NORMAL_COMPLETION) { 720 if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) 721 return; 722 723 if (status == USBD_STALLED) 724 usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh); 725 goto skip; 726 } 727 728 usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL); 729 730 if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) { 731 DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname, 732 len)); 733 ifp->if_ierrors++; 734 goto skip; 735 } 736 737 /* rx descriptor is located at the end */ 738 desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE); 739 740 if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) { 741 /* 742 * This should not happen since we did not request to receive 743 * those frames when we filled RAL_TXRX_CSR2. 
744 */ 745 DPRINTFN(5, ("PHY or CRC error\n")); 746 ifp->if_ierrors++; 747 goto skip; 748 } 749 750 MGETHDR(mnew, M_DONTWAIT, MT_DATA); 751 if (mnew == NULL) { 752 printf("%s: could not allocate rx mbuf\n", 753 sc->sc_dev.dv_xname); 754 ifp->if_ierrors++; 755 goto skip; 756 } 757 MCLGET(mnew, M_DONTWAIT); 758 if (!(mnew->m_flags & M_EXT)) { 759 printf("%s: could not allocate rx mbuf cluster\n", 760 sc->sc_dev.dv_xname); 761 m_freem(mnew); 762 ifp->if_ierrors++; 763 goto skip; 764 } 765 m = data->m; 766 data->m = mnew; 767 data->buf = mtod(data->m, uint8_t *); 768 769 /* finalize mbuf */ 770 m->m_pkthdr.rcvif = ifp; 771 m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff; 772 773 s = splnet(); 774 775 #if NBPFILTER > 0 776 if (sc->sc_drvbpf != NULL) { 777 struct mbuf mb; 778 struct ural_rx_radiotap_header *tap = &sc->sc_rxtap; 779 780 tap->wr_flags = IEEE80211_RADIOTAP_F_FCS; 781 tap->wr_rate = ural_rxrate(desc); 782 tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq); 783 tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags); 784 tap->wr_antenna = sc->rx_ant; 785 tap->wr_antsignal = desc->rssi; 786 787 mb.m_data = (caddr_t)tap; 788 mb.m_len = sc->sc_rxtap_len; 789 mb.m_next = m; 790 mb.m_nextpkt = NULL; 791 mb.m_type = 0; 792 mb.m_flags = 0; 793 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN); 794 } 795 #endif 796 m_adj(m, -IEEE80211_CRC_LEN); /* trim FCS */ 797 798 wh = mtod(m, struct ieee80211_frame *); 799 ni = ieee80211_find_rxnode(ic, wh); 800 801 /* send the frame to the 802.11 layer */ 802 rxi.rxi_flags = 0; 803 rxi.rxi_rssi = desc->rssi; 804 rxi.rxi_tstamp = 0; /* unused */ 805 ieee80211_input(ifp, m, ni, &rxi); 806 807 /* node is no longer needed */ 808 ieee80211_release_node(ic, ni); 809 810 splx(s); 811 812 DPRINTFN(15, ("rx done\n")); 813 814 skip: /* setup a new transfer */ 815 usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES, 816 USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof); 817 (void)usbd_transfer(xfer); 818 } 819 820 /* 821 * This function is only used by the Rx radiotap code. It returns the rate at 822 * which a given frame was received. 823 */ 824 #if NBPFILTER > 0 825 uint8_t 826 ural_rxrate(const struct ural_rx_desc *desc) 827 { 828 if (letoh32(desc->flags) & RAL_RX_OFDM) { 829 /* reverse function of ural_plcp_signal */ 830 switch (desc->rate) { 831 case 0xb: return 12; 832 case 0xf: return 18; 833 case 0xa: return 24; 834 case 0xe: return 36; 835 case 0x9: return 48; 836 case 0xd: return 72; 837 case 0x8: return 96; 838 case 0xc: return 108; 839 } 840 } else { 841 if (desc->rate == 10) 842 return 2; 843 if (desc->rate == 20) 844 return 4; 845 if (desc->rate == 55) 846 return 11; 847 if (desc->rate == 110) 848 return 22; 849 } 850 return 2; /* should not get there */ 851 } 852 #endif 853 854 /* 855 * Return the expected ack rate for a frame transmitted at rate `rate'. 856 */ 857 int 858 ural_ack_rate(struct ieee80211com *ic, int rate) 859 { 860 switch (rate) { 861 /* CCK rates */ 862 case 2: 863 return 2; 864 case 4: 865 case 11: 866 case 22: 867 return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate; 868 869 /* OFDM rates */ 870 case 12: 871 case 18: 872 return 12; 873 case 24: 874 case 36: 875 return 24; 876 case 48: 877 case 72: 878 case 96: 879 case 108: 880 return 48; 881 } 882 883 /* default to 1Mbps */ 884 return 2; 885 } 886 887 /* 888 * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'. 889 * The function automatically determines the operating mode depending on the 890 * given rate. 
`flags' indicates whether short preamble is in use or not. 891 */ 892 uint16_t 893 ural_txtime(int len, int rate, uint32_t flags) 894 { 895 uint16_t txtime; 896 897 if (RAL_RATE_IS_OFDM(rate)) { 898 /* IEEE Std 802.11g-2003, pp. 44 */ 899 txtime = (8 + 4 * len + 3 + rate - 1) / rate; 900 txtime = 16 + 4 + 4 * txtime + 6; 901 } else { 902 /* IEEE Std 802.11b-1999, pp. 28 */ 903 txtime = (16 * len + rate - 1) / rate; 904 if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE)) 905 txtime += 72 + 24; 906 else 907 txtime += 144 + 48; 908 } 909 return txtime; 910 } 911 912 uint8_t 913 ural_plcp_signal(int rate) 914 { 915 switch (rate) { 916 /* CCK rates (returned values are device-dependent) */ 917 case 2: return 0x0; 918 case 4: return 0x1; 919 case 11: return 0x2; 920 case 22: return 0x3; 921 922 /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ 923 case 12: return 0xb; 924 case 18: return 0xf; 925 case 24: return 0xa; 926 case 36: return 0xe; 927 case 48: return 0x9; 928 case 72: return 0xd; 929 case 96: return 0x8; 930 case 108: return 0xc; 931 932 /* unsupported rates (should not get there) */ 933 default: return 0xff; 934 } 935 } 936 937 void 938 ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc, 939 uint32_t flags, int len, int rate) 940 { 941 struct ieee80211com *ic = &sc->sc_ic; 942 uint16_t plcp_length; 943 int remainder; 944 945 desc->flags = htole32(flags); 946 desc->flags |= htole32(len << 16); 947 948 desc->wme = htole16( 949 RAL_AIFSN(2) | 950 RAL_LOGCWMIN(3) | 951 RAL_LOGCWMAX(5)); 952 953 /* setup PLCP fields */ 954 desc->plcp_signal = ural_plcp_signal(rate); 955 desc->plcp_service = 4; 956 957 len += IEEE80211_CRC_LEN; 958 if (RAL_RATE_IS_OFDM(rate)) { 959 desc->flags |= htole32(RAL_TX_OFDM); 960 961 plcp_length = len & 0xfff; 962 desc->plcp_length_hi = plcp_length >> 6; 963 desc->plcp_length_lo = plcp_length & 0x3f; 964 } else { 965 plcp_length = (16 * len + rate - 1) / rate; 966 if (rate == 22) { 967 remainder = (16 * len) % 22; 968 if (remainder != 0 && remainder < 7) 969 desc->plcp_service |= RAL_PLCP_LENGEXT; 970 } 971 desc->plcp_length_hi = plcp_length >> 8; 972 desc->plcp_length_lo = plcp_length & 0xff; 973 974 if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) 975 desc->plcp_signal |= 0x08; 976 } 977 978 desc->iv = 0; 979 desc->eiv = 0; 980 } 981 982 #define RAL_TX_TIMEOUT 5000 983 984 #ifndef IEEE80211_STA_ONLY 985 int 986 ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) 987 { 988 struct ural_tx_desc *desc; 989 struct usbd_xfer *xfer; 990 usbd_status error; 991 uint8_t cmd = 0; 992 uint8_t *buf; 993 int xferlen, rate = 2; 994 995 xfer = usbd_alloc_xfer(sc->sc_udev); 996 if (xfer == NULL) 997 return ENOMEM; 998 999 /* xfer length needs to be a multiple of two! 
*/ 1000 xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; 1001 1002 buf = usbd_alloc_buffer(xfer, xferlen); 1003 if (buf == NULL) { 1004 usbd_free_xfer(xfer); 1005 return ENOMEM; 1006 } 1007 1008 usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd, 1009 USBD_FORCE_SHORT_XFER | USBD_SYNCHRONOUS, RAL_TX_TIMEOUT, NULL); 1010 1011 error = usbd_transfer(xfer); 1012 if (error != 0) { 1013 usbd_free_xfer(xfer); 1014 return error; 1015 } 1016 1017 desc = (struct ural_tx_desc *)buf; 1018 1019 m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE); 1020 ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, 1021 m0->m_pkthdr.len, rate); 1022 1023 DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n", 1024 m0->m_pkthdr.len, rate, xferlen)); 1025 1026 usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen, 1027 USBD_FORCE_SHORT_XFER | USBD_NO_COPY | USBD_SYNCHRONOUS, 1028 RAL_TX_TIMEOUT, NULL); 1029 1030 error = usbd_transfer(xfer); 1031 usbd_free_xfer(xfer); 1032 1033 return error; 1034 } 1035 #endif 1036 1037 int 1038 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) 1039 { 1040 struct ieee80211com *ic = &sc->sc_ic; 1041 struct ural_tx_desc *desc; 1042 struct ural_tx_data *data; 1043 struct ieee80211_frame *wh; 1044 struct ieee80211_key *k; 1045 uint32_t flags = RAL_TX_NEWSEQ; 1046 uint16_t dur; 1047 usbd_status error; 1048 int rate, xferlen, pktlen, needrts = 0, needcts = 0; 1049 1050 wh = mtod(m0, struct ieee80211_frame *); 1051 1052 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 1053 k = ieee80211_get_txkey(ic, wh, ni); 1054 1055 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL) 1056 return ENOBUFS; 1057 1058 /* packet header may have moved, reset our local pointer */ 1059 wh = mtod(m0, struct ieee80211_frame *); 1060 } 1061 1062 /* compute actual packet length (including CRC and crypto overhead) */ 1063 pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN; 1064 1065 /* pickup a rate */ 1066 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 1067 ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 1068 IEEE80211_FC0_TYPE_MGT)) { 1069 /* mgmt/multicast frames are sent at the lowest avail. rate */ 1070 rate = ni->ni_rates.rs_rates[0]; 1071 } else if (ic->ic_fixed_rate != -1) { 1072 rate = ic->ic_sup_rates[ic->ic_curmode]. 
1073 rs_rates[ic->ic_fixed_rate]; 1074 } else 1075 rate = ni->ni_rates.rs_rates[ni->ni_txrate]; 1076 if (rate == 0) 1077 rate = 2; /* XXX should not happen */ 1078 rate &= IEEE80211_RATE_VAL; 1079 1080 /* check if RTS/CTS or CTS-to-self protection must be used */ 1081 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1082 /* multicast frames are not sent at OFDM rates in 802.11b/g */ 1083 if (pktlen > ic->ic_rtsthreshold) { 1084 needrts = 1; /* RTS/CTS based on frame length */ 1085 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1086 RAL_RATE_IS_OFDM(rate)) { 1087 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 1088 needcts = 1; /* CTS-to-self */ 1089 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 1090 needrts = 1; /* RTS/CTS */ 1091 } 1092 } 1093 if (needrts || needcts) { 1094 struct mbuf *mprot; 1095 int protrate, ackrate; 1096 uint16_t dur; 1097 1098 protrate = 2; 1099 ackrate = ural_ack_rate(ic, rate); 1100 1101 dur = ural_txtime(pktlen, rate, ic->ic_flags) + 1102 ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) + 1103 2 * RAL_SIFS; 1104 if (needrts) { 1105 dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic, 1106 protrate), ic->ic_flags) + RAL_SIFS; 1107 mprot = ieee80211_get_rts(ic, wh, dur); 1108 } else { 1109 mprot = ieee80211_get_cts_to_self(ic, dur); 1110 } 1111 if (mprot == NULL) { 1112 printf("%s: could not allocate protection frame\n", 1113 sc->sc_dev.dv_xname); 1114 m_freem(m0); 1115 return ENOBUFS; 1116 } 1117 1118 data = &sc->tx_data[sc->tx_cur]; 1119 desc = (struct ural_tx_desc *)data->buf; 1120 1121 /* avoid multiple free() of the same node for each fragment */ 1122 data->ni = ieee80211_ref_node(ni); 1123 1124 m_copydata(mprot, 0, mprot->m_pkthdr.len, 1125 data->buf + RAL_TX_DESC_SIZE); 1126 ural_setup_tx_desc(sc, desc, 1127 (needrts ? 
RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7), 1128 mprot->m_pkthdr.len, protrate); 1129 1130 /* no roundup necessary here */ 1131 xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len; 1132 1133 /* XXX may want to pass the protection frame to BPF */ 1134 1135 /* mbuf is no longer needed */ 1136 m_freem(mprot); 1137 1138 usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, 1139 xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, 1140 RAL_TX_TIMEOUT, ural_txeof); 1141 error = usbd_transfer(data->xfer); 1142 if (error != 0 && error != USBD_IN_PROGRESS) { 1143 m_freem(m0); 1144 return error; 1145 } 1146 1147 sc->tx_queued++; 1148 sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; 1149 1150 flags |= RAL_TX_IFS_SIFS; 1151 } 1152 1153 data = &sc->tx_data[sc->tx_cur]; 1154 desc = (struct ural_tx_desc *)data->buf; 1155 1156 data->ni = ni; 1157 1158 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1159 flags |= RAL_TX_NEED_ACK; 1160 flags |= RAL_TX_RETRY(7); 1161 1162 dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate), 1163 ic->ic_flags) + RAL_SIFS; 1164 *(uint16_t *)wh->i_dur = htole16(dur); 1165 1166 #ifndef IEEE80211_STA_ONLY 1167 /* tell hardware to set timestamp in probe responses */ 1168 if ((wh->i_fc[0] & 1169 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 1170 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) 1171 flags |= RAL_TX_TIMESTAMP; 1172 #endif 1173 } 1174 1175 #if NBPFILTER > 0 1176 if (sc->sc_drvbpf != NULL) { 1177 struct mbuf mb; 1178 struct ural_tx_radiotap_header *tap = &sc->sc_txtap; 1179 1180 tap->wt_flags = 0; 1181 tap->wt_rate = rate; 1182 tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq); 1183 tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags); 1184 tap->wt_antenna = sc->tx_ant; 1185 1186 mb.m_data = (caddr_t)tap; 1187 mb.m_len = sc->sc_txtap_len; 1188 mb.m_next = m0; 1189 mb.m_nextpkt = NULL; 1190 mb.m_type = 0; 1191 mb.m_flags = 0; 1192 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT); 1193 } 1194 #endif 1195 1196 m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE); 1197 ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate); 1198 1199 /* align end on a 2-bytes boundary */ 1200 xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; 1201 1202 /* 1203 * No space left in the last URB to store the extra 2 bytes, force 1204 * sending of another URB. 1205 */ 1206 if ((xferlen % 64) == 0) 1207 xferlen += 2; 1208 1209 DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n", 1210 m0->m_pkthdr.len, rate, xferlen)); 1211 1212 /* mbuf is no longer needed */ 1213 m_freem(m0); 1214 1215 usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, 1216 USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof); 1217 error = usbd_transfer(data->xfer); 1218 if (error != 0 && error != USBD_IN_PROGRESS) 1219 return error; 1220 1221 sc->tx_queued++; 1222 sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; 1223 1224 return 0; 1225 } 1226 1227 void 1228 ural_start(struct ifnet *ifp) 1229 { 1230 struct ural_softc *sc = ifp->if_softc; 1231 struct ieee80211com *ic = &sc->sc_ic; 1232 struct ieee80211_node *ni; 1233 struct mbuf *m0; 1234 1235 /* 1236 * net80211 may still try to send management frames even if the 1237 * IFF_RUNNING flag is not set... 
1238 */ 1239 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1240 return; 1241 1242 for (;;) { 1243 IF_POLL(&ic->ic_mgtq, m0); 1244 if (m0 != NULL) { 1245 if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) { 1246 ifp->if_flags |= IFF_OACTIVE; 1247 break; 1248 } 1249 IF_DEQUEUE(&ic->ic_mgtq, m0); 1250 1251 ni = m0->m_pkthdr.ph_cookie; 1252 #if NBPFILTER > 0 1253 if (ic->ic_rawbpf != NULL) 1254 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 1255 #endif 1256 if (ural_tx_data(sc, m0, ni) != 0) 1257 break; 1258 1259 } else { 1260 if (ic->ic_state != IEEE80211_S_RUN) 1261 break; 1262 IFQ_POLL(&ifp->if_snd, m0); 1263 if (m0 == NULL) 1264 break; 1265 if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) { 1266 ifp->if_flags |= IFF_OACTIVE; 1267 break; 1268 } 1269 IFQ_DEQUEUE(&ifp->if_snd, m0); 1270 #if NBPFILTER > 0 1271 if (ifp->if_bpf != NULL) 1272 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT); 1273 #endif 1274 m0 = ieee80211_encap(ifp, m0, &ni); 1275 if (m0 == NULL) 1276 continue; 1277 #if NBPFILTER > 0 1278 if (ic->ic_rawbpf != NULL) 1279 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT); 1280 #endif 1281 if (ural_tx_data(sc, m0, ni) != 0) { 1282 if (ni != NULL) 1283 ieee80211_release_node(ic, ni); 1284 ifp->if_oerrors++; 1285 break; 1286 } 1287 } 1288 1289 sc->sc_tx_timer = 5; 1290 ifp->if_timer = 1; 1291 } 1292 } 1293 1294 void 1295 ural_watchdog(struct ifnet *ifp) 1296 { 1297 struct ural_softc *sc = ifp->if_softc; 1298 1299 ifp->if_timer = 0; 1300 1301 if (sc->sc_tx_timer > 0) { 1302 if (--sc->sc_tx_timer == 0) { 1303 printf("%s: device timeout\n", sc->sc_dev.dv_xname); 1304 /*ural_init(ifp); XXX needs a process context! */ 1305 ifp->if_oerrors++; 1306 return; 1307 } 1308 ifp->if_timer = 1; 1309 } 1310 1311 ieee80211_watchdog(ifp); 1312 } 1313 1314 int 1315 ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1316 { 1317 struct ural_softc *sc = ifp->if_softc; 1318 struct ieee80211com *ic = &sc->sc_ic; 1319 struct ifaddr *ifa; 1320 struct ifreq *ifr; 1321 int s, error = 0; 1322 1323 if (usbd_is_dying(sc->sc_udev)) 1324 return ENXIO; 1325 1326 usbd_ref_incr(sc->sc_udev); 1327 1328 s = splnet(); 1329 1330 switch (cmd) { 1331 case SIOCSIFADDR: 1332 ifa = (struct ifaddr *)data; 1333 ifp->if_flags |= IFF_UP; 1334 #ifdef INET 1335 if (ifa->ifa_addr->sa_family == AF_INET) 1336 arp_ifinit(&ic->ic_ac, ifa); 1337 #endif 1338 /* FALLTHROUGH */ 1339 case SIOCSIFFLAGS: 1340 if (ifp->if_flags & IFF_UP) { 1341 if (ifp->if_flags & IFF_RUNNING) 1342 ural_update_promisc(sc); 1343 else 1344 ural_init(ifp); 1345 } else { 1346 if (ifp->if_flags & IFF_RUNNING) 1347 ural_stop(ifp, 1); 1348 } 1349 break; 1350 1351 case SIOCADDMULTI: 1352 case SIOCDELMULTI: 1353 ifr = (struct ifreq *)data; 1354 error = (cmd == SIOCADDMULTI) ? 1355 ether_addmulti(ifr, &ic->ic_ac) : 1356 ether_delmulti(ifr, &ic->ic_ac); 1357 1358 if (error == ENETRESET) 1359 error = 0; 1360 break; 1361 1362 case SIOCS80211CHANNEL: 1363 /* 1364 * This allows for fast channel switching in monitor mode 1365 * (used by kismet). In IBSS mode, we must explicitly reset 1366 * the interface to generate a new beacon frame. 
1367 */ 1368 error = ieee80211_ioctl(ifp, cmd, data); 1369 if (error == ENETRESET && 1370 ic->ic_opmode == IEEE80211_M_MONITOR) { 1371 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1372 (IFF_UP | IFF_RUNNING)) 1373 ural_set_chan(sc, ic->ic_ibss_chan); 1374 error = 0; 1375 } 1376 break; 1377 1378 default: 1379 error = ieee80211_ioctl(ifp, cmd, data); 1380 } 1381 1382 if (error == ENETRESET) { 1383 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1384 (IFF_UP | IFF_RUNNING)) 1385 ural_init(ifp); 1386 error = 0; 1387 } 1388 1389 splx(s); 1390 1391 usbd_ref_decr(sc->sc_udev); 1392 1393 return error; 1394 } 1395 1396 void 1397 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) 1398 { 1399 usb_device_request_t req; 1400 usbd_status error; 1401 1402 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1403 req.bRequest = RAL_READ_EEPROM; 1404 USETW(req.wValue, 0); 1405 USETW(req.wIndex, addr); 1406 USETW(req.wLength, len); 1407 1408 error = usbd_do_request(sc->sc_udev, &req, buf); 1409 if (error != 0) { 1410 printf("%s: could not read EEPROM: %s\n", 1411 sc->sc_dev.dv_xname, usbd_errstr(error)); 1412 } 1413 } 1414 1415 uint16_t 1416 ural_read(struct ural_softc *sc, uint16_t reg) 1417 { 1418 usb_device_request_t req; 1419 usbd_status error; 1420 uint16_t val; 1421 1422 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1423 req.bRequest = RAL_READ_MAC; 1424 USETW(req.wValue, 0); 1425 USETW(req.wIndex, reg); 1426 USETW(req.wLength, sizeof (uint16_t)); 1427 1428 error = usbd_do_request(sc->sc_udev, &req, &val); 1429 if (error != 0) { 1430 printf("%s: could not read MAC register: %s\n", 1431 sc->sc_dev.dv_xname, usbd_errstr(error)); 1432 return 0; 1433 } 1434 return letoh16(val); 1435 } 1436 1437 void 1438 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1439 { 1440 usb_device_request_t req; 1441 usbd_status error; 1442 1443 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1444 req.bRequest = RAL_READ_MULTI_MAC; 1445 USETW(req.wValue, 0); 1446 USETW(req.wIndex, reg); 1447 USETW(req.wLength, len); 1448 1449 error = usbd_do_request(sc->sc_udev, &req, buf); 1450 if (error != 0) { 1451 printf("%s: could not read MAC register: %s\n", 1452 sc->sc_dev.dv_xname, usbd_errstr(error)); 1453 } 1454 } 1455 1456 void 1457 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) 1458 { 1459 usb_device_request_t req; 1460 usbd_status error; 1461 1462 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1463 req.bRequest = RAL_WRITE_MAC; 1464 USETW(req.wValue, val); 1465 USETW(req.wIndex, reg); 1466 USETW(req.wLength, 0); 1467 1468 error = usbd_do_request(sc->sc_udev, &req, NULL); 1469 if (error != 0) { 1470 printf("%s: could not write MAC register: %s\n", 1471 sc->sc_dev.dv_xname, usbd_errstr(error)); 1472 } 1473 } 1474 1475 void 1476 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1477 { 1478 usb_device_request_t req; 1479 usbd_status error; 1480 1481 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1482 req.bRequest = RAL_WRITE_MULTI_MAC; 1483 USETW(req.wValue, 0); 1484 USETW(req.wIndex, reg); 1485 USETW(req.wLength, len); 1486 1487 error = usbd_do_request(sc->sc_udev, &req, buf); 1488 if (error != 0) { 1489 printf("%s: could not write MAC register: %s\n", 1490 sc->sc_dev.dv_xname, usbd_errstr(error)); 1491 } 1492 } 1493 1494 void 1495 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) 1496 { 1497 uint16_t tmp; 1498 int ntries; 1499 1500 for (ntries = 0; ntries < 5; ntries++) { 1501 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) 1502 break; 1503 } 1504 if 
(ntries == 5) { 1505 printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname); 1506 return; 1507 } 1508 1509 tmp = reg << 8 | val; 1510 ural_write(sc, RAL_PHY_CSR7, tmp); 1511 } 1512 1513 uint8_t 1514 ural_bbp_read(struct ural_softc *sc, uint8_t reg) 1515 { 1516 uint16_t val; 1517 int ntries; 1518 1519 val = RAL_BBP_WRITE | reg << 8; 1520 ural_write(sc, RAL_PHY_CSR7, val); 1521 1522 for (ntries = 0; ntries < 5; ntries++) { 1523 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) 1524 break; 1525 } 1526 if (ntries == 5) { 1527 printf("%s: could not read BBP\n", sc->sc_dev.dv_xname); 1528 return 0; 1529 } 1530 return ural_read(sc, RAL_PHY_CSR7) & 0xff; 1531 } 1532 1533 void 1534 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val) 1535 { 1536 uint32_t tmp; 1537 int ntries; 1538 1539 for (ntries = 0; ntries < 5; ntries++) { 1540 if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY)) 1541 break; 1542 } 1543 if (ntries == 5) { 1544 printf("%s: could not write to RF\n", sc->sc_dev.dv_xname); 1545 return; 1546 } 1547 1548 tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); 1549 ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff); 1550 ural_write(sc, RAL_PHY_CSR10, tmp >> 16); 1551 1552 /* remember last written value in sc */ 1553 sc->rf_regs[reg] = val; 1554 1555 DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff)); 1556 } 1557 1558 void 1559 ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c) 1560 { 1561 struct ieee80211com *ic = &sc->sc_ic; 1562 uint8_t power, tmp; 1563 u_int chan; 1564 1565 chan = ieee80211_chan2ieee(ic, c); 1566 if (chan == 0 || chan == IEEE80211_CHAN_ANY) 1567 return; 1568 1569 power = min(sc->txpow[chan - 1], 31); 1570 1571 DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power)); 1572 1573 switch (sc->rf_rev) { 1574 case RAL_RF_2522: 1575 ural_rf_write(sc, RAL_RF1, 0x00814); 1576 ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]); 1577 ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); 1578 break; 1579 1580 case RAL_RF_2523: 1581 ural_rf_write(sc, RAL_RF1, 0x08804); 1582 ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]); 1583 ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044); 1584 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1585 break; 1586 1587 case RAL_RF_2524: 1588 ural_rf_write(sc, RAL_RF1, 0x0c808); 1589 ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]); 1590 ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); 1591 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1592 break; 1593 1594 case RAL_RF_2525: 1595 ural_rf_write(sc, RAL_RF1, 0x08808); 1596 ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]); 1597 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1598 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1599 1600 ural_rf_write(sc, RAL_RF1, 0x08808); 1601 ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]); 1602 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1603 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1604 break; 1605 1606 case RAL_RF_2525E: 1607 ural_rf_write(sc, RAL_RF1, 0x08808); 1608 ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]); 1609 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1610 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); 1611 break; 1612 1613 case RAL_RF_2526: 1614 ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]); 1615 ural_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); 1616 ural_rf_write(sc, RAL_RF1, 0x08804); 1617 1618 ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]); 1619 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1620 ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); 1621 break; 1622 } 1623 1624 if (ic->ic_opmode != IEEE80211_M_MONITOR && 1625 ic->ic_state != IEEE80211_S_SCAN) { 1626 /* set Japan filter bit for channel 14 */ 1627 tmp = ural_bbp_read(sc, 70); 1628 1629 tmp &= ~RAL_JAPAN_FILTER; 1630 if (chan == 14) 1631 tmp |= RAL_JAPAN_FILTER; 1632 1633 ural_bbp_write(sc, 70, tmp); 1634 1635 /* clear CRC errors */ 1636 ural_read(sc, RAL_STA_CSR0); 1637 1638 DELAY(1000); /* RF needs a 1ms delay here */ 1639 ural_disable_rf_tune(sc); 1640 } 1641 } 1642 1643 /* 1644 * Disable RF auto-tuning. 1645 */ 1646 void 1647 ural_disable_rf_tune(struct ural_softc *sc) 1648 { 1649 uint32_t tmp; 1650 1651 if (sc->rf_rev != RAL_RF_2523) { 1652 tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; 1653 ural_rf_write(sc, RAL_RF1, tmp); 1654 } 1655 1656 tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; 1657 ural_rf_write(sc, RAL_RF3, tmp); 1658 1659 DPRINTFN(2, ("disabling RF autotune\n")); 1660 } 1661 1662 /* 1663 * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF 1664 * synchronization. 1665 */ 1666 void 1667 ural_enable_tsf_sync(struct ural_softc *sc) 1668 { 1669 struct ieee80211com *ic = &sc->sc_ic; 1670 uint16_t logcwmin, preload, tmp; 1671 1672 /* first, disable TSF synchronization */ 1673 ural_write(sc, RAL_TXRX_CSR19, 0); 1674 1675 tmp = (16 * ic->ic_bss->ni_intval) << 4; 1676 ural_write(sc, RAL_TXRX_CSR18, tmp); 1677 1678 #ifndef IEEE80211_STA_ONLY 1679 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1680 logcwmin = 2; 1681 preload = 320; 1682 } else 1683 #endif 1684 { 1685 logcwmin = 0; 1686 preload = 6; 1687 } 1688 tmp = logcwmin << 12 | preload; 1689 ural_write(sc, RAL_TXRX_CSR20, tmp); 1690 1691 /* finally, enable TSF synchronization */ 1692 tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; 1693 if (ic->ic_opmode == IEEE80211_M_STA) 1694 tmp |= RAL_ENABLE_TSF_SYNC(1); 1695 #ifndef IEEE80211_STA_ONLY 1696 else 1697 tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; 1698 #endif 1699 ural_write(sc, RAL_TXRX_CSR19, tmp); 1700 1701 DPRINTF(("enabling TSF synchronization\n")); 1702 } 1703 1704 void 1705 ural_update_slot(struct ural_softc *sc) 1706 { 1707 struct ieee80211com *ic = &sc->sc_ic; 1708 uint16_t slottime, sifs, eifs; 1709 1710 slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; 1711 1712 /* 1713 * These settings may sound a bit inconsistent but this is what the 1714 * reference driver does. 
1715 */ 1716 if (ic->ic_curmode == IEEE80211_MODE_11B) { 1717 sifs = 16 - RAL_RXTX_TURNAROUND; 1718 eifs = 364; 1719 } else { 1720 sifs = 10 - RAL_RXTX_TURNAROUND; 1721 eifs = 64; 1722 } 1723 1724 ural_write(sc, RAL_MAC_CSR10, slottime); 1725 ural_write(sc, RAL_MAC_CSR11, sifs); 1726 ural_write(sc, RAL_MAC_CSR12, eifs); 1727 } 1728 1729 void 1730 ural_set_txpreamble(struct ural_softc *sc) 1731 { 1732 uint16_t tmp; 1733 1734 tmp = ural_read(sc, RAL_TXRX_CSR10); 1735 1736 tmp &= ~RAL_SHORT_PREAMBLE; 1737 if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE) 1738 tmp |= RAL_SHORT_PREAMBLE; 1739 1740 ural_write(sc, RAL_TXRX_CSR10, tmp); 1741 } 1742 1743 void 1744 ural_set_basicrates(struct ural_softc *sc) 1745 { 1746 struct ieee80211com *ic = &sc->sc_ic; 1747 1748 /* update basic rate set */ 1749 if (ic->ic_curmode == IEEE80211_MODE_11B) { 1750 /* 11b basic rates: 1, 2Mbps */ 1751 ural_write(sc, RAL_TXRX_CSR11, 0x3); 1752 } else { 1753 /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ 1754 ural_write(sc, RAL_TXRX_CSR11, 0xf); 1755 } 1756 } 1757 1758 void 1759 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) 1760 { 1761 uint16_t tmp; 1762 1763 tmp = bssid[0] | bssid[1] << 8; 1764 ural_write(sc, RAL_MAC_CSR5, tmp); 1765 1766 tmp = bssid[2] | bssid[3] << 8; 1767 ural_write(sc, RAL_MAC_CSR6, tmp); 1768 1769 tmp = bssid[4] | bssid[5] << 8; 1770 ural_write(sc, RAL_MAC_CSR7, tmp); 1771 1772 DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid))); 1773 } 1774 1775 void 1776 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr) 1777 { 1778 uint16_t tmp; 1779 1780 tmp = addr[0] | addr[1] << 8; 1781 ural_write(sc, RAL_MAC_CSR2, tmp); 1782 1783 tmp = addr[2] | addr[3] << 8; 1784 ural_write(sc, RAL_MAC_CSR3, tmp); 1785 1786 tmp = addr[4] | addr[5] << 8; 1787 ural_write(sc, RAL_MAC_CSR4, tmp); 1788 1789 DPRINTF(("setting MAC address to %s\n", 1790 ether_sprintf((uint8_t *)addr))); 1791 } 1792 1793 void 1794 ural_update_promisc(struct ural_softc *sc) 1795 { 1796 struct ifnet *ifp = &sc->sc_ic.ic_if; 1797 uint16_t tmp; 1798 1799 tmp = ural_read(sc, RAL_TXRX_CSR2); 1800 1801 tmp &= ~RAL_DROP_NOT_TO_ME; 1802 if (!(ifp->if_flags & IFF_PROMISC)) 1803 tmp |= RAL_DROP_NOT_TO_ME; 1804 1805 ural_write(sc, RAL_TXRX_CSR2, tmp); 1806 1807 DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? 
1808 "entering" : "leaving")); 1809 } 1810 1811 const char * 1812 ural_get_rf(int rev) 1813 { 1814 switch (rev) { 1815 case RAL_RF_2522: return "RT2522"; 1816 case RAL_RF_2523: return "RT2523"; 1817 case RAL_RF_2524: return "RT2524"; 1818 case RAL_RF_2525: return "RT2525"; 1819 case RAL_RF_2525E: return "RT2525e"; 1820 case RAL_RF_2526: return "RT2526"; 1821 case RAL_RF_5222: return "RT5222"; 1822 default: return "unknown"; 1823 } 1824 } 1825 1826 void 1827 ural_read_eeprom(struct ural_softc *sc) 1828 { 1829 struct ieee80211com *ic = &sc->sc_ic; 1830 uint16_t val; 1831 1832 /* retrieve MAC/BBP type */ 1833 ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2); 1834 sc->macbbp_rev = letoh16(val); 1835 1836 ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2); 1837 val = letoh16(val); 1838 sc->rf_rev = (val >> 11) & 0x7; 1839 sc->hw_radio = (val >> 10) & 0x1; 1840 sc->led_mode = (val >> 6) & 0x7; 1841 sc->rx_ant = (val >> 4) & 0x3; 1842 sc->tx_ant = (val >> 2) & 0x3; 1843 sc->nb_ant = val & 0x3; 1844 1845 /* read MAC address */ 1846 ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6); 1847 1848 /* read default values for BBP registers */ 1849 ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); 1850 1851 /* read Tx power for all b/g channels */ 1852 ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14); 1853 } 1854 1855 int 1856 ural_bbp_init(struct ural_softc *sc) 1857 { 1858 int i, ntries; 1859 1860 /* wait for BBP to be ready */ 1861 for (ntries = 0; ntries < 100; ntries++) { 1862 if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0) 1863 break; 1864 DELAY(1000); 1865 } 1866 if (ntries == 100) { 1867 printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname); 1868 return EIO; 1869 } 1870 1871 /* initialize BBP registers to default values */ 1872 for (i = 0; i < nitems(ural_def_bbp); i++) 1873 ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val); 1874 1875 #if 0 1876 /* initialize BBP registers to values stored in EEPROM */ 1877 for (i = 0; i < 16; i++) { 1878 if (sc->bbp_prom[i].reg == 0xff) 1879 continue; 1880 ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); 1881 } 1882 #endif 1883 1884 return 0; 1885 } 1886 1887 void 1888 ural_set_txantenna(struct ural_softc *sc, int antenna) 1889 { 1890 uint16_t tmp; 1891 uint8_t tx; 1892 1893 tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK; 1894 if (antenna == 1) 1895 tx |= RAL_BBP_ANTA; 1896 else if (antenna == 2) 1897 tx |= RAL_BBP_ANTB; 1898 else 1899 tx |= RAL_BBP_DIVERSITY; 1900 1901 /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ 1902 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 || 1903 sc->rf_rev == RAL_RF_5222) 1904 tx |= RAL_BBP_FLIPIQ; 1905 1906 ural_bbp_write(sc, RAL_BBP_TX, tx); 1907 1908 /* update flags in PHY_CSR5 and PHY_CSR6 too */ 1909 tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7; 1910 ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7)); 1911 1912 tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7; 1913 ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7)); 1914 } 1915 1916 void 1917 ural_set_rxantenna(struct ural_softc *sc, int antenna) 1918 { 1919 uint8_t rx; 1920 1921 rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK; 1922 if (antenna == 1) 1923 rx |= RAL_BBP_ANTA; 1924 else if (antenna == 2) 1925 rx |= RAL_BBP_ANTB; 1926 else 1927 rx |= RAL_BBP_DIVERSITY; 1928 1929 /* need to force no I/Q flip for RF 2525e and 2526 */ 1930 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526) 1931 rx &= ~RAL_BBP_FLIPIQ; 1932 1933 ural_bbp_write(sc, RAL_BBP_RX, rx); 1934 } 1935 1936 int 1937 
ural_init(struct ifnet *ifp) 1938 { 1939 struct ural_softc *sc = ifp->if_softc; 1940 struct ieee80211com *ic = &sc->sc_ic; 1941 uint16_t tmp; 1942 usbd_status error; 1943 int i, ntries; 1944 1945 ural_stop(ifp, 0); 1946 1947 /* initialize MAC registers to default values */ 1948 for (i = 0; i < nitems(ural_def_mac); i++) 1949 ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val); 1950 1951 /* wait for BBP and RF to wake up (this can take a long time!) */ 1952 for (ntries = 0; ntries < 100; ntries++) { 1953 tmp = ural_read(sc, RAL_MAC_CSR17); 1954 if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) == 1955 (RAL_BBP_AWAKE | RAL_RF_AWAKE)) 1956 break; 1957 DELAY(1000); 1958 } 1959 if (ntries == 100) { 1960 printf("%s: timeout waiting for BBP/RF to wakeup\n", 1961 sc->sc_dev.dv_xname); 1962 error = EIO; 1963 goto fail; 1964 } 1965 1966 /* we're ready! */ 1967 ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY); 1968 1969 /* set basic rate set (will be updated later) */ 1970 ural_write(sc, RAL_TXRX_CSR11, 0x153); 1971 1972 error = ural_bbp_init(sc); 1973 if (error != 0) 1974 goto fail; 1975 1976 /* set default BSS channel */ 1977 ic->ic_bss->ni_chan = ic->ic_ibss_chan; 1978 ural_set_chan(sc, ic->ic_bss->ni_chan); 1979 1980 /* clear statistic registers (STA_CSR0 to STA_CSR10) */ 1981 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); 1982 1983 /* set default sensitivity */ 1984 ural_bbp_write(sc, 17, 0x48); 1985 1986 ural_set_txantenna(sc, 1); 1987 ural_set_rxantenna(sc, 1); 1988 1989 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 1990 ural_set_macaddr(sc, ic->ic_myaddr); 1991 1992 /* 1993 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31). 1994 */ 1995 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1996 struct ieee80211_key *k = &ic->ic_nw_keys[i]; 1997 ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE, 1998 k->k_key, IEEE80211_KEYBUF_SIZE); 1999 } 2000 2001 /* 2002 * Allocate xfer for AMRR statistics requests. 2003 */ 2004 sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev); 2005 if (sc->amrr_xfer == NULL) { 2006 printf("%s: could not allocate AMRR xfer\n", 2007 sc->sc_dev.dv_xname); 2008 goto fail; 2009 } 2010 2011 /* 2012 * Open Tx and Rx USB bulk pipes. 2013 */ 2014 error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE, 2015 &sc->sc_tx_pipeh); 2016 if (error != 0) { 2017 printf("%s: could not open Tx pipe: %s\n", 2018 sc->sc_dev.dv_xname, usbd_errstr(error)); 2019 goto fail; 2020 } 2021 error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE, 2022 &sc->sc_rx_pipeh); 2023 if (error != 0) { 2024 printf("%s: could not open Rx pipe: %s\n", 2025 sc->sc_dev.dv_xname, usbd_errstr(error)); 2026 goto fail; 2027 } 2028 2029 /* 2030 * Allocate Tx and Rx xfer queues. 2031 */ 2032 error = ural_alloc_tx_list(sc); 2033 if (error != 0) { 2034 printf("%s: could not allocate Tx list\n", 2035 sc->sc_dev.dv_xname); 2036 goto fail; 2037 } 2038 error = ural_alloc_rx_list(sc); 2039 if (error != 0) { 2040 printf("%s: could not allocate Rx list\n", 2041 sc->sc_dev.dv_xname); 2042 goto fail; 2043 } 2044 2045 /* 2046 * Start up the receive pipe. 
2047 */ 2048 for (i = 0; i < RAL_RX_LIST_COUNT; i++) { 2049 struct ural_rx_data *data = &sc->rx_data[i]; 2050 2051 usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf, 2052 MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof); 2053 error = usbd_transfer(data->xfer); 2054 if (error != 0 && error != USBD_IN_PROGRESS) { 2055 printf("%s: could not queue Rx transfer\n", 2056 sc->sc_dev.dv_xname); 2057 goto fail; 2058 } 2059 } 2060 2061 /* kick Rx */ 2062 tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR; 2063 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 2064 tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR; 2065 #ifndef IEEE80211_STA_ONLY 2066 if (ic->ic_opmode != IEEE80211_M_HOSTAP) 2067 #endif 2068 tmp |= RAL_DROP_TODS; 2069 if (!(ifp->if_flags & IFF_PROMISC)) 2070 tmp |= RAL_DROP_NOT_TO_ME; 2071 } 2072 ural_write(sc, RAL_TXRX_CSR2, tmp); 2073 2074 ifp->if_flags &= ~IFF_OACTIVE; 2075 ifp->if_flags |= IFF_RUNNING; 2076 2077 if (ic->ic_opmode == IEEE80211_M_MONITOR) 2078 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 2079 else 2080 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); 2081 2082 return 0; 2083 2084 fail: ural_stop(ifp, 1); 2085 return error; 2086 } 2087 2088 void 2089 ural_stop(struct ifnet *ifp, int disable) 2090 { 2091 struct ural_softc *sc = ifp->if_softc; 2092 struct ieee80211com *ic = &sc->sc_ic; 2093 2094 sc->sc_tx_timer = 0; 2095 ifp->if_timer = 0; 2096 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2097 2098 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); /* free all nodes */ 2099 2100 /* disable Rx */ 2101 ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX); 2102 2103 /* reset ASIC and BBP (but won't reset MAC registers!) */ 2104 ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP); 2105 ural_write(sc, RAL_MAC_CSR1, 0); 2106 2107 if (sc->amrr_xfer != NULL) { 2108 usbd_free_xfer(sc->amrr_xfer); 2109 sc->amrr_xfer = NULL; 2110 } 2111 if (sc->sc_rx_pipeh != NULL) { 2112 usbd_abort_pipe(sc->sc_rx_pipeh); 2113 usbd_close_pipe(sc->sc_rx_pipeh); 2114 sc->sc_rx_pipeh = NULL; 2115 } 2116 if (sc->sc_tx_pipeh != NULL) { 2117 usbd_abort_pipe(sc->sc_tx_pipeh); 2118 usbd_close_pipe(sc->sc_tx_pipeh); 2119 sc->sc_tx_pipeh = NULL; 2120 } 2121 2122 ural_free_rx_list(sc); 2123 ural_free_tx_list(sc); 2124 } 2125 2126 void 2127 ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 2128 { 2129 /* start with lowest Tx rate */ 2130 ni->ni_txrate = 0; 2131 } 2132 2133 void 2134 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni) 2135 { 2136 int i; 2137 2138 /* clear statistic registers (STA_CSR0 to STA_CSR10) */ 2139 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); 2140 2141 ieee80211_amrr_node_init(&sc->amrr, &sc->amn); 2142 2143 /* set rate to some reasonable initial value */ 2144 for (i = ni->ni_rates.rs_nrates - 1; 2145 i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72; 2146 i--); 2147 ni->ni_txrate = i; 2148 2149 if (!usbd_is_dying(sc->sc_udev)) 2150 timeout_add_sec(&sc->amrr_to, 1); 2151 } 2152 2153 void 2154 ural_amrr_timeout(void *arg) 2155 { 2156 struct ural_softc *sc = arg; 2157 usb_device_request_t req; 2158 int s; 2159 2160 if (usbd_is_dying(sc->sc_udev)) 2161 return; 2162 2163 usbd_ref_incr(sc->sc_udev); 2164 2165 s = splusb(); 2166 2167 /* 2168 * Asynchronously read statistic registers (cleared by read). 
 */
        req.bmRequestType = UT_READ_VENDOR_DEVICE;
        req.bRequest = RAL_READ_MULTI_MAC;
        USETW(req.wValue, 0);
        USETW(req.wIndex, RAL_STA_CSR0);
        USETW(req.wLength, sizeof sc->sta);

        usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
            USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
            ural_amrr_update);
        (void)usbd_transfer(sc->amrr_xfer);

        splx(s);

        usbd_ref_decr(sc->sc_udev);
}

void
ural_amrr_update(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
        struct ural_softc *sc = (struct ural_softc *)priv;
        struct ifnet *ifp = &sc->sc_ic.ic_if;

        if (status != USBD_NORMAL_COMPLETION) {
                printf("%s: could not retrieve Tx statistics - cancelling "
                    "automatic rate control\n", sc->sc_dev.dv_xname);
                return;
        }

        /* count TX retry-fail as Tx errors */
        ifp->if_oerrors += letoh16(sc->sta[9]);

        sc->amn.amn_retrycnt =
            letoh16(sc->sta[7]) +       /* TX one-retry ok count */
            letoh16(sc->sta[8]) +       /* TX more-retry ok count */
            letoh16(sc->sta[9]);        /* TX retry-fail count */

        sc->amn.amn_txcnt =
            sc->amn.amn_retrycnt +
            letoh16(sc->sta[6]);        /* TX no-retry ok count */

        ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);

        if (!usbd_is_dying(sc->sc_udev))
                timeout_add_sec(&sc->amrr_to, 1);
}