1 /*	$OpenBSD: if_ral.c,v 1.146 2019/04/25 01:52:14 kevlo Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005, 2006
5  *	Damien Bergamini <damien.bergamini@free.fr>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Ralink Technology RT2500USB chipset driver
22  * http://www.ralinktech.com.tw/
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/timeout.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 
38 #include <machine/intr.h>
39 
40 #if NBPFILTER > 0
41 #include <net/bpf.h>
42 #endif
43 #include <net/if.h>
44 #include <net/if_dl.h>
45 #include <net/if_media.h>
46 
47 #include <netinet/in.h>
48 #include <netinet/if_ether.h>
49 
50 #include <net80211/ieee80211_var.h>
51 #include <net80211/ieee80211_amrr.h>
52 #include <net80211/ieee80211_radiotap.h>
53 
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usbdi.h>
56 #include <dev/usb/usbdi_util.h>
57 #include <dev/usb/usbdevs.h>
58 
59 #include <dev/usb/if_ralreg.h>
60 #include <dev/usb/if_ralvar.h>
61 
62 #ifdef URAL_DEBUG
63 #define DPRINTF(x)	do { if (ural_debug) printf x; } while (0)
64 #define DPRINTFN(n, x)	do { if (ural_debug >= (n)) printf x; } while (0)
65 int ural_debug = 0;
66 #else
67 #define DPRINTF(x)
68 #define DPRINTFN(n, x)
69 #endif
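
/*
 * Debug output is available only when URAL_DEBUG is defined at build time.
 * ural_debug is a plain global, so it can be raised at run time (e.g. from
 * ddb(4)) for more verbose traces: DPRINTFN(n, x) only prints when
 * ural_debug >= n, and levels up to 15 are used below for per-frame events.
 */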
70 
71 /* various supported device vendors/products */
72 static const struct usb_devno ural_devs[] = {
73 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570 },
74 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570_2 },
75 	{ USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D7050 },
76 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54G },
77 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
78 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_HU200TS },
79 	{ USB_VENDOR_CONCEPTRONIC2,	USB_PRODUCT_CONCEPTRONIC2_C54RU },
80 	{ USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_RT2570 },
81 	{ USB_VENDOR_GIGABYTE,		USB_PRODUCT_GIGABYTE_GNWBKG },
82 	{ USB_VENDOR_GUILLEMOT,		USB_PRODUCT_GUILLEMOT_HWGUSB254 },
83 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54 },
84 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54AI },
85 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54YB },
86 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_NINWIFI },
87 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570 },
88 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_2 },
89 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_3 },
90 	{ USB_VENDOR_NOVATECH,		USB_PRODUCT_NOVATECH_NV902W },
91 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570 },
92 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_2 },
93 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_3 },
94 	{ USB_VENDOR_SPHAIRON,		USB_PRODUCT_SPHAIRON_UB801R },
95 	{ USB_VENDOR_SURECOM,		USB_PRODUCT_SURECOM_RT2570 },
96 	{ USB_VENDOR_VTECH,		USB_PRODUCT_VTECH_RT2570 },
97 	{ USB_VENDOR_ZINWELL,		USB_PRODUCT_ZINWELL_RT2570 }
98 };
99 
100 int		ural_alloc_tx_list(struct ural_softc *);
101 void		ural_free_tx_list(struct ural_softc *);
102 int		ural_alloc_rx_list(struct ural_softc *);
103 void		ural_free_rx_list(struct ural_softc *);
104 int		ural_media_change(struct ifnet *);
105 void		ural_next_scan(void *);
106 void		ural_task(void *);
107 int		ural_newstate(struct ieee80211com *, enum ieee80211_state,
108 		    int);
109 void		ural_txeof(struct usbd_xfer *, void *, usbd_status);
110 void		ural_rxeof(struct usbd_xfer *, void *, usbd_status);
111 #if NBPFILTER > 0
112 uint8_t		ural_rxrate(const struct ural_rx_desc *);
113 #endif
114 int		ural_ack_rate(struct ieee80211com *, int);
115 uint16_t	ural_txtime(int, int, uint32_t);
116 uint8_t		ural_plcp_signal(int);
117 void		ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
118 		    uint32_t, int, int);
119 #ifndef IEEE80211_STA_ONLY
120 int		ural_tx_bcn(struct ural_softc *, struct mbuf *,
121 		    struct ieee80211_node *);
122 #endif
123 int		ural_tx_data(struct ural_softc *, struct mbuf *,
124 		    struct ieee80211_node *);
125 void		ural_start(struct ifnet *);
126 void		ural_watchdog(struct ifnet *);
127 int		ural_ioctl(struct ifnet *, u_long, caddr_t);
128 void		ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
129 uint16_t	ural_read(struct ural_softc *, uint16_t);
130 void		ural_read_multi(struct ural_softc *, uint16_t, void *, int);
131 void		ural_write(struct ural_softc *, uint16_t, uint16_t);
132 void		ural_write_multi(struct ural_softc *, uint16_t, void *, int);
133 void		ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
134 uint8_t		ural_bbp_read(struct ural_softc *, uint8_t);
135 void		ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
136 void		ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
137 void		ural_disable_rf_tune(struct ural_softc *);
138 void		ural_enable_tsf_sync(struct ural_softc *);
139 void		ural_update_slot(struct ural_softc *);
140 void		ural_set_txpreamble(struct ural_softc *);
141 void		ural_set_basicrates(struct ural_softc *);
142 void		ural_set_bssid(struct ural_softc *, const uint8_t *);
143 void		ural_set_macaddr(struct ural_softc *, const uint8_t *);
144 void		ural_update_promisc(struct ural_softc *);
145 const char	*ural_get_rf(int);
146 void		ural_read_eeprom(struct ural_softc *);
147 int		ural_bbp_init(struct ural_softc *);
148 void		ural_set_txantenna(struct ural_softc *, int);
149 void		ural_set_rxantenna(struct ural_softc *, int);
150 int		ural_init(struct ifnet *);
151 void		ural_stop(struct ifnet *, int);
152 void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
153 		    int);
154 void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
155 void		ural_amrr_timeout(void *);
156 void		ural_amrr_update(struct usbd_xfer *, void *,
157 		    usbd_status status);
158 
159 static const struct {
160 	uint16_t	reg;
161 	uint16_t	val;
162 } ural_def_mac[] = {
163 	RAL_DEF_MAC
164 };
165 
166 static const struct {
167 	uint8_t	reg;
168 	uint8_t	val;
169 } ural_def_bbp[] = {
170 	RAL_DEF_BBP
171 };
172 
173 static const uint32_t ural_rf2522_r2[] =    RAL_RF2522_R2;
174 static const uint32_t ural_rf2523_r2[] =    RAL_RF2523_R2;
175 static const uint32_t ural_rf2524_r2[] =    RAL_RF2524_R2;
176 static const uint32_t ural_rf2525_r2[] =    RAL_RF2525_R2;
177 static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
178 static const uint32_t ural_rf2525e_r2[] =   RAL_RF2525E_R2;
179 static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
180 static const uint32_t ural_rf2526_r2[] =    RAL_RF2526_R2;
181 
182 int ural_match(struct device *, void *, void *);
183 void ural_attach(struct device *, struct device *, void *);
184 int ural_detach(struct device *, int);
185 
186 struct cfdriver ural_cd = {
187 	NULL, "ural", DV_IFNET
188 };
189 
190 const struct cfattach ural_ca = {
191 	sizeof(struct ural_softc), ural_match, ural_attach, ural_detach
192 };
193 
194 int
195 ural_match(struct device *parent, void *match, void *aux)
196 {
197 	struct usb_attach_arg *uaa = aux;
198 
199 	if (uaa->configno != RAL_CONFIG_NO || uaa->ifaceno != RAL_IFACE_NO)
200 		return UMATCH_NONE;
201 
202 	return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
203 	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
204 }
205 
206 void
207 ural_attach(struct device *parent, struct device *self, void *aux)
208 {
209 	struct ural_softc *sc = (struct ural_softc *)self;
210 	struct usb_attach_arg *uaa = aux;
211 	struct ieee80211com *ic = &sc->sc_ic;
212 	struct ifnet *ifp = &ic->ic_if;
213 	usb_interface_descriptor_t *id;
214 	usb_endpoint_descriptor_t *ed;
215 	int i;
216 
217 	sc->sc_udev = uaa->device;
218 	sc->sc_iface = uaa->iface;
219 
220 	/*
221 	 * Find endpoints.
222 	 */
223 	id = usbd_get_interface_descriptor(sc->sc_iface);
224 
225 	sc->sc_rx_no = sc->sc_tx_no = -1;
226 	for (i = 0; i < id->bNumEndpoints; i++) {
227 		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
228 		if (ed == NULL) {
229 			printf("%s: no endpoint descriptor for iface %d\n",
230 			    sc->sc_dev.dv_xname, i);
231 			return;
232 		}
233 
234 		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
235 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
236 			sc->sc_rx_no = ed->bEndpointAddress;
237 		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
238 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
239 			sc->sc_tx_no = ed->bEndpointAddress;
240 	}
241 	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
242 		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
243 		return;
244 	}
245 
246 	usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC);
247 	timeout_set(&sc->scan_to, ural_next_scan, sc);
248 
249 	sc->amrr.amrr_min_success_threshold =  1;
250 	sc->amrr.amrr_max_success_threshold = 10;
251 	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);
252 
253 	/* retrieve RT2570 rev. no */
254 	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);
255 
256 	/* retrieve MAC address and various other things from EEPROM */
257 	ural_read_eeprom(sc);
258 
259 	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
260 	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
261 	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));
262 
263 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only OFDM; not used */
264 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
265 	ic->ic_state = IEEE80211_S_INIT;
266 
267 	/* set device capabilities */
268 	ic->ic_caps =
269 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
270 #ifndef IEEE80211_STA_ONLY
271 	    IEEE80211_C_IBSS |		/* IBSS mode supported */
272 	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
273 #endif
274 	    IEEE80211_C_TXPMGT |	/* tx power management */
275 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
276 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
277 	    IEEE80211_C_WEP |		/* s/w WEP */
278 	    IEEE80211_C_RSN;		/* WPA/RSN */
279 
280 	/* set supported .11b and .11g rates */
281 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
282 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
283 
284 	/* set supported .11b and .11g channels (1 through 14) */
285 	for (i = 1; i <= 14; i++) {
286 		ic->ic_channels[i].ic_freq =
287 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
288 		ic->ic_channels[i].ic_flags =
289 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
290 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
291 	}
292 
293 	ifp->if_softc = sc;
294 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
295 	ifp->if_ioctl = ural_ioctl;
296 	ifp->if_start = ural_start;
297 	ifp->if_watchdog = ural_watchdog;
298 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
299 
300 	if_attach(ifp);
301 	ieee80211_ifattach(ifp);
302 	ic->ic_newassoc = ural_newassoc;
303 
304 	/* override state transition machine */
305 	sc->sc_newstate = ic->ic_newstate;
306 	ic->ic_newstate = ural_newstate;
307 	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);
308 
309 #if NBPFILTER > 0
310 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
311 	    sizeof (struct ieee80211_frame) + 64);
312 
313 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
314 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
315 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);
316 
317 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
318 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
319 	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
320 #endif
321 }
322 
323 int
324 ural_detach(struct device *self, int flags)
325 {
326 	struct ural_softc *sc = (struct ural_softc *)self;
327 	struct ifnet *ifp = &sc->sc_ic.ic_if;
328 	int s;
329 
330 	s = splusb();
331 
332 	if (timeout_initialized(&sc->scan_to))
333 		timeout_del(&sc->scan_to);
334 	if (timeout_initialized(&sc->amrr_to))
335 		timeout_del(&sc->amrr_to);
336 
337 	usb_rem_wait_task(sc->sc_udev, &sc->sc_task);
338 
339 	usbd_ref_wait(sc->sc_udev);
340 
341 	if (ifp->if_softc != NULL) {
342 		ieee80211_ifdetach(ifp);	/* free all nodes */
343 		if_detach(ifp);
344 	}
345 
346 	if (sc->amrr_xfer != NULL) {
347 		usbd_free_xfer(sc->amrr_xfer);
348 		sc->amrr_xfer = NULL;
349 	}
350 
351 	if (sc->sc_rx_pipeh != NULL) {
352 		usbd_abort_pipe(sc->sc_rx_pipeh);
353 		usbd_close_pipe(sc->sc_rx_pipeh);
354 	}
355 
356 	if (sc->sc_tx_pipeh != NULL) {
357 		usbd_abort_pipe(sc->sc_tx_pipeh);
358 		usbd_close_pipe(sc->sc_tx_pipeh);
359 	}
360 
361 	ural_free_rx_list(sc);
362 	ural_free_tx_list(sc);
363 
364 	splx(s);
365 
366 	return 0;
367 }
368 
369 int
370 ural_alloc_tx_list(struct ural_softc *sc)
371 {
372 	int i, error;
373 
374 	sc->tx_cur = sc->tx_queued = 0;
375 
376 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
377 		struct ural_tx_data *data = &sc->tx_data[i];
378 
379 		data->sc = sc;
380 
381 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
382 		if (data->xfer == NULL) {
383 			printf("%s: could not allocate tx xfer\n",
384 			    sc->sc_dev.dv_xname);
385 			error = ENOMEM;
386 			goto fail;
387 		}
388 		data->buf = usbd_alloc_buffer(data->xfer,
389 		    RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
390 		if (data->buf == NULL) {
391 			printf("%s: could not allocate tx buffer\n",
392 			    sc->sc_dev.dv_xname);
393 			error = ENOMEM;
394 			goto fail;
395 		}
396 	}
397 
398 	return 0;
399 
400 fail:	ural_free_tx_list(sc);
401 	return error;
402 }
403 
404 void
405 ural_free_tx_list(struct ural_softc *sc)
406 {
407 	int i;
408 
409 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
410 		struct ural_tx_data *data = &sc->tx_data[i];
411 
412 		if (data->xfer != NULL) {
413 			usbd_free_xfer(data->xfer);
414 			data->xfer = NULL;
415 		}
416 		/*
417 		 * The node has already been freed at that point so don't call
418 		 * ieee80211_release_node() here.
419 		 */
420 		data->ni = NULL;
421 	}
422 }
423 
424 int
425 ural_alloc_rx_list(struct ural_softc *sc)
426 {
427 	int i, error;
428 
429 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
430 		struct ural_rx_data *data = &sc->rx_data[i];
431 
432 		data->sc = sc;
433 
434 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
435 		if (data->xfer == NULL) {
436 			printf("%s: could not allocate rx xfer\n",
437 			    sc->sc_dev.dv_xname);
438 			error = ENOMEM;
439 			goto fail;
440 		}
441 		if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
442 			printf("%s: could not allocate rx buffer\n",
443 			    sc->sc_dev.dv_xname);
444 			error = ENOMEM;
445 			goto fail;
446 		}
447 
448 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
449 		if (data->m == NULL) {
450 			printf("%s: could not allocate rx mbuf\n",
451 			    sc->sc_dev.dv_xname);
452 			error = ENOMEM;
453 			goto fail;
454 		}
455 		MCLGET(data->m, M_DONTWAIT);
456 		if (!(data->m->m_flags & M_EXT)) {
457 			printf("%s: could not allocate rx mbuf cluster\n",
458 			    sc->sc_dev.dv_xname);
459 			error = ENOMEM;
460 			goto fail;
461 		}
462 		data->buf = mtod(data->m, uint8_t *);
463 	}
464 
465 	return 0;
466 
467 fail:	ural_free_rx_list(sc);
468 	return error;
469 }
470 
471 void
472 ural_free_rx_list(struct ural_softc *sc)
473 {
474 	int i;
475 
476 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
477 		struct ural_rx_data *data = &sc->rx_data[i];
478 
479 		if (data->xfer != NULL) {
480 			usbd_free_xfer(data->xfer);
481 			data->xfer = NULL;
482 		}
483 		if (data->m != NULL) {
484 			m_freem(data->m);
485 			data->m = NULL;
486 		}
487 	}
488 }
489 
490 int
491 ural_media_change(struct ifnet *ifp)
492 {
493 	int error;
494 
495 	error = ieee80211_media_change(ifp);
496 	if (error != ENETRESET)
497 		return error;
498 
499 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
500 		error = ural_init(ifp);
501 
502 	return error;
503 }
504 
505 /*
506  * This function is called periodically (every 200ms) during scanning to
507  * switch from one channel to another.
508  */
509 void
510 ural_next_scan(void *arg)
511 {
512 	struct ural_softc *sc = arg;
513 	struct ieee80211com *ic = &sc->sc_ic;
514 	struct ifnet *ifp = &ic->ic_if;
515 
516 	if (usbd_is_dying(sc->sc_udev))
517 		return;
518 
519 	usbd_ref_incr(sc->sc_udev);
520 
521 	if (ic->ic_state == IEEE80211_S_SCAN)
522 		ieee80211_next_scan(ifp);
523 
524 	usbd_ref_decr(sc->sc_udev);
525 }
526 
527 void
528 ural_task(void *arg)
529 {
530 	struct ural_softc *sc = arg;
531 	struct ieee80211com *ic = &sc->sc_ic;
532 	enum ieee80211_state ostate;
533 	struct ieee80211_node *ni;
534 
535 	if (usbd_is_dying(sc->sc_udev))
536 		return;
537 
538 	ostate = ic->ic_state;
539 
540 	switch (sc->sc_state) {
541 	case IEEE80211_S_INIT:
542 		if (ostate == IEEE80211_S_RUN) {
543 			/* abort TSF synchronization */
544 			ural_write(sc, RAL_TXRX_CSR19, 0);
545 
546 			/* force tx led to stop blinking */
547 			ural_write(sc, RAL_MAC_CSR20, 0);
548 		}
549 		break;
550 
551 	case IEEE80211_S_SCAN:
552 		ural_set_chan(sc, ic->ic_bss->ni_chan);
553 		if (!usbd_is_dying(sc->sc_udev))
554 			timeout_add_msec(&sc->scan_to, 200);
555 		break;
556 
557 	case IEEE80211_S_AUTH:
558 		ural_set_chan(sc, ic->ic_bss->ni_chan);
559 		break;
560 
561 	case IEEE80211_S_ASSOC:
562 		ural_set_chan(sc, ic->ic_bss->ni_chan);
563 		break;
564 
565 	case IEEE80211_S_RUN:
566 		ural_set_chan(sc, ic->ic_bss->ni_chan);
567 
568 		ni = ic->ic_bss;
569 
570 		if (ic->ic_opmode != IEEE80211_M_MONITOR) {
571 			ural_update_slot(sc);
572 			ural_set_txpreamble(sc);
573 			ural_set_basicrates(sc);
574 			ural_set_bssid(sc, ni->ni_bssid);
575 		}
576 
577 #ifndef IEEE80211_STA_ONLY
578 		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
579 		    ic->ic_opmode == IEEE80211_M_IBSS) {
580 			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
581 			if (m == NULL) {
582 				printf("%s: could not allocate beacon\n",
583 				    sc->sc_dev.dv_xname);
584 				return;
585 			}
586 
587 			if (ural_tx_bcn(sc, m, ni) != 0) {
588 				m_freem(m);
589 				printf("%s: could not transmit beacon\n",
590 				    sc->sc_dev.dv_xname);
591 				return;
592 			}
593 
594 			/* beacon is no longer needed */
595 			m_freem(m);
596 		}
597 #endif
598 
599 		/* make tx led blink on tx (controlled by ASIC) */
600 		ural_write(sc, RAL_MAC_CSR20, 1);
601 
602 		if (ic->ic_opmode != IEEE80211_M_MONITOR)
603 			ural_enable_tsf_sync(sc);
604 
605 		if (ic->ic_opmode == IEEE80211_M_STA) {
606 			/* fake a join to init the tx rate */
607 			ural_newassoc(ic, ic->ic_bss, 1);
608 
609 			/* enable automatic rate control in STA mode */
610 			if (ic->ic_fixed_rate == -1)
611 				ural_amrr_start(sc, ic->ic_bss);
612 		}
613 
614 		break;
615 	}
616 
617 	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
618 }
619 
620 int
621 ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
622 {
623 	struct ural_softc *sc = ic->ic_if.if_softc;
624 
625 	usb_rem_task(sc->sc_udev, &sc->sc_task);
626 	timeout_del(&sc->scan_to);
627 	timeout_del(&sc->amrr_to);
628 
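	/*
	 * Programming the chip requires synchronous USB control transfers
	 * (ural_read/ural_write), which may sleep, so the work cannot be
	 * done here: net80211 can call ic_newstate in interrupt context.
	 * The target state is recorded below and the heavy lifting is
	 * deferred to ural_task(), which runs in the USB task thread and
	 * finally calls the saved sc_newstate hook.
	 */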
629 	/* do it in a process context */
630 	sc->sc_state = nstate;
631 	sc->sc_arg = arg;
632 	usb_add_task(sc->sc_udev, &sc->sc_task);
633 	return 0;
634 }
635 
636 /* quickly determine if a given rate is CCK or OFDM */
637 #define RAL_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
638 
639 #define RAL_ACK_SIZE	14	/* 10 + 4(FCS) */
640 #define RAL_CTS_SIZE	14	/* 10 + 4(FCS) */
641 
642 #define RAL_SIFS		10	/* us */
643 
644 #define RAL_RXTX_TURNAROUND	5	/* us */
645 
646 void
647 ural_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
648 {
649 	struct ural_tx_data *data = priv;
650 	struct ural_softc *sc = data->sc;
651 	struct ieee80211com *ic = &sc->sc_ic;
652 	struct ifnet *ifp = &ic->ic_if;
653 	int s;
654 
655 	if (status != USBD_NORMAL_COMPLETION) {
656 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
657 			return;
658 
659 		printf("%s: could not transmit buffer: %s\n",
660 		    sc->sc_dev.dv_xname, usbd_errstr(status));
661 
662 		if (status == USBD_STALLED)
663 			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);
664 
665 		ifp->if_oerrors++;
666 		return;
667 	}
668 
669 	s = splnet();
670 
671 	ieee80211_release_node(ic, data->ni);
672 	data->ni = NULL;
673 
674 	sc->tx_queued--;
675 
676 	DPRINTFN(10, ("tx done\n"));
677 
678 	sc->sc_tx_timer = 0;
679 	ifq_clr_oactive(&ifp->if_snd);
680 	ural_start(ifp);
681 
682 	splx(s);
683 }
684 
685 void
686 ural_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
687 {
688 	struct ural_rx_data *data = priv;
689 	struct ural_softc *sc = data->sc;
690 	struct ieee80211com *ic = &sc->sc_ic;
691 	struct ifnet *ifp = &ic->ic_if;
692 	const struct ural_rx_desc *desc;
693 	struct ieee80211_frame *wh;
694 	struct ieee80211_rxinfo rxi;
695 	struct ieee80211_node *ni;
696 	struct mbuf *mnew, *m;
697 	int s, len;
698 
699 	if (status != USBD_NORMAL_COMPLETION) {
700 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
701 			return;
702 
703 		if (status == USBD_STALLED)
704 			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
705 		goto skip;
706 	}
707 
708 	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);
709 
710 	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
711 		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
712 		    len));
713 		ifp->if_ierrors++;
714 		goto skip;
715 	}
716 
717 	/* rx descriptor is located at the end */
718 	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);
719 
720 	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
721 		/*
722 		 * This should not happen since we did not request to receive
723 		 * those frames when we filled RAL_TXRX_CSR2.
724 		 */
725 		DPRINTFN(5, ("PHY or CRC error\n"));
726 		ifp->if_ierrors++;
727 		goto skip;
728 	}
729 
730 	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
731 	if (mnew == NULL) {
732 		printf("%s: could not allocate rx mbuf\n",
733 		    sc->sc_dev.dv_xname);
734 		ifp->if_ierrors++;
735 		goto skip;
736 	}
737 	MCLGET(mnew, M_DONTWAIT);
738 	if (!(mnew->m_flags & M_EXT)) {
739 		printf("%s: could not allocate rx mbuf cluster\n",
740 		    sc->sc_dev.dv_xname);
741 		m_freem(mnew);
742 		ifp->if_ierrors++;
743 		goto skip;
744 	}
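	/*
	 * Swap the freshly allocated cluster into this Rx slot and hand the
	 * old mbuf up the stack; the slot thus always owns a valid buffer
	 * for the next transfer (on allocation failure above, the frame is
	 * simply dropped and the old buffer reused).
	 */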
745 	m = data->m;
746 	data->m = mnew;
747 	data->buf = mtod(data->m, uint8_t *);
748 
749 	/* finalize mbuf */
750 	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;
751 
752 	s = splnet();
753 
754 #if NBPFILTER > 0
755 	if (sc->sc_drvbpf != NULL) {
756 		struct mbuf mb;
757 		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;
758 
759 		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
760 		tap->wr_rate = ural_rxrate(desc);
761 		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
762 		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
763 		tap->wr_antenna = sc->rx_ant;
764 		tap->wr_antsignal = desc->rssi;
765 
766 		mb.m_data = (caddr_t)tap;
767 		mb.m_len = sc->sc_rxtap_len;
768 		mb.m_next = m;
769 		mb.m_nextpkt = NULL;
770 		mb.m_type = 0;
771 		mb.m_flags = 0;
772 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
773 	}
774 #endif
775 	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */
776 
777 	wh = mtod(m, struct ieee80211_frame *);
778 	ni = ieee80211_find_rxnode(ic, wh);
779 
780 	/* send the frame to the 802.11 layer */
781 	rxi.rxi_flags = 0;
782 	rxi.rxi_rssi = desc->rssi;
783 	rxi.rxi_tstamp = 0;	/* unused */
784 	ieee80211_input(ifp, m, ni, &rxi);
785 
786 	/* node is no longer needed */
787 	ieee80211_release_node(ic, ni);
788 
789 	splx(s);
790 
791 	DPRINTFN(15, ("rx done\n"));
792 
793 skip:	/* setup a new transfer */
794 	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
795 	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
796 	(void)usbd_transfer(xfer);
797 }
798 
799 /*
800  * This function is only used by the Rx radiotap code. It returns the rate at
801  * which a given frame was received.
802  */
803 #if NBPFILTER > 0
804 uint8_t
805 ural_rxrate(const struct ural_rx_desc *desc)
806 {
807 	if (letoh32(desc->flags) & RAL_RX_OFDM) {
808 		/* reverse function of ural_plcp_signal */
809 		switch (desc->rate) {
810 		case 0xb:	return 12;
811 		case 0xf:	return 18;
812 		case 0xa:	return 24;
813 		case 0xe:	return 36;
814 		case 0x9:	return 48;
815 		case 0xd:	return 72;
816 		case 0x8:	return 96;
817 		case 0xc:	return 108;
818 		}
819 	} else {
820 		if (desc->rate == 10)
821 			return 2;
822 		if (desc->rate == 20)
823 			return 4;
824 		if (desc->rate == 55)
825 			return 11;
826 		if (desc->rate == 110)
827 			return 22;
828 	}
829 	return 2;	/* should not get here */
830 }
831 #endif
832 
833 /*
834  * Return the expected ack rate for a frame transmitted at rate `rate'.
835  */
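/*
 * Per 802.11, an ACK is transmitted at the highest rate of the basic rate
 * set that is less than or equal to the rate of the frame being
 * acknowledged, using the same modulation class.  The values below
 * hard-code that rule for the usual 11b/g basic rates; rates are in
 * 500kb/s units (2 = 1Mb/s).
 */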
836 int
837 ural_ack_rate(struct ieee80211com *ic, int rate)
838 {
839 	switch (rate) {
840 	/* CCK rates */
841 	case 2:
842 		return 2;
843 	case 4:
844 	case 11:
845 	case 22:
846 		return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;
847 
848 	/* OFDM rates */
849 	case 12:
850 	case 18:
851 		return 12;
852 	case 24:
853 	case 36:
854 		return 24;
855 	case 48:
856 	case 72:
857 	case 96:
858 	case 108:
859 		return 48;
860 	}
861 
862 	/* default to 1Mbps */
863 	return 2;
864 }
865 
866 /*
867  * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
868  * The function automatically determines the operating mode depending on the
869  * given rate. `flags' indicates whether short preamble is in use or not.
870  */
871 uint16_t
872 ural_txtime(int len, int rate, uint32_t flags)
873 {
874 	uint16_t txtime;
875 
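	/*
	 * `rate' is in 500kb/s units, so a len-byte frame carries 8*len bits
	 * in 16*len/rate microseconds.  For OFDM the code counts 4us
	 * symbols: each symbol carries 2*rate bits and the payload is padded
	 * with the 16-bit SERVICE field and 6 tail bits, hence
	 * ceil((8*len + 22) / (2*rate)) symbols; 16 + 4 covers the PLCP
	 * preamble and SIGNAL symbol and the trailing 6 is the 802.11g
	 * signal extension.  For CCK, 72 + 24 and 144 + 48 are the short and
	 * long preamble + PLCP header durations.
	 */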
876 	if (RAL_RATE_IS_OFDM(rate)) {
877 		/* IEEE Std 802.11g-2003, p. 44 */
878 		txtime = (8 + 4 * len + 3 + rate - 1) / rate;
879 		txtime = 16 + 4 + 4 * txtime + 6;
880 	} else {
881 		/* IEEE Std 802.11b-1999, pp. 28 */
882 		txtime = (16 * len + rate - 1) / rate;
883 		if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
884 			txtime +=  72 + 24;
885 		else
886 			txtime += 144 + 48;
887 	}
888 	return txtime;
889 }
890 
891 uint8_t
892 ural_plcp_signal(int rate)
893 {
894 	switch (rate) {
895 	/* CCK rates (returned values are device-dependent) */
896 	case 2:		return 0x0;
897 	case 4:		return 0x1;
898 	case 11:	return 0x2;
899 	case 22:	return 0x3;
900 
901 	/* OFDM rates (cf. IEEE Std 802.11a-1999, p. 14, Table 80) */
902 	case 12:	return 0xb;
903 	case 18:	return 0xf;
904 	case 24:	return 0xa;
905 	case 36:	return 0xe;
906 	case 48:	return 0x9;
907 	case 72:	return 0xd;
908 	case 96:	return 0x8;
909 	case 108:	return 0xc;
910 
911 	/* unsupported rates (should not get here) */
912 	default:	return 0xff;
913 	}
914 }
915 
916 void
917 ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
918     uint32_t flags, int len, int rate)
919 {
920 	struct ieee80211com *ic = &sc->sc_ic;
921 	uint16_t plcp_length;
922 	int remainder;
923 
924 	desc->flags = htole32(flags);
925 	desc->flags |= htole32(len << 16);
926 
927 	desc->wme = htole16(
928 	    RAL_AIFSN(2) |
929 	    RAL_LOGCWMIN(3) |
930 	    RAL_LOGCWMAX(5));
931 
932 	/* setup PLCP fields */
933 	desc->plcp_signal  = ural_plcp_signal(rate);
934 	desc->plcp_service = 4;
935 
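	/*
	 * The PLCP LENGTH field is filled differently depending on the
	 * modulation: for OFDM it is the frame length in bytes (a 12-bit
	 * value split into plcp_length_hi/lo), while for CCK it is the
	 * frame duration in microseconds.  At 11Mb/s the rounded-up
	 * duration can make the receiver overcount the octets by one, so
	 * the RAL_PLCP_LENGEXT service bit flags that case (8*len mod 11
	 * equal to 1, 2 or 3, which is what the remainder test below
	 * implements).
	 */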
936 	len += IEEE80211_CRC_LEN;
937 	if (RAL_RATE_IS_OFDM(rate)) {
938 		desc->flags |= htole32(RAL_TX_OFDM);
939 
940 		plcp_length = len & 0xfff;
941 		desc->plcp_length_hi = plcp_length >> 6;
942 		desc->plcp_length_lo = plcp_length & 0x3f;
943 	} else {
944 		plcp_length = (16 * len + rate - 1) / rate;
945 		if (rate == 22) {
946 			remainder = (16 * len) % 22;
947 			if (remainder != 0 && remainder < 7)
948 				desc->plcp_service |= RAL_PLCP_LENGEXT;
949 		}
950 		desc->plcp_length_hi = plcp_length >> 8;
951 		desc->plcp_length_lo = plcp_length & 0xff;
952 
953 		if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
954 			desc->plcp_signal |= 0x08;
955 	}
956 
957 	desc->iv = 0;
958 	desc->eiv = 0;
959 }
960 
961 #define RAL_TX_TIMEOUT	5000
962 
963 #ifndef IEEE80211_STA_ONLY
964 int
965 ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
966 {
967 	struct ural_tx_desc *desc;
968 	struct usbd_xfer *xfer;
969 	usbd_status error;
970 	uint8_t cmd = 0;
971 	uint8_t *buf;
972 	int xferlen, rate = 2;
973 
974 	xfer = usbd_alloc_xfer(sc->sc_udev);
975 	if (xfer == NULL)
976 		return ENOMEM;
977 
978 	/* xfer length needs to be a multiple of two! */
979 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
980 
981 	buf = usbd_alloc_buffer(xfer, xferlen);
982 	if (buf == NULL) {
983 		usbd_free_xfer(xfer);
984 		return ENOMEM;
985 	}
986 
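	/*
	 * A single zero byte is sent on the bulk Tx pipe before the beacon
	 * frame itself.  Its purpose is not documented in this driver;
	 * presumably the RT2570 requires this short transfer before the
	 * beacon template is loaded (unverified assumption).
	 */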
987 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd,
988 	    USBD_FORCE_SHORT_XFER | USBD_SYNCHRONOUS, RAL_TX_TIMEOUT, NULL);
989 
990 	error = usbd_transfer(xfer);
991 	if (error != 0) {
992 		usbd_free_xfer(xfer);
993 		return error;
994 	}
995 
996 	desc = (struct ural_tx_desc *)buf;
997 
998 	m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE);
999 	ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP,
1000 	    m0->m_pkthdr.len, rate);
1001 
1002 	DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n",
1003 	    m0->m_pkthdr.len, rate, xferlen));
1004 
1005 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen,
1006 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY | USBD_SYNCHRONOUS,
1007 	    RAL_TX_TIMEOUT, NULL);
1008 
1009 	error = usbd_transfer(xfer);
1010 	usbd_free_xfer(xfer);
1011 
1012 	return error;
1013 }
1014 #endif
1015 
1016 int
1017 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
1018 {
1019 	struct ieee80211com *ic = &sc->sc_ic;
1020 	struct ural_tx_desc *desc;
1021 	struct ural_tx_data *data;
1022 	struct ieee80211_frame *wh;
1023 	struct ieee80211_key *k;
1024 	uint32_t flags = RAL_TX_NEWSEQ;
1025 	uint16_t dur;
1026 	usbd_status error;
1027 	int rate, xferlen, pktlen, needrts = 0, needcts = 0;
1028 
1029 	wh = mtod(m0, struct ieee80211_frame *);
1030 
1031 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1032 		k = ieee80211_get_txkey(ic, wh, ni);
1033 
1034 		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
1035 			return ENOBUFS;
1036 
1037 		/* packet header may have moved, reset our local pointer */
1038 		wh = mtod(m0, struct ieee80211_frame *);
1039 	}
1040 
1041 	/* compute actual packet length (including CRC and crypto overhead) */
1042 	pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
1043 
1044 	/* pickup a rate */
1045 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
1046 	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1047 	     IEEE80211_FC0_TYPE_MGT)) {
1048 		/* mgmt/multicast frames are sent at the lowest avail. rate */
1049 		rate = ni->ni_rates.rs_rates[0];
1050 	} else if (ic->ic_fixed_rate != -1) {
1051 		rate = ic->ic_sup_rates[ic->ic_curmode].
1052 		    rs_rates[ic->ic_fixed_rate];
1053 	} else
1054 		rate = ni->ni_rates.rs_rates[ni->ni_txrate];
1055 	if (rate == 0)
1056 		rate = 2;	/* XXX should not happen */
1057 	rate &= IEEE80211_RATE_VAL;
1058 
1059 	/* check if RTS/CTS or CTS-to-self protection must be used */
1060 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1061 		/* multicast frames are not sent at OFDM rates in 802.11b/g */
1062 		if (pktlen > ic->ic_rtsthreshold) {
1063 			needrts = 1;	/* RTS/CTS based on frame length */
1064 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1065 		    RAL_RATE_IS_OFDM(rate)) {
1066 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
1067 				needcts = 1;	/* CTS-to-self */
1068 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
1069 				needrts = 1;	/* RTS/CTS */
1070 		}
1071 	}
1072 	if (needrts || needcts) {
1073 		struct mbuf *mprot;
1074 		int protrate, ackrate;
1075 		uint16_t dur;
1076 
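		/*
		 * Protection frames are sent at 1Mb/s (protrate == 2 in
		 * 500kb/s units).  Their duration field must cover the rest
		 * of the exchange: the protected data frame, its ACK and the
		 * interframe spaces, plus, for RTS, the CTS that the
		 * receiver will send back.
		 */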
1077 		protrate = 2;
1078 		ackrate  = ural_ack_rate(ic, rate);
1079 
1080 		dur = ural_txtime(pktlen, rate, ic->ic_flags) +
1081 		      ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) +
1082 		      2 * RAL_SIFS;
1083 		if (needrts) {
1084 			dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic,
1085 			    protrate), ic->ic_flags) + RAL_SIFS;
1086 			mprot = ieee80211_get_rts(ic, wh, dur);
1087 		} else {
1088 			mprot = ieee80211_get_cts_to_self(ic, dur);
1089 		}
1090 		if (mprot == NULL) {
1091 			printf("%s: could not allocate protection frame\n",
1092 			    sc->sc_dev.dv_xname);
1093 			m_freem(m0);
1094 			return ENOBUFS;
1095 		}
1096 
1097 		data = &sc->tx_data[sc->tx_cur];
1098 		desc = (struct ural_tx_desc *)data->buf;
1099 
1100 		/* avoid multiple free() of the same node for each fragment */
1101 		data->ni = ieee80211_ref_node(ni);
1102 
1103 		m_copydata(mprot, 0, mprot->m_pkthdr.len,
1104 		    data->buf + RAL_TX_DESC_SIZE);
1105 		ural_setup_tx_desc(sc, desc,
1106 		    (needrts ? RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7),
1107 		    mprot->m_pkthdr.len, protrate);
1108 
1109 		/* no roundup necessary here */
1110 		xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len;
1111 
1112 		/* XXX may want to pass the protection frame to BPF */
1113 
1114 		/* mbuf is no longer needed */
1115 		m_freem(mprot);
1116 
1117 		usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf,
1118 		    xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
1119 		    RAL_TX_TIMEOUT, ural_txeof);
1120 		error = usbd_transfer(data->xfer);
1121 		if (error != 0 && error != USBD_IN_PROGRESS) {
1122 			m_freem(m0);
1123 			return error;
1124 		}
1125 
1126 		sc->tx_queued++;
1127 		sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1128 
1129 		flags |= RAL_TX_IFS_SIFS;
1130 	}
1131 
1132 	data = &sc->tx_data[sc->tx_cur];
1133 	desc = (struct ural_tx_desc *)data->buf;
1134 
1135 	data->ni = ni;
1136 
1137 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1138 		flags |= RAL_TX_NEED_ACK;
1139 		flags |= RAL_TX_RETRY(7);
1140 
1141 		dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate),
1142 		    ic->ic_flags) + RAL_SIFS;
1143 		*(uint16_t *)wh->i_dur = htole16(dur);
1144 
1145 #ifndef IEEE80211_STA_ONLY
1146 		/* tell hardware to set timestamp in probe responses */
1147 		if ((wh->i_fc[0] &
1148 		    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
1149 		    (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
1150 			flags |= RAL_TX_TIMESTAMP;
1151 #endif
1152 	}
1153 
1154 #if NBPFILTER > 0
1155 	if (sc->sc_drvbpf != NULL) {
1156 		struct mbuf mb;
1157 		struct ural_tx_radiotap_header *tap = &sc->sc_txtap;
1158 
1159 		tap->wt_flags = 0;
1160 		tap->wt_rate = rate;
1161 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1162 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1163 		tap->wt_antenna = sc->tx_ant;
1164 
1165 		mb.m_data = (caddr_t)tap;
1166 		mb.m_len = sc->sc_txtap_len;
1167 		mb.m_next = m0;
1168 		mb.m_nextpkt = NULL;
1169 		mb.m_type = 0;
1170 		mb.m_flags = 0;
1171 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1172 	}
1173 #endif
1174 
1175 	m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE);
1176 	ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate);
1177 
1178 	/* align end on a 2-bytes boundary */
1179 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
1180 
1181 	/*
1182 	 * No space left in the last URB to store the extra 2 bytes, force
1183 	 * sending of another URB.
1184 	 */
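	/*
	 * (64 is presumably the Tx bulk endpoint's maximum packet size:
	 * when the length is an exact multiple of it, the two pad bytes
	 * would not fit in the last packet, so the transfer is enlarged to
	 * push them into an extra short packet.)
	 */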
1185 	if ((xferlen % 64) == 0)
1186 		xferlen += 2;
1187 
1188 	DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n",
1189 	    m0->m_pkthdr.len, rate, xferlen));
1190 
1191 	/* mbuf is no longer needed */
1192 	m_freem(m0);
1193 
1194 	usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen,
1195 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof);
1196 	error = usbd_transfer(data->xfer);
1197 	if (error != 0 && error != USBD_IN_PROGRESS)
1198 		return error;
1199 
1200 	sc->tx_queued++;
1201 	sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1202 
1203 	return 0;
1204 }
1205 
1206 void
1207 ural_start(struct ifnet *ifp)
1208 {
1209 	struct ural_softc *sc = ifp->if_softc;
1210 	struct ieee80211com *ic = &sc->sc_ic;
1211 	struct ieee80211_node *ni;
1212 	struct mbuf *m0;
1213 
1214 	/*
1215 	 * net80211 may still try to send management frames even if the
1216 	 * IFF_RUNNING flag is not set...
1217 	 */
1218 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1219 		return;
1220 
1221 	for (;;) {
1222 		if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
1223 			ifq_set_oactive(&ifp->if_snd);
1224 			break;
1225 		}
1226 
1227 		m0 = mq_dequeue(&ic->ic_mgtq);
1228 		if (m0 != NULL) {
1229 			ni = m0->m_pkthdr.ph_cookie;
1230 #if NBPFILTER > 0
1231 			if (ic->ic_rawbpf != NULL)
1232 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1233 #endif
1234 			if (ural_tx_data(sc, m0, ni) != 0)
1235 				break;
1236 
1237 		} else {
1238 			if (ic->ic_state != IEEE80211_S_RUN)
1239 				break;
1240 
1241 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1242 			if (m0 == NULL)
1243 				break;
1244 #if NBPFILTER > 0
1245 			if (ifp->if_bpf != NULL)
1246 				bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1247 #endif
1248 			m0 = ieee80211_encap(ifp, m0, &ni);
1249 			if (m0 == NULL)
1250 				continue;
1251 #if NBPFILTER > 0
1252 			if (ic->ic_rawbpf != NULL)
1253 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1254 #endif
1255 			if (ural_tx_data(sc, m0, ni) != 0) {
1256 				if (ni != NULL)
1257 					ieee80211_release_node(ic, ni);
1258 				ifp->if_oerrors++;
1259 				break;
1260 			}
1261 		}
1262 
1263 		sc->sc_tx_timer = 5;
1264 		ifp->if_timer = 1;
1265 	}
1266 }
1267 
1268 void
1269 ural_watchdog(struct ifnet *ifp)
1270 {
1271 	struct ural_softc *sc = ifp->if_softc;
1272 
1273 	ifp->if_timer = 0;
1274 
1275 	if (sc->sc_tx_timer > 0) {
1276 		if (--sc->sc_tx_timer == 0) {
1277 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1278 			/*ural_init(ifp); XXX needs a process context! */
1279 			ifp->if_oerrors++;
1280 			return;
1281 		}
1282 		ifp->if_timer = 1;
1283 	}
1284 
1285 	ieee80211_watchdog(ifp);
1286 }
1287 
1288 int
1289 ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1290 {
1291 	struct ural_softc *sc = ifp->if_softc;
1292 	struct ieee80211com *ic = &sc->sc_ic;
1293 	int s, error = 0;
1294 
1295 	if (usbd_is_dying(sc->sc_udev))
1296 		return ENXIO;
1297 
1298 	usbd_ref_incr(sc->sc_udev);
1299 
1300 	s = splnet();
1301 
1302 	switch (cmd) {
1303 	case SIOCSIFADDR:
1304 		ifp->if_flags |= IFF_UP;
1305 		/* FALLTHROUGH */
1306 	case SIOCSIFFLAGS:
1307 		if (ifp->if_flags & IFF_UP) {
1308 			if (ifp->if_flags & IFF_RUNNING)
1309 				ural_update_promisc(sc);
1310 			else
1311 				ural_init(ifp);
1312 		} else {
1313 			if (ifp->if_flags & IFF_RUNNING)
1314 				ural_stop(ifp, 1);
1315 		}
1316 		break;
1317 
1318 	case SIOCS80211CHANNEL:
1319 		/*
1320 		 * This allows for fast channel switching in monitor mode
1321 		 * (used by kismet). In IBSS mode, we must explicitly reset
1322 		 * the interface to generate a new beacon frame.
1323 		 */
1324 		error = ieee80211_ioctl(ifp, cmd, data);
1325 		if (error == ENETRESET &&
1326 		    ic->ic_opmode == IEEE80211_M_MONITOR) {
1327 			if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1328 			    (IFF_UP | IFF_RUNNING))
1329 				ural_set_chan(sc, ic->ic_ibss_chan);
1330 			error = 0;
1331 		}
1332 		break;
1333 
1334 	default:
1335 		error = ieee80211_ioctl(ifp, cmd, data);
1336 	}
1337 
1338 	if (error == ENETRESET) {
1339 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1340 		    (IFF_UP | IFF_RUNNING))
1341 			ural_init(ifp);
1342 		error = 0;
1343 	}
1344 
1345 	splx(s);
1346 
1347 	usbd_ref_decr(sc->sc_udev);
1348 
1349 	return error;
1350 }
1351 
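/*
 * All device access below goes through vendor-specific control requests on
 * the default control pipe: RAL_READ_EEPROM, RAL_READ_MAC/RAL_WRITE_MAC and
 * their _MULTI variants.  The EEPROM or register address is passed in
 * wIndex, a single 16-bit value travels in wValue (register writes), and
 * larger buffers go through the data stage with their size in wLength.
 */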
1352 void
1353 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len)
1354 {
1355 	usb_device_request_t req;
1356 	usbd_status error;
1357 
1358 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1359 	req.bRequest = RAL_READ_EEPROM;
1360 	USETW(req.wValue, 0);
1361 	USETW(req.wIndex, addr);
1362 	USETW(req.wLength, len);
1363 
1364 	error = usbd_do_request(sc->sc_udev, &req, buf);
1365 	if (error != 0) {
1366 		printf("%s: could not read EEPROM: %s\n",
1367 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1368 	}
1369 }
1370 
1371 uint16_t
1372 ural_read(struct ural_softc *sc, uint16_t reg)
1373 {
1374 	usb_device_request_t req;
1375 	usbd_status error;
1376 	uint16_t val;
1377 
1378 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1379 	req.bRequest = RAL_READ_MAC;
1380 	USETW(req.wValue, 0);
1381 	USETW(req.wIndex, reg);
1382 	USETW(req.wLength, sizeof (uint16_t));
1383 
1384 	error = usbd_do_request(sc->sc_udev, &req, &val);
1385 	if (error != 0) {
1386 		printf("%s: could not read MAC register: %s\n",
1387 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1388 		return 0;
1389 	}
1390 	return letoh16(val);
1391 }
1392 
1393 void
1394 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1395 {
1396 	usb_device_request_t req;
1397 	usbd_status error;
1398 
1399 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1400 	req.bRequest = RAL_READ_MULTI_MAC;
1401 	USETW(req.wValue, 0);
1402 	USETW(req.wIndex, reg);
1403 	USETW(req.wLength, len);
1404 
1405 	error = usbd_do_request(sc->sc_udev, &req, buf);
1406 	if (error != 0) {
1407 		printf("%s: could not read MAC register: %s\n",
1408 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1409 	}
1410 }
1411 
1412 void
1413 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val)
1414 {
1415 	usb_device_request_t req;
1416 	usbd_status error;
1417 
1418 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1419 	req.bRequest = RAL_WRITE_MAC;
1420 	USETW(req.wValue, val);
1421 	USETW(req.wIndex, reg);
1422 	USETW(req.wLength, 0);
1423 
1424 	error = usbd_do_request(sc->sc_udev, &req, NULL);
1425 	if (error != 0) {
1426 		printf("%s: could not write MAC register: %s\n",
1427 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1428 	}
1429 }
1430 
1431 void
1432 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1433 {
1434 	usb_device_request_t req;
1435 	usbd_status error;
1436 
1437 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1438 	req.bRequest = RAL_WRITE_MULTI_MAC;
1439 	USETW(req.wValue, 0);
1440 	USETW(req.wIndex, reg);
1441 	USETW(req.wLength, len);
1442 
1443 	error = usbd_do_request(sc->sc_udev, &req, buf);
1444 	if (error != 0) {
1445 		printf("%s: could not write MAC register: %s\n",
1446 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1447 	}
1448 }
1449 
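/*
 * BBP registers are not accessed directly: the register number and value
 * are shuttled through PHY_CSR7, while PHY_CSR8 exposes a busy flag that
 * must be polled before posting a write or after posting a read request.
 */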
1450 void
1451 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val)
1452 {
1453 	uint16_t tmp;
1454 	int ntries;
1455 
1456 	for (ntries = 0; ntries < 5; ntries++) {
1457 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1458 			break;
1459 	}
1460 	if (ntries == 5) {
1461 		printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname);
1462 		return;
1463 	}
1464 
1465 	tmp = reg << 8 | val;
1466 	ural_write(sc, RAL_PHY_CSR7, tmp);
1467 }
1468 
1469 uint8_t
1470 ural_bbp_read(struct ural_softc *sc, uint8_t reg)
1471 {
1472 	uint16_t val;
1473 	int ntries;
1474 
1475 	val = RAL_BBP_WRITE | reg << 8;
1476 	ural_write(sc, RAL_PHY_CSR7, val);
1477 
1478 	for (ntries = 0; ntries < 5; ntries++) {
1479 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1480 			break;
1481 	}
1482 	if (ntries == 5) {
1483 		printf("%s: could not read BBP\n", sc->sc_dev.dv_xname);
1484 		return 0;
1485 	}
1486 	return ural_read(sc, RAL_PHY_CSR7) & 0xff;
1487 }
1488 
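/*
 * RF registers are written serially: the 20-bit value and 2-bit register
 * index are packed into one word together with the busy and 20-bit-format
 * flags, and the low and high halves of that word are pushed through
 * PHY_CSR9 and PHY_CSR10.  PHY_CSR10 also exposes the LO busy bit that is
 * polled before issuing a new write.
 */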
1489 void
1490 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
1491 {
1492 	uint32_t tmp;
1493 	int ntries;
1494 
1495 	for (ntries = 0; ntries < 5; ntries++) {
1496 		if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
1497 			break;
1498 	}
1499 	if (ntries == 5) {
1500 		printf("%s: could not write to RF\n", sc->sc_dev.dv_xname);
1501 		return;
1502 	}
1503 
1504 	tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
1505 	ural_write(sc, RAL_PHY_CSR9,  tmp & 0xffff);
1506 	ural_write(sc, RAL_PHY_CSR10, tmp >> 16);
1507 
1508 	/* remember last written value in sc */
1509 	sc->rf_regs[reg] = val;
1510 
1511 	DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff));
1512 }
1513 
1514 void
1515 ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
1516 {
1517 	struct ieee80211com *ic = &sc->sc_ic;
1518 	uint8_t power, tmp;
1519 	u_int chan;
1520 
1521 	chan = ieee80211_chan2ieee(ic, c);
1522 	if (chan == 0 || chan == IEEE80211_CHAN_ANY)
1523 		return;
1524 
1525 	power = min(sc->txpow[chan - 1], 31);
1526 
1527 	DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power));
1528 
1529 	switch (sc->rf_rev) {
1530 	case RAL_RF_2522:
1531 		ural_rf_write(sc, RAL_RF1, 0x00814);
1532 		ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
1533 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1534 		break;
1535 
1536 	case RAL_RF_2523:
1537 		ural_rf_write(sc, RAL_RF1, 0x08804);
1538 		ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
1539 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
1540 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1541 		break;
1542 
1543 	case RAL_RF_2524:
1544 		ural_rf_write(sc, RAL_RF1, 0x0c808);
1545 		ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
1546 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1547 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1548 		break;
1549 
1550 	case RAL_RF_2525:
1551 		ural_rf_write(sc, RAL_RF1, 0x08808);
1552 		ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
1553 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1554 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1555 
1556 		ural_rf_write(sc, RAL_RF1, 0x08808);
1557 		ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
1558 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1559 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1560 		break;
1561 
1562 	case RAL_RF_2525E:
1563 		ural_rf_write(sc, RAL_RF1, 0x08808);
1564 		ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
1565 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1566 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
1567 		break;
1568 
1569 	case RAL_RF_2526:
1570 		ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
1571 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1572 		ural_rf_write(sc, RAL_RF1, 0x08804);
1573 
1574 		ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
1575 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1576 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1577 		break;
1578 	}
1579 
1580 	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
1581 	    ic->ic_state != IEEE80211_S_SCAN) {
1582 		/* set Japan filter bit for channel 14 */
1583 		tmp = ural_bbp_read(sc, 70);
1584 
1585 		tmp &= ~RAL_JAPAN_FILTER;
1586 		if (chan == 14)
1587 			tmp |= RAL_JAPAN_FILTER;
1588 
1589 		ural_bbp_write(sc, 70, tmp);
1590 
1591 		/* clear CRC errors */
1592 		ural_read(sc, RAL_STA_CSR0);
1593 
1594 		DELAY(1000); /* RF needs a 1ms delay here */
1595 		ural_disable_rf_tune(sc);
1596 	}
1597 }
1598 
1599 /*
1600  * Disable RF auto-tuning.
1601  */
1602 void
1603 ural_disable_rf_tune(struct ural_softc *sc)
1604 {
1605 	uint32_t tmp;
1606 
1607 	if (sc->rf_rev != RAL_RF_2523) {
1608 		tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
1609 		ural_rf_write(sc, RAL_RF1, tmp);
1610 	}
1611 
1612 	tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
1613 	ural_rf_write(sc, RAL_RF3, tmp);
1614 
1615 	DPRINTFN(2, ("disabling RF autotune\n"));
1616 }
1617 
1618 /*
1619  * Refer to IEEE Std 802.11-1999 p. 123 for more information on TSF
1620  * synchronization.
1621  */
1622 void
1623 ural_enable_tsf_sync(struct ural_softc *sc)
1624 {
1625 	struct ieee80211com *ic = &sc->sc_ic;
1626 	uint16_t logcwmin, preload, tmp;
1627 
1628 	/* first, disable TSF synchronization */
1629 	ural_write(sc, RAL_TXRX_CSR19, 0);
1630 
1631 	tmp = (16 * ic->ic_bss->ni_intval) << 4;
1632 	ural_write(sc, RAL_TXRX_CSR18, tmp);
1633 
1634 #ifndef IEEE80211_STA_ONLY
1635 	if (ic->ic_opmode == IEEE80211_M_IBSS) {
1636 		logcwmin = 2;
1637 		preload = 320;
1638 	} else
1639 #endif
1640 	{
1641 		logcwmin = 0;
1642 		preload = 6;
1643 	}
1644 	tmp = logcwmin << 12 | preload;
1645 	ural_write(sc, RAL_TXRX_CSR20, tmp);
1646 
1647 	/* finally, enable TSF synchronization */
1648 	tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN;
1649 	if (ic->ic_opmode == IEEE80211_M_STA)
1650 		tmp |= RAL_ENABLE_TSF_SYNC(1);
1651 #ifndef IEEE80211_STA_ONLY
1652 	else
1653 		tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR;
1654 #endif
1655 	ural_write(sc, RAL_TXRX_CSR19, tmp);
1656 
1657 	DPRINTF(("enabling TSF synchronization\n"));
1658 }
1659 
1660 void
1661 ural_update_slot(struct ural_softc *sc)
1662 {
1663 	struct ieee80211com *ic = &sc->sc_ic;
1664 	uint16_t slottime, sifs, eifs;
1665 
1666 	slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ?
1667 	    IEEE80211_DUR_DS_SHSLOT : IEEE80211_DUR_DS_SLOT;
1668 
1669 	/*
1670 	 * These settings may sound a bit inconsistent but this is what the
1671 	 * reference driver does.
1672 	 */
1673 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1674 		sifs = 16 - RAL_RXTX_TURNAROUND;
1675 		eifs = 364;
1676 	} else {
1677 		sifs = 10 - RAL_RXTX_TURNAROUND;
1678 		eifs = 64;
1679 	}
1680 
1681 	ural_write(sc, RAL_MAC_CSR10, slottime);
1682 	ural_write(sc, RAL_MAC_CSR11, sifs);
1683 	ural_write(sc, RAL_MAC_CSR12, eifs);
1684 }
1685 
1686 void
1687 ural_set_txpreamble(struct ural_softc *sc)
1688 {
1689 	uint16_t tmp;
1690 
1691 	tmp = ural_read(sc, RAL_TXRX_CSR10);
1692 
1693 	tmp &= ~RAL_SHORT_PREAMBLE;
1694 	if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE)
1695 		tmp |= RAL_SHORT_PREAMBLE;
1696 
1697 	ural_write(sc, RAL_TXRX_CSR10, tmp);
1698 }
1699 
1700 void
1701 ural_set_basicrates(struct ural_softc *sc)
1702 {
1703 	struct ieee80211com *ic = &sc->sc_ic;
1704 
1705 	/* update basic rate set */
1706 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1707 		/* 11b basic rates: 1, 2Mbps */
1708 		ural_write(sc, RAL_TXRX_CSR11, 0x3);
1709 	} else {
1710 		/* 11b/g basic rates: 1, 2, 5.5, 11Mbps */
1711 		ural_write(sc, RAL_TXRX_CSR11, 0xf);
1712 	}
1713 }
1714 
1715 void
1716 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid)
1717 {
1718 	uint16_t tmp;
1719 
1720 	tmp = bssid[0] | bssid[1] << 8;
1721 	ural_write(sc, RAL_MAC_CSR5, tmp);
1722 
1723 	tmp = bssid[2] | bssid[3] << 8;
1724 	ural_write(sc, RAL_MAC_CSR6, tmp);
1725 
1726 	tmp = bssid[4] | bssid[5] << 8;
1727 	ural_write(sc, RAL_MAC_CSR7, tmp);
1728 
1729 	DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid)));
1730 }
1731 
1732 void
1733 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr)
1734 {
1735 	uint16_t tmp;
1736 
1737 	tmp = addr[0] | addr[1] << 8;
1738 	ural_write(sc, RAL_MAC_CSR2, tmp);
1739 
1740 	tmp = addr[2] | addr[3] << 8;
1741 	ural_write(sc, RAL_MAC_CSR3, tmp);
1742 
1743 	tmp = addr[4] | addr[5] << 8;
1744 	ural_write(sc, RAL_MAC_CSR4, tmp);
1745 
1746 	DPRINTF(("setting MAC address to %s\n",
1747 	    ether_sprintf((uint8_t *)addr)));
1748 }
1749 
1750 void
1751 ural_update_promisc(struct ural_softc *sc)
1752 {
1753 	struct ifnet *ifp = &sc->sc_ic.ic_if;
1754 	uint16_t tmp;
1755 
1756 	tmp = ural_read(sc, RAL_TXRX_CSR2);
1757 
1758 	tmp &= ~RAL_DROP_NOT_TO_ME;
1759 	if (!(ifp->if_flags & IFF_PROMISC))
1760 		tmp |= RAL_DROP_NOT_TO_ME;
1761 
1762 	ural_write(sc, RAL_TXRX_CSR2, tmp);
1763 
1764 	DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
1765 	    "entering" : "leaving"));
1766 }
1767 
1768 const char *
1769 ural_get_rf(int rev)
1770 {
1771 	switch (rev) {
1772 	case RAL_RF_2522:	return "RT2522";
1773 	case RAL_RF_2523:	return "RT2523";
1774 	case RAL_RF_2524:	return "RT2524";
1775 	case RAL_RF_2525:	return "RT2525";
1776 	case RAL_RF_2525E:	return "RT2525e";
1777 	case RAL_RF_2526:	return "RT2526";
1778 	case RAL_RF_5222:	return "RT5222";
1779 	default:		return "unknown";
1780 	}
1781 }
1782 
1783 void
1784 ural_read_eeprom(struct ural_softc *sc)
1785 {
1786 	struct ieee80211com *ic = &sc->sc_ic;
1787 	uint16_t val;
1788 
1789 	/* retrieve MAC/BBP type */
1790 	ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2);
1791 	sc->macbbp_rev = letoh16(val);
1792 
1793 	ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
1794 	val = letoh16(val);
1795 	sc->rf_rev =   (val >> 11) & 0x7;
1796 	sc->hw_radio = (val >> 10) & 0x1;
1797 	sc->led_mode = (val >> 6)  & 0x7;
1798 	sc->rx_ant =   (val >> 4)  & 0x3;
1799 	sc->tx_ant =   (val >> 2)  & 0x3;
1800 	sc->nb_ant =   val & 0x3;
1801 
1802 	/* read MAC address */
1803 	ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6);
1804 
1805 	/* read default values for BBP registers */
1806 	ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);
1807 
1808 	/* read Tx power for all b/g channels */
1809 	ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
1810 }
1811 
1812 int
1813 ural_bbp_init(struct ural_softc *sc)
1814 {
1815 	int i, ntries;
1816 
1817 	/* wait for BBP to be ready */
1818 	for (ntries = 0; ntries < 100; ntries++) {
1819 		if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
1820 			break;
1821 		DELAY(1000);
1822 	}
1823 	if (ntries == 100) {
1824 		printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname);
1825 		return EIO;
1826 	}
1827 
1828 	/* initialize BBP registers to default values */
1829 	for (i = 0; i < nitems(ural_def_bbp); i++)
1830 		ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);
1831 
1832 #if 0
1833 	/* initialize BBP registers to values stored in EEPROM */
1834 	for (i = 0; i < 16; i++) {
1835 		if (sc->bbp_prom[i].reg == 0xff)
1836 			continue;
1837 		ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
1838 	}
1839 #endif
1840 
1841 	return 0;
1842 }
1843 
1844 void
1845 ural_set_txantenna(struct ural_softc *sc, int antenna)
1846 {
1847 	uint16_t tmp;
1848 	uint8_t tx;
1849 
1850 	tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
1851 	if (antenna == 1)
1852 		tx |= RAL_BBP_ANTA;
1853 	else if (antenna == 2)
1854 		tx |= RAL_BBP_ANTB;
1855 	else
1856 		tx |= RAL_BBP_DIVERSITY;
1857 
1858 	/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
1859 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
1860 	    sc->rf_rev == RAL_RF_5222)
1861 		tx |= RAL_BBP_FLIPIQ;
1862 
1863 	ural_bbp_write(sc, RAL_BBP_TX, tx);
1864 
1865 	/* update flags in PHY_CSR5 and PHY_CSR6 too */
1866 	tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
1867 	ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));
1868 
1869 	tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
1870 	ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
1871 }
1872 
1873 void
1874 ural_set_rxantenna(struct ural_softc *sc, int antenna)
1875 {
1876 	uint8_t rx;
1877 
1878 	rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
1879 	if (antenna == 1)
1880 		rx |= RAL_BBP_ANTA;
1881 	else if (antenna == 2)
1882 		rx |= RAL_BBP_ANTB;
1883 	else
1884 		rx |= RAL_BBP_DIVERSITY;
1885 
1886 	/* need to force no I/Q flip for RF 2525e and 2526 */
1887 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
1888 		rx &= ~RAL_BBP_FLIPIQ;
1889 
1890 	ural_bbp_write(sc, RAL_BBP_RX, rx);
1891 }
1892 
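/*
 * Bring the interface up: program the default MAC register values, wait
 * for the BBP and RF to wake up, initialize the BBP, tune to the initial
 * channel, load the WEP keys, open the Tx/Rx bulk pipes, allocate the
 * transfer lists, prime the Rx pipe and finally program the Rx filter
 * (RAL_TXRX_CSR2) before handing control back to the net80211 state
 * machine (SCAN, or RUN in monitor mode).
 */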
1893 int
1894 ural_init(struct ifnet *ifp)
1895 {
1896 	struct ural_softc *sc = ifp->if_softc;
1897 	struct ieee80211com *ic = &sc->sc_ic;
1898 	uint16_t tmp;
1899 	usbd_status error;
1900 	int i, ntries;
1901 
1902 	ural_stop(ifp, 0);
1903 
1904 	/* initialize MAC registers to default values */
1905 	for (i = 0; i < nitems(ural_def_mac); i++)
1906 		ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);
1907 
1908 	/* wait for BBP and RF to wake up (this can take a long time!) */
1909 	for (ntries = 0; ntries < 100; ntries++) {
1910 		tmp = ural_read(sc, RAL_MAC_CSR17);
1911 		if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
1912 		    (RAL_BBP_AWAKE | RAL_RF_AWAKE))
1913 			break;
1914 		DELAY(1000);
1915 	}
1916 	if (ntries == 100) {
1917 		printf("%s: timeout waiting for BBP/RF to wakeup\n",
1918 		    sc->sc_dev.dv_xname);
1919 		error = EIO;
1920 		goto fail;
1921 	}
1922 
1923 	/* we're ready! */
1924 	ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);
1925 
1926 	/* set basic rate set (will be updated later) */
1927 	ural_write(sc, RAL_TXRX_CSR11, 0x153);
1928 
1929 	error = ural_bbp_init(sc);
1930 	if (error != 0)
1931 		goto fail;
1932 
1933 	/* set default BSS channel */
1934 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
1935 	ural_set_chan(sc, ic->ic_bss->ni_chan);
1936 
1937 	/* clear statistic registers (STA_CSR0 to STA_CSR10) */
1938 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
1939 
1940 	/* set default sensitivity */
1941 	ural_bbp_write(sc, 17, 0x48);
1942 
1943 	ural_set_txantenna(sc, 1);
1944 	ural_set_rxantenna(sc, 1);
1945 
1946 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
1947 	ural_set_macaddr(sc, ic->ic_myaddr);
1948 
1949 	/*
1950 	 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31).
1951 	 */
1952 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1953 		struct ieee80211_key *k = &ic->ic_nw_keys[i];
1954 		ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE,
1955 		    k->k_key, IEEE80211_KEYBUF_SIZE);
1956 	}
1957 
1958 	/*
1959 	 * Allocate xfer for AMRR statistics requests.
1960 	 */
1961 	sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev);
1962 	if (sc->amrr_xfer == NULL) {
1963 		printf("%s: could not allocate AMRR xfer\n",
1964 		    sc->sc_dev.dv_xname);
		error = ENOMEM;
1965 		goto fail;
1966 	}
1967 
1968 	/*
1969 	 * Open Tx and Rx USB bulk pipes.
1970 	 */
1971 	error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE,
1972 	    &sc->sc_tx_pipeh);
1973 	if (error != 0) {
1974 		printf("%s: could not open Tx pipe: %s\n",
1975 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1976 		goto fail;
1977 	}
1978 	error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE,
1979 	    &sc->sc_rx_pipeh);
1980 	if (error != 0) {
1981 		printf("%s: could not open Rx pipe: %s\n",
1982 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1983 		goto fail;
1984 	}
1985 
1986 	/*
1987 	 * Allocate Tx and Rx xfer queues.
1988 	 */
1989 	error = ural_alloc_tx_list(sc);
1990 	if (error != 0) {
1991 		printf("%s: could not allocate Tx list\n",
1992 		    sc->sc_dev.dv_xname);
1993 		goto fail;
1994 	}
1995 	error = ural_alloc_rx_list(sc);
1996 	if (error != 0) {
1997 		printf("%s: could not allocate Rx list\n",
1998 		    sc->sc_dev.dv_xname);
1999 		goto fail;
2000 	}
2001 
2002 	/*
2003 	 * Start up the receive pipe.
2004 	 */
2005 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
2006 		struct ural_rx_data *data = &sc->rx_data[i];
2007 
2008 		usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf,
2009 		    MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
2010 		error = usbd_transfer(data->xfer);
2011 		if (error != 0 && error != USBD_IN_PROGRESS) {
2012 			printf("%s: could not queue Rx transfer\n",
2013 			    sc->sc_dev.dv_xname);
2014 			goto fail;
2015 		}
2016 	}
2017 
2018 	/* kick Rx */
2019 	tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR;
2020 	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
2021 		tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR;
2022 #ifndef IEEE80211_STA_ONLY
2023 		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
2024 #endif
2025 			tmp |= RAL_DROP_TODS;
2026 		if (!(ifp->if_flags & IFF_PROMISC))
2027 			tmp |= RAL_DROP_NOT_TO_ME;
2028 	}
2029 	ural_write(sc, RAL_TXRX_CSR2, tmp);
2030 
2031 	ifq_clr_oactive(&ifp->if_snd);
2032 	ifp->if_flags |= IFF_RUNNING;
2033 
2034 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
2035 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2036 	else
2037 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2038 
2039 	return 0;
2040 
2041 fail:	ural_stop(ifp, 1);
2042 	return error;
2043 }
2044 
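/*
 * Bring the interface down: drop to INIT state, disable Rx, reset the
 * ASIC/BBP and release the AMRR xfer, the USB pipes and the Tx/Rx lists.
 */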
2045 void
2046 ural_stop(struct ifnet *ifp, int disable)
2047 {
2048 	struct ural_softc *sc = ifp->if_softc;
2049 	struct ieee80211com *ic = &sc->sc_ic;
2050 
2051 	sc->sc_tx_timer = 0;
2052 	ifp->if_timer = 0;
2053 	ifp->if_flags &= ~IFF_RUNNING;
2054 	ifq_clr_oactive(&ifp->if_snd);
2055 
2056 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */
2057 
2058 	/* disable Rx */
2059 	ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);
2060 
2061 	/* reset ASIC and BBP (but won't reset MAC registers!) */
2062 	ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
2063 	ural_write(sc, RAL_MAC_CSR1, 0);
2064 
2065 	if (sc->amrr_xfer != NULL) {
2066 		usbd_free_xfer(sc->amrr_xfer);
2067 		sc->amrr_xfer = NULL;
2068 	}
2069 	if (sc->sc_rx_pipeh != NULL) {
2070 		usbd_abort_pipe(sc->sc_rx_pipeh);
2071 		usbd_close_pipe(sc->sc_rx_pipeh);
2072 		sc->sc_rx_pipeh = NULL;
2073 	}
2074 	if (sc->sc_tx_pipeh != NULL) {
2075 		usbd_abort_pipe(sc->sc_tx_pipeh);
2076 		usbd_close_pipe(sc->sc_tx_pipeh);
2077 		sc->sc_tx_pipeh = NULL;
2078 	}
2079 
2080 	ural_free_rx_list(sc);
2081 	ural_free_tx_list(sc);
2082 }
2083 
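/*
 * Initialize the Tx rate of a newly associated station; AMRR will
 * adjust it afterwards.
 */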
2084 void
2085 ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2086 {
2087 	/* start with lowest Tx rate */
2088 	ni->ni_txrate = 0;
2089 }
2090 
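/*
 * Start automatic rate control for a node: reset the statistic registers
 * (they are cleared on read), pick an initial Tx rate and arm the
 * one-second AMRR timeout.
 */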
2091 void
2092 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni)
2093 {
2094 	int i;
2095 
2096 	/* clear statistic registers (STA_CSR0 to STA_CSR10) */
2097 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
2098 
2099 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
2100 
2101 	/* set Tx rate to some reasonable initial value (at most 36Mb/s) */
2102 	for (i = ni->ni_rates.rs_nrates - 1;
2103 	     i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72;
2104 	     i--);
2105 	ni->ni_txrate = i;
2106 
2107 	if (!usbd_is_dying(sc->sc_udev))
2108 		timeout_add_sec(&sc->amrr_to, 1);
2109 }
2110 
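/*
 * AMRR timeout: issue an asynchronous vendor request to read (and clear)
 * the STA_CSR0..STA_CSR10 statistic registers; ural_amrr_update() runs
 * when the transfer completes.
 */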
2111 void
2112 ural_amrr_timeout(void *arg)
2113 {
2114 	struct ural_softc *sc = arg;
2115 	usb_device_request_t req;
2116 	int s;
2117 
2118 	if (usbd_is_dying(sc->sc_udev))
2119 		return;
2120 
2121 	usbd_ref_incr(sc->sc_udev);
2122 
2123 	s = splusb();
2124 
2125 	/*
2126 	 * Asynchronously read statistic registers (cleared by read).
2127 	 */
2128 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
2129 	req.bRequest = RAL_READ_MULTI_MAC;
2130 	USETW(req.wValue, 0);
2131 	USETW(req.wIndex, RAL_STA_CSR0);
2132 	USETW(req.wLength, sizeof sc->sta);
2133 
2134 	usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
2135 	    USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
2136 	    ural_amrr_update);
2137 	(void)usbd_transfer(sc->amrr_xfer);
2138 
2139 	splx(s);
2140 
2141 	usbd_ref_decr(sc->sc_udev);
2142 }
2143 
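/*
 * Completion handler for the statistics read: update the retry and Tx
 * counters, let AMRR choose a new Tx rate and re-arm the timeout.
 */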
2144 void
2145 ural_amrr_update(struct usbd_xfer *xfer, void *priv,
2146     usbd_status status)
2147 {
2148 	struct ural_softc *sc = (struct ural_softc *)priv;
2149 	struct ifnet *ifp = &sc->sc_ic.ic_if;
2150 
2151 	if (status != USBD_NORMAL_COMPLETION) {
2152 		printf("%s: could not retrieve Tx statistics - cancelling "
2153 		    "automatic rate control\n", sc->sc_dev.dv_xname);
2154 		return;
2155 	}
2156 
2157 	/* count TX retry-fail as Tx errors */
2158 	ifp->if_oerrors += letoh16(sc->sta[9]);
2159 
2160 	sc->amn.amn_retrycnt =
2161 	    letoh16(sc->sta[7]) +	/* TX one-retry ok count */
2162 	    letoh16(sc->sta[8]) +	/* TX more-retry ok count */
2163 	    letoh16(sc->sta[9]);	/* TX retry-fail count */
2164 
2165 	sc->amn.amn_txcnt =
2166 	    sc->amn.amn_retrycnt +
2167 	    letoh16(sc->sta[6]);	/* TX no-retry ok count */
2168 
2169 	ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);
2170 
2171 	if (!usbd_is_dying(sc->sc_udev))
2172 		timeout_add_sec(&sc->amrr_to, 1);
2173 }
2174