1 /*	$OpenBSD: if_ral.c,v 1.121 2011/07/03 15:47:17 matthew Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005, 2006
5  *	Damien Bergamini <damien.bergamini@free.fr>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Ralink Technology RT2500USB chipset driver
22  * http://www.ralinktech.com.tw/
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/timeout.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 
37 #include <machine/bus.h>
38 #include <machine/endian.h>
39 #include <machine/intr.h>
40 
41 #if NBPFILTER > 0
42 #include <net/bpf.h>
43 #endif
44 #include <net/if.h>
45 #include <net/if_arp.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/if_types.h>
49 
50 #include <netinet/in.h>
51 #include <netinet/in_systm.h>
52 #include <netinet/in_var.h>
53 #include <netinet/if_ether.h>
54 #include <netinet/ip.h>
55 
56 #include <net80211/ieee80211_var.h>
57 #include <net80211/ieee80211_amrr.h>
58 #include <net80211/ieee80211_radiotap.h>
59 
60 #include <dev/usb/usb.h>
61 #include <dev/usb/usbdi.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbdevs.h>
64 
65 #include <dev/usb/if_ralreg.h>
66 #include <dev/usb/if_ralvar.h>
67 
68 #ifdef USB_DEBUG
69 #define URAL_DEBUG
70 #endif
71 
72 #ifdef URAL_DEBUG
73 #define DPRINTF(x)	do { if (ural_debug) printf x; } while (0)
74 #define DPRINTFN(n, x)	do { if (ural_debug >= (n)) printf x; } while (0)
75 int ural_debug = 0;
76 #else
77 #define DPRINTF(x)
78 #define DPRINTFN(n, x)
79 #endif
80 
81 /* various supported device vendors/products */
82 static const struct usb_devno ural_devs[] = {
83 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570 },
84 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570_2 },
85 	{ USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D7050 },
86 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54G },
87 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
88 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_HU200TS },
89 	{ USB_VENDOR_CONCEPTRONIC2,	USB_PRODUCT_CONCEPTRONIC2_C54RU },
90 	{ USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_RT2570 },
91 	{ USB_VENDOR_GIGABYTE,		USB_PRODUCT_GIGABYTE_GNWBKG },
92 	{ USB_VENDOR_GUILLEMOT,		USB_PRODUCT_GUILLEMOT_HWGUSB254 },
93 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54 },
94 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54AI },
95 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54YB },
96 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_NINWIFI },
97 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570 },
98 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_2 },
99 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_3 },
100 	{ USB_VENDOR_NOVATECH,		USB_PRODUCT_NOVATECH_NV902W },
101 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570 },
102 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_2 },
103 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_3 },
104 	{ USB_VENDOR_SPHAIRON,		USB_PRODUCT_SPHAIRON_UB801R },
105 	{ USB_VENDOR_SURECOM,		USB_PRODUCT_SURECOM_RT2570 },
106 	{ USB_VENDOR_VTECH,		USB_PRODUCT_VTECH_RT2570 },
107 	{ USB_VENDOR_ZINWELL,		USB_PRODUCT_ZINWELL_RT2570 }
108 };
109 
110 int		ural_alloc_tx_list(struct ural_softc *);
111 void		ural_free_tx_list(struct ural_softc *);
112 int		ural_alloc_rx_list(struct ural_softc *);
113 void		ural_free_rx_list(struct ural_softc *);
114 int		ural_media_change(struct ifnet *);
115 void		ural_next_scan(void *);
116 void		ural_task(void *);
117 int		ural_newstate(struct ieee80211com *, enum ieee80211_state,
118 		    int);
119 void		ural_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
120 void		ural_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
121 #if NBPFILTER > 0
122 uint8_t		ural_rxrate(const struct ural_rx_desc *);
123 #endif
124 int		ural_ack_rate(struct ieee80211com *, int);
125 uint16_t	ural_txtime(int, int, uint32_t);
126 uint8_t		ural_plcp_signal(int);
127 void		ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
128 		    uint32_t, int, int);
129 #ifndef IEEE80211_STA_ONLY
130 int		ural_tx_bcn(struct ural_softc *, struct mbuf *,
131 		    struct ieee80211_node *);
132 #endif
133 int		ural_tx_data(struct ural_softc *, struct mbuf *,
134 		    struct ieee80211_node *);
135 void		ural_start(struct ifnet *);
136 void		ural_watchdog(struct ifnet *);
137 int		ural_ioctl(struct ifnet *, u_long, caddr_t);
138 void		ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
139 uint16_t	ural_read(struct ural_softc *, uint16_t);
140 void		ural_read_multi(struct ural_softc *, uint16_t, void *, int);
141 void		ural_write(struct ural_softc *, uint16_t, uint16_t);
142 void		ural_write_multi(struct ural_softc *, uint16_t, void *, int);
143 void		ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
144 uint8_t		ural_bbp_read(struct ural_softc *, uint8_t);
145 void		ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
146 void		ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
147 void		ural_disable_rf_tune(struct ural_softc *);
148 void		ural_enable_tsf_sync(struct ural_softc *);
149 void		ural_update_slot(struct ural_softc *);
150 void		ural_set_txpreamble(struct ural_softc *);
151 void		ural_set_basicrates(struct ural_softc *);
152 void		ural_set_bssid(struct ural_softc *, const uint8_t *);
153 void		ural_set_macaddr(struct ural_softc *, const uint8_t *);
154 void		ural_update_promisc(struct ural_softc *);
155 const char	*ural_get_rf(int);
156 void		ural_read_eeprom(struct ural_softc *);
157 int		ural_bbp_init(struct ural_softc *);
158 void		ural_set_txantenna(struct ural_softc *, int);
159 void		ural_set_rxantenna(struct ural_softc *, int);
160 int		ural_init(struct ifnet *);
161 void		ural_stop(struct ifnet *, int);
162 void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
163 		    int);
164 void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
165 void		ural_amrr_timeout(void *);
166 void		ural_amrr_update(usbd_xfer_handle, usbd_private_handle,
167 		    usbd_status status);
168 
169 static const struct {
170 	uint16_t	reg;
171 	uint16_t	val;
172 } ural_def_mac[] = {
173 	RAL_DEF_MAC
174 };
175 
176 static const struct {
177 	uint8_t	reg;
178 	uint8_t	val;
179 } ural_def_bbp[] = {
180 	RAL_DEF_BBP
181 };
182 
183 static const uint32_t ural_rf2522_r2[] =    RAL_RF2522_R2;
184 static const uint32_t ural_rf2523_r2[] =    RAL_RF2523_R2;
185 static const uint32_t ural_rf2524_r2[] =    RAL_RF2524_R2;
186 static const uint32_t ural_rf2525_r2[] =    RAL_RF2525_R2;
187 static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
188 static const uint32_t ural_rf2525e_r2[] =   RAL_RF2525E_R2;
189 static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
190 static const uint32_t ural_rf2526_r2[] =    RAL_RF2526_R2;
191 
192 int ural_match(struct device *, void *, void *);
193 void ural_attach(struct device *, struct device *, void *);
194 int ural_detach(struct device *, int);
195 int ural_activate(struct device *, int);
196 
197 struct cfdriver ural_cd = {
198 	NULL, "ural", DV_IFNET
199 };
200 
201 const struct cfattach ural_ca = {
202 	sizeof(struct ural_softc),
203 	ural_match,
204 	ural_attach,
205 	ural_detach,
206 	ural_activate,
207 };
208 
209 int
210 ural_match(struct device *parent, void *match, void *aux)
211 {
212 	struct usb_attach_arg *uaa = aux;
213 
214 	if (uaa->iface != NULL)
215 		return UMATCH_NONE;
216 
217 	return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
218 	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
219 }
220 
221 void
222 ural_attach(struct device *parent, struct device *self, void *aux)
223 {
224 	struct ural_softc *sc = (struct ural_softc *)self;
225 	struct usb_attach_arg *uaa = aux;
226 	struct ieee80211com *ic = &sc->sc_ic;
227 	struct ifnet *ifp = &ic->ic_if;
228 	usb_interface_descriptor_t *id;
229 	usb_endpoint_descriptor_t *ed;
230 	usbd_status error;
231 	int i;
232 
233 	sc->sc_udev = uaa->device;
234 
235 	if (usbd_set_config_no(sc->sc_udev, RAL_CONFIG_NO, 0) != 0) {
236 		printf("%s: could not set configuration no\n",
237 		    sc->sc_dev.dv_xname);
238 		return;
239 	}
240 
241 	/* get the first interface handle */
242 	error = usbd_device2interface_handle(sc->sc_udev, RAL_IFACE_INDEX,
243 	    &sc->sc_iface);
244 	if (error != 0) {
245 		printf("%s: could not get interface handle\n",
246 		    sc->sc_dev.dv_xname);
247 		return;
248 	}
249 
250 	/*
251 	 * Find endpoints.
252 	 */
253 	id = usbd_get_interface_descriptor(sc->sc_iface);
254 
255 	sc->sc_rx_no = sc->sc_tx_no = -1;
256 	for (i = 0; i < id->bNumEndpoints; i++) {
257 		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
258 		if (ed == NULL) {
259 			printf("%s: no endpoint descriptor for iface %d\n",
260 			    sc->sc_dev.dv_xname, i);
261 			return;
262 		}
263 
264 		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
265 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
266 			sc->sc_rx_no = ed->bEndpointAddress;
267 		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
268 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
269 			sc->sc_tx_no = ed->bEndpointAddress;
270 	}
271 	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
272 		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
273 		return;
274 	}
275 
276 	usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC);
277 	timeout_set(&sc->scan_to, ural_next_scan, sc);
278 
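	/*
	 * AMRR raises the Tx rate after a run of successful transmit
	 * intervals; the required run length starts at the min threshold
	 * and backs off toward the max threshold when a rate probe fails
	 * (see ieee80211_amrr.c).
	 */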
279 	sc->amrr.amrr_min_success_threshold =  1;
280 	sc->amrr.amrr_max_success_threshold = 10;
281 	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);
282 
283 	/* retrieve RT2570 rev. no */
284 	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);
285 
286 	/* retrieve MAC address and various other things from EEPROM */
287 	ural_read_eeprom(sc);
288 
289 	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
290 	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
291 	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));
292 
293 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only OFDM, but unused anyway */
294 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
295 	ic->ic_state = IEEE80211_S_INIT;
296 
297 	/* set device capabilities */
298 	ic->ic_caps =
299 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
300 #ifndef IEEE80211_STA_ONLY
301 	    IEEE80211_C_IBSS |		/* IBSS mode supported */
302 	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
303 #endif
304 	    IEEE80211_C_TXPMGT |	/* tx power management */
305 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
306 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
307 	    IEEE80211_C_WEP |		/* s/w WEP */
308 	    IEEE80211_C_RSN;		/* WPA/RSN */
309 
310 	/* set supported .11b and .11g rates */
311 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
312 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
313 
314 	/* set supported .11b and .11g channels (1 through 14) */
315 	for (i = 1; i <= 14; i++) {
316 		ic->ic_channels[i].ic_freq =
317 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
318 		ic->ic_channels[i].ic_flags =
319 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
320 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
321 	}
322 
323 	ifp->if_softc = sc;
324 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
325 	ifp->if_ioctl = ural_ioctl;
326 	ifp->if_start = ural_start;
327 	ifp->if_watchdog = ural_watchdog;
328 	IFQ_SET_READY(&ifp->if_snd);
329 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
330 
331 	if_attach(ifp);
332 	ieee80211_ifattach(ifp);
333 	ic->ic_newassoc = ural_newassoc;
334 
335 	/* override state transition machine */
336 	sc->sc_newstate = ic->ic_newstate;
337 	ic->ic_newstate = ural_newstate;
338 	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);
339 
340 #if NBPFILTER > 0
341 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
342 	    sizeof (struct ieee80211_frame) + 64);
343 
344 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
345 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
346 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);
347 
348 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
349 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
350 	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
351 #endif
352 }
353 
354 int
355 ural_detach(struct device *self, int flags)
356 {
357 	struct ural_softc *sc = (struct ural_softc *)self;
358 	struct ifnet *ifp = &sc->sc_ic.ic_if;
359 	int s;
360 
361 	s = splusb();
362 
363 	if (timeout_initialized(&sc->scan_to))
364 		timeout_del(&sc->scan_to);
365 	if (timeout_initialized(&sc->amrr_to))
366 		timeout_del(&sc->amrr_to);
367 
368 	usb_rem_wait_task(sc->sc_udev, &sc->sc_task);
369 
370 	usbd_ref_wait(sc->sc_udev);
371 
372 	if (ifp->if_softc != NULL) {
373 		ieee80211_ifdetach(ifp);	/* free all nodes */
374 		if_detach(ifp);
375 	}
376 
377 	if (sc->amrr_xfer != NULL) {
378 		usbd_free_xfer(sc->amrr_xfer);
379 		sc->amrr_xfer = NULL;
380 	}
381 
382 	if (sc->sc_rx_pipeh != NULL) {
383 		usbd_abort_pipe(sc->sc_rx_pipeh);
384 		usbd_close_pipe(sc->sc_rx_pipeh);
385 	}
386 
387 	if (sc->sc_tx_pipeh != NULL) {
388 		usbd_abort_pipe(sc->sc_tx_pipeh);
389 		usbd_close_pipe(sc->sc_tx_pipeh);
390 	}
391 
392 	ural_free_rx_list(sc);
393 	ural_free_tx_list(sc);
394 
395 	splx(s);
396 
397 	return 0;
398 }
399 
400 int
401 ural_alloc_tx_list(struct ural_softc *sc)
402 {
403 	int i, error;
404 
405 	sc->tx_cur = sc->tx_queued = 0;
406 
407 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
408 		struct ural_tx_data *data = &sc->tx_data[i];
409 
410 		data->sc = sc;
411 
412 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
413 		if (data->xfer == NULL) {
414 			printf("%s: could not allocate tx xfer\n",
415 			    sc->sc_dev.dv_xname);
416 			error = ENOMEM;
417 			goto fail;
418 		}
419 		data->buf = usbd_alloc_buffer(data->xfer,
420 		    RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
421 		if (data->buf == NULL) {
422 			printf("%s: could not allocate tx buffer\n",
423 			    sc->sc_dev.dv_xname);
424 			error = ENOMEM;
425 			goto fail;
426 		}
427 	}
428 
429 	return 0;
430 
431 fail:	ural_free_tx_list(sc);
432 	return error;
433 }
434 
435 void
436 ural_free_tx_list(struct ural_softc *sc)
437 {
438 	int i;
439 
440 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
441 		struct ural_tx_data *data = &sc->tx_data[i];
442 
443 		if (data->xfer != NULL) {
444 			usbd_free_xfer(data->xfer);
445 			data->xfer = NULL;
446 		}
447 		/*
448 		 * The node has already been freed at that point so don't call
449 		 * ieee80211_release_node() here.
450 		 */
451 		data->ni = NULL;
452 	}
453 }
454 
455 int
456 ural_alloc_rx_list(struct ural_softc *sc)
457 {
458 	int i, error;
459 
460 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
461 		struct ural_rx_data *data = &sc->rx_data[i];
462 
463 		data->sc = sc;
464 
465 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
466 		if (data->xfer == NULL) {
467 			printf("%s: could not allocate rx xfer\n",
468 			    sc->sc_dev.dv_xname);
469 			error = ENOMEM;
470 			goto fail;
471 		}
472 		if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
473 			printf("%s: could not allocate rx buffer\n",
474 			    sc->sc_dev.dv_xname);
475 			error = ENOMEM;
476 			goto fail;
477 		}
478 
479 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
480 		if (data->m == NULL) {
481 			printf("%s: could not allocate rx mbuf\n",
482 			    sc->sc_dev.dv_xname);
483 			error = ENOMEM;
484 			goto fail;
485 		}
486 		MCLGET(data->m, M_DONTWAIT);
487 		if (!(data->m->m_flags & M_EXT)) {
488 			printf("%s: could not allocate rx mbuf cluster\n",
489 			    sc->sc_dev.dv_xname);
490 			error = ENOMEM;
491 			goto fail;
492 		}
493 		data->buf = mtod(data->m, uint8_t *);
494 	}
495 
496 	return 0;
497 
498 fail:	ural_free_rx_list(sc);
499 	return error;
500 }
501 
502 void
503 ural_free_rx_list(struct ural_softc *sc)
504 {
505 	int i;
506 
507 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
508 		struct ural_rx_data *data = &sc->rx_data[i];
509 
510 		if (data->xfer != NULL) {
511 			usbd_free_xfer(data->xfer);
512 			data->xfer = NULL;
513 		}
514 		if (data->m != NULL) {
515 			m_freem(data->m);
516 			data->m = NULL;
517 		}
518 	}
519 }
520 
521 int
522 ural_media_change(struct ifnet *ifp)
523 {
524 	int error;
525 
526 	error = ieee80211_media_change(ifp);
527 	if (error != ENETRESET)
528 		return error;
529 
530 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
531 		ural_init(ifp);
532 
533 	return 0;
534 }
535 
536 /*
537  * This function is called periodically (every 200ms) during scanning to
538  * switch from one channel to another.
539  */
540 void
541 ural_next_scan(void *arg)
542 {
543 	struct ural_softc *sc = arg;
544 	struct ieee80211com *ic = &sc->sc_ic;
545 	struct ifnet *ifp = &ic->ic_if;
546 
547 	if (usbd_is_dying(sc->sc_udev))
548 		return;
549 
550 	usbd_ref_incr(sc->sc_udev);
551 
552 	if (ic->ic_state == IEEE80211_S_SCAN)
553 		ieee80211_next_scan(ifp);
554 
555 	usbd_ref_decr(sc->sc_udev);
556 }
557 
558 void
559 ural_task(void *arg)
560 {
561 	struct ural_softc *sc = arg;
562 	struct ieee80211com *ic = &sc->sc_ic;
563 	enum ieee80211_state ostate;
564 	struct ieee80211_node *ni;
565 
566 	if (usbd_is_dying(sc->sc_udev))
567 		return;
568 
569 	ostate = ic->ic_state;
570 
571 	switch (sc->sc_state) {
572 	case IEEE80211_S_INIT:
573 		if (ostate == IEEE80211_S_RUN) {
574 			/* abort TSF synchronization */
575 			ural_write(sc, RAL_TXRX_CSR19, 0);
576 
577 			/* force tx led to stop blinking */
578 			ural_write(sc, RAL_MAC_CSR20, 0);
579 		}
580 		break;
581 
582 	case IEEE80211_S_SCAN:
583 		ural_set_chan(sc, ic->ic_bss->ni_chan);
584 		if (!usbd_is_dying(sc->sc_udev))
585 			timeout_add_msec(&sc->scan_to, 200);
586 		break;
587 
588 	case IEEE80211_S_AUTH:
589 		ural_set_chan(sc, ic->ic_bss->ni_chan);
590 		break;
591 
592 	case IEEE80211_S_ASSOC:
593 		ural_set_chan(sc, ic->ic_bss->ni_chan);
594 		break;
595 
596 	case IEEE80211_S_RUN:
597 		ural_set_chan(sc, ic->ic_bss->ni_chan);
598 
599 		ni = ic->ic_bss;
600 
601 		if (ic->ic_opmode != IEEE80211_M_MONITOR) {
602 			ural_update_slot(sc);
603 			ural_set_txpreamble(sc);
604 			ural_set_basicrates(sc);
605 			ural_set_bssid(sc, ni->ni_bssid);
606 		}
607 
608 #ifndef IEEE80211_STA_ONLY
609 		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
610 		    ic->ic_opmode == IEEE80211_M_IBSS) {
611 			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
612 			if (m == NULL) {
613 				printf("%s: could not allocate beacon\n",
614 				    sc->sc_dev.dv_xname);
615 				return;
616 			}
617 
618 			if (ural_tx_bcn(sc, m, ni) != 0) {
619 				m_freem(m);
620 				printf("%s: could not transmit beacon\n",
621 				    sc->sc_dev.dv_xname);
622 				return;
623 			}
624 
625 			/* beacon is no longer needed */
626 			m_freem(m);
627 		}
628 #endif
629 
630 		/* make tx led blink on tx (controlled by ASIC) */
631 		ural_write(sc, RAL_MAC_CSR20, 1);
632 
633 		if (ic->ic_opmode != IEEE80211_M_MONITOR)
634 			ural_enable_tsf_sync(sc);
635 
636 		if (ic->ic_opmode == IEEE80211_M_STA) {
637 			/* fake a join to init the tx rate */
638 			ural_newassoc(ic, ic->ic_bss, 1);
639 
640 			/* enable automatic rate control in STA mode */
641 			if (ic->ic_fixed_rate == -1)
642 				ural_amrr_start(sc, ic->ic_bss);
643 		}
644 
645 		break;
646 	}
647 
648 	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
649 }
650 
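/*
 * Handle a net80211 state change request.  Programming the chipset uses
 * synchronous USB transfers that may sleep, so the real work is deferred
 * to ural_task(), which runs in the USB task thread.
 */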
651 int
652 ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
653 {
654 	struct ural_softc *sc = ic->ic_if.if_softc;
655 
656 	usb_rem_task(sc->sc_udev, &sc->sc_task);
657 	timeout_del(&sc->scan_to);
658 	timeout_del(&sc->amrr_to);
659 
660 	/* do it in a process context */
661 	sc->sc_state = nstate;
662 	sc->sc_arg = arg;
663 	usb_add_task(sc->sc_udev, &sc->sc_task);
664 	return 0;
665 }
666 
667 /* quickly determine if a given rate is CCK or OFDM */
668 #define RAL_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
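/*
 * Rates are given in 500Kbps units (net80211 convention): 2 = 1Mbps,
 * 22 = 11Mbps, 12 = 6Mbps, 108 = 54Mbps.  Hence any rate >= 12 is OFDM,
 * except 22 which is 11Mbps CCK.
 */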
669 
670 #define RAL_ACK_SIZE	14	/* 10 + 4(FCS) */
671 #define RAL_CTS_SIZE	14	/* 10 + 4(FCS) */
672 
673 #define RAL_SIFS		10	/* us */
674 
675 #define RAL_RXTX_TURNAROUND	5	/* us */
676 
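/*
 * Tx bulk transfer completion callback: release the node reference taken
 * in ural_tx_data(), free up the Tx slot and restart the output queue.
 */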
677 void
678 ural_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
679 {
680 	struct ural_tx_data *data = priv;
681 	struct ural_softc *sc = data->sc;
682 	struct ieee80211com *ic = &sc->sc_ic;
683 	struct ifnet *ifp = &ic->ic_if;
684 	int s;
685 
686 	if (status != USBD_NORMAL_COMPLETION) {
687 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
688 			return;
689 
690 		printf("%s: could not transmit buffer: %s\n",
691 		    sc->sc_dev.dv_xname, usbd_errstr(status));
692 
693 		if (status == USBD_STALLED)
694 			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);
695 
696 		ifp->if_oerrors++;
697 		return;
698 	}
699 
700 	s = splnet();
701 
702 	ieee80211_release_node(ic, data->ni);
703 	data->ni = NULL;
704 
705 	sc->tx_queued--;
706 	ifp->if_opackets++;
707 
708 	DPRINTFN(10, ("tx done\n"));
709 
710 	sc->sc_tx_timer = 0;
711 	ifp->if_flags &= ~IFF_OACTIVE;
712 	ural_start(ifp);
713 
714 	splx(s);
715 }
716 
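/*
 * Rx bulk transfer completion callback: validate the Rx descriptor that
 * the hardware appends to the frame, swap in a fresh mbuf cluster, pass
 * the frame to net80211 and resubmit the transfer.
 */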
717 void
718 ural_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
719 {
720 	struct ural_rx_data *data = priv;
721 	struct ural_softc *sc = data->sc;
722 	struct ieee80211com *ic = &sc->sc_ic;
723 	struct ifnet *ifp = &ic->ic_if;
724 	const struct ural_rx_desc *desc;
725 	struct ieee80211_frame *wh;
726 	struct ieee80211_rxinfo rxi;
727 	struct ieee80211_node *ni;
728 	struct mbuf *mnew, *m;
729 	int s, len;
730 
731 	if (status != USBD_NORMAL_COMPLETION) {
732 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
733 			return;
734 
735 		if (status == USBD_STALLED)
736 			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
737 		goto skip;
738 	}
739 
740 	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);
741 
742 	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
743 		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
744 		    len));
745 		ifp->if_ierrors++;
746 		goto skip;
747 	}
748 
749 	/* rx descriptor is located at the end */
750 	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);
751 
752 	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
753 		/*
754 		 * This should not happen since we did not request to receive
755 		 * those frames when we filled RAL_TXRX_CSR2.
756 		 */
757 		DPRINTFN(5, ("PHY or CRC error\n"));
758 		ifp->if_ierrors++;
759 		goto skip;
760 	}
761 
762 	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
763 	if (mnew == NULL) {
764 		printf("%s: could not allocate rx mbuf\n",
765 		    sc->sc_dev.dv_xname);
766 		ifp->if_ierrors++;
767 		goto skip;
768 	}
769 	MCLGET(mnew, M_DONTWAIT);
770 	if (!(mnew->m_flags & M_EXT)) {
771 		printf("%s: could not allocate rx mbuf cluster\n",
772 		    sc->sc_dev.dv_xname);
773 		m_freem(mnew);
774 		ifp->if_ierrors++;
775 		goto skip;
776 	}
777 	m = data->m;
778 	data->m = mnew;
779 	data->buf = mtod(data->m, uint8_t *);
780 
781 	/* finalize mbuf */
782 	m->m_pkthdr.rcvif = ifp;
783 	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;
784 
785 	s = splnet();
786 
787 #if NBPFILTER > 0
788 	if (sc->sc_drvbpf != NULL) {
789 		struct mbuf mb;
790 		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;
791 
792 		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
793 		tap->wr_rate = ural_rxrate(desc);
794 		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
795 		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
796 		tap->wr_antenna = sc->rx_ant;
797 		tap->wr_antsignal = desc->rssi;
798 
799 		mb.m_data = (caddr_t)tap;
800 		mb.m_len = sc->sc_rxtap_len;
801 		mb.m_next = m;
802 		mb.m_nextpkt = NULL;
803 		mb.m_type = 0;
804 		mb.m_flags = 0;
805 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
806 	}
807 #endif
808 	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */
809 
810 	wh = mtod(m, struct ieee80211_frame *);
811 	ni = ieee80211_find_rxnode(ic, wh);
812 
813 	/* send the frame to the 802.11 layer */
814 	rxi.rxi_flags = 0;
815 	rxi.rxi_rssi = desc->rssi;
816 	rxi.rxi_tstamp = 0;	/* unused */
817 	ieee80211_input(ifp, m, ni, &rxi);
818 
819 	/* node is no longer needed */
820 	ieee80211_release_node(ic, ni);
821 
822 	splx(s);
823 
824 	DPRINTFN(15, ("rx done\n"));
825 
826 skip:	/* setup a new transfer */
827 	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
828 	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
829 	(void)usbd_transfer(xfer);
830 }
831 
832 /*
833  * This function is only used by the Rx radiotap code. It returns the rate at
834  * which a given frame was received.
835  */
836 #if NBPFILTER > 0
837 uint8_t
838 ural_rxrate(const struct ural_rx_desc *desc)
839 {
840 	if (letoh32(desc->flags) & RAL_RX_OFDM) {
841 		/* reverse function of ural_plcp_signal */
842 		switch (desc->rate) {
843 		case 0xb:	return 12;
844 		case 0xf:	return 18;
845 		case 0xa:	return 24;
846 		case 0xe:	return 36;
847 		case 0x9:	return 48;
848 		case 0xd:	return 72;
849 		case 0x8:	return 96;
850 		case 0xc:	return 108;
851 		}
852 	} else {
853 		if (desc->rate == 10)
854 			return 2;
855 		if (desc->rate == 20)
856 			return 4;
857 		if (desc->rate == 55)
858 			return 11;
859 		if (desc->rate == 110)
860 			return 22;
861 	}
862 	return 2;	/* should not get here */
863 }
864 #endif
865 
866 /*
867  * Return the expected ack rate for a frame transmitted at rate `rate'.
868  */
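/*
 * For example, a frame sent at 36Mbps (rate 72) is acknowledged at 24Mbps
 * (rate 48); a frame sent at 11Mbps (rate 22) is acknowledged at 11Mbps,
 * or at 2Mbps (rate 4) when the BSS operates in 11b mode.
 */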
869 int
870 ural_ack_rate(struct ieee80211com *ic, int rate)
871 {
872 	switch (rate) {
873 	/* CCK rates */
874 	case 2:
875 		return 2;
876 	case 4:
877 	case 11:
878 	case 22:
879 		return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;
880 
881 	/* OFDM rates */
882 	case 12:
883 	case 18:
884 		return 12;
885 	case 24:
886 	case 36:
887 		return 24;
888 	case 48:
889 	case 72:
890 	case 96:
891 	case 108:
892 		return 48;
893 	}
894 
895 	/* default to 1Mbps */
896 	return 2;
897 }
898 
899 /*
900  * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
901  * The function automatically determines the operating mode depending on the
902  * given rate. `flags' indicates whether short preamble is in use or not.
903  */
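/*
 * For example, an ACK (RAL_ACK_SIZE = 14 bytes) sent at 2Mbps (rate 4)
 * with a long preamble takes (16 * 14 + 3) / 4 + 144 + 48 = 248us; the
 * same frame sent at 12Mbps OFDM (rate 24) takes 16 + 4 + 4 * 3 + 6 = 38us.
 */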
904 uint16_t
905 ural_txtime(int len, int rate, uint32_t flags)
906 {
907 	uint16_t txtime;
908 
909 	if (RAL_RATE_IS_OFDM(rate)) {
910 		/* IEEE Std 802.11g-2003, pp. 44 */
911 		txtime = (8 + 4 * len + 3 + rate - 1) / rate;
912 		txtime = 16 + 4 + 4 * txtime + 6;
913 	} else {
914 		/* IEEE Std 802.11b-1999, pp. 28 */
915 		txtime = (16 * len + rate - 1) / rate;
916 		if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
917 			txtime +=  72 + 24;
918 		else
919 			txtime += 144 + 48;
920 	}
921 	return txtime;
922 }
923 
924 uint8_t
925 ural_plcp_signal(int rate)
926 {
927 	switch (rate) {
928 	/* CCK rates (returned values are device-dependent) */
929 	case 2:		return 0x0;
930 	case 4:		return 0x1;
931 	case 11:	return 0x2;
932 	case 22:	return 0x3;
933 
934 	/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
935 	case 12:	return 0xb;
936 	case 18:	return 0xf;
937 	case 24:	return 0xa;
938 	case 36:	return 0xe;
939 	case 48:	return 0x9;
940 	case 72:	return 0xd;
941 	case 96:	return 0x8;
942 	case 108:	return 0xc;
943 
944 	/* unsupported rates (should not get here) */
945 	default:	return 0xff;
946 	}
947 }
948 
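/*
 * Fill the Tx descriptor, including the PLCP header fields.  For example,
 * a frame whose length including the CRC is 223 bytes, sent at 11Mbps
 * (rate 22), yields plcp_length = (16 * 223 + 21) / 22 = 163 (microseconds
 * on air) with (16 * 223) % 22 = 4 < 7, so RAL_PLCP_LENGEXT is set.
 */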
949 void
950 ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
951     uint32_t flags, int len, int rate)
952 {
953 	struct ieee80211com *ic = &sc->sc_ic;
954 	uint16_t plcp_length;
955 	int remainder;
956 
957 	desc->flags = htole32(flags);
958 	desc->flags |= htole32(len << 16);
959 
960 	desc->wme = htole16(
961 	    RAL_AIFSN(2) |
962 	    RAL_LOGCWMIN(3) |
963 	    RAL_LOGCWMAX(5));
964 
965 	/* setup PLCP fields */
966 	desc->plcp_signal  = ural_plcp_signal(rate);
967 	desc->plcp_service = 4;
968 
969 	len += IEEE80211_CRC_LEN;
970 	if (RAL_RATE_IS_OFDM(rate)) {
971 		desc->flags |= htole32(RAL_TX_OFDM);
972 
973 		plcp_length = len & 0xfff;
974 		desc->plcp_length_hi = plcp_length >> 6;
975 		desc->plcp_length_lo = plcp_length & 0x3f;
976 	} else {
977 		plcp_length = (16 * len + rate - 1) / rate;
978 		if (rate == 22) {
979 			remainder = (16 * len) % 22;
980 			if (remainder != 0 && remainder < 7)
981 				desc->plcp_service |= RAL_PLCP_LENGEXT;
982 		}
983 		desc->plcp_length_hi = plcp_length >> 8;
984 		desc->plcp_length_lo = plcp_length & 0xff;
985 
986 		if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
987 			desc->plcp_signal |= 0x08;
988 	}
989 
990 	desc->iv = 0;
991 	desc->eiv = 0;
992 }
993 
994 #define RAL_TX_TIMEOUT	5000
995 
996 #ifndef IEEE80211_STA_ONLY
997 int
998 ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
999 {
1000 	struct ural_tx_desc *desc;
1001 	usbd_xfer_handle xfer;
1002 	usbd_status error;
1003 	uint8_t cmd = 0;
1004 	uint8_t *buf;
1005 	int xferlen, rate = 2;
1006 
1007 	xfer = usbd_alloc_xfer(sc->sc_udev);
1008 	if (xfer == NULL)
1009 		return ENOMEM;
1010 
1011 	/* xfer length needs to be a multiple of two! */
1012 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
1013 
1014 	buf = usbd_alloc_buffer(xfer, xferlen);
1015 	if (buf == NULL) {
1016 		usbd_free_xfer(xfer);
1017 		return ENOMEM;
1018 	}
1019 
1020 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd,
1021 	    USBD_FORCE_SHORT_XFER, RAL_TX_TIMEOUT, NULL);
1022 
1023 	error = usbd_sync_transfer(xfer);
1024 	if (error != 0) {
1025 		usbd_free_xfer(xfer);
1026 		return error;
1027 	}
1028 
1029 	desc = (struct ural_tx_desc *)buf;
1030 
1031 	m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE);
1032 	ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP,
1033 	    m0->m_pkthdr.len, rate);
1034 
1035 	DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n",
1036 	    m0->m_pkthdr.len, rate, xferlen));
1037 
1038 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen,
1039 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, NULL);
1040 
1041 	error = usbd_sync_transfer(xfer);
1042 	usbd_free_xfer(xfer);
1043 
1044 	return error;
1045 }
1046 #endif
1047 
1048 int
1049 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
1050 {
1051 	struct ieee80211com *ic = &sc->sc_ic;
1052 	struct ural_tx_desc *desc;
1053 	struct ural_tx_data *data;
1054 	struct ieee80211_frame *wh;
1055 	struct ieee80211_key *k;
1056 	uint32_t flags = RAL_TX_NEWSEQ;
1057 	uint16_t dur;
1058 	usbd_status error;
1059 	int rate, xferlen, pktlen, needrts = 0, needcts = 0;
1060 
1061 	wh = mtod(m0, struct ieee80211_frame *);
1062 
1063 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1064 		k = ieee80211_get_txkey(ic, wh, ni);
1065 
1066 		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
1067 			return ENOBUFS;
1068 
1069 		/* packet header may have moved, reset our local pointer */
1070 		wh = mtod(m0, struct ieee80211_frame *);
1071 	}
1072 
1073 	/* compute actual packet length (including CRC and crypto overhead) */
1074 	pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
1075 
1076 	/* pickup a rate */
1077 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
1078 	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1079 	     IEEE80211_FC0_TYPE_MGT)) {
1080 		/* mgmt/multicast frames are sent at the lowest avail. rate */
1081 		rate = ni->ni_rates.rs_rates[0];
1082 	} else if (ic->ic_fixed_rate != -1) {
1083 		rate = ic->ic_sup_rates[ic->ic_curmode].
1084 		    rs_rates[ic->ic_fixed_rate];
1085 	} else
1086 		rate = ni->ni_rates.rs_rates[ni->ni_txrate];
1087 	if (rate == 0)
1088 		rate = 2;	/* XXX should not happen */
1089 	rate &= IEEE80211_RATE_VAL;
1090 
1091 	/* check if RTS/CTS or CTS-to-self protection must be used */
1092 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1093 		/* multicast needs no protection: never sent at OFDM rates */
1094 		if (pktlen > ic->ic_rtsthreshold) {
1095 			needrts = 1;	/* RTS/CTS based on frame length */
1096 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1097 		    RAL_RATE_IS_OFDM(rate)) {
1098 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
1099 				needcts = 1;	/* CTS-to-self */
1100 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
1101 				needrts = 1;	/* RTS/CTS */
1102 		}
1103 	}
1104 	if (needrts || needcts) {
1105 		struct mbuf *mprot;
1106 		int protrate, ackrate;
1107 		uint16_t dur;
1108 
1109 		protrate = 2;
1110 		ackrate  = ural_ack_rate(ic, rate);
1111 
1112 		dur = ural_txtime(pktlen, rate, ic->ic_flags) +
1113 		      ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) +
1114 		      2 * RAL_SIFS;
1115 		if (needrts) {
1116 			dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic,
1117 			    protrate), ic->ic_flags) + RAL_SIFS;
1118 			mprot = ieee80211_get_rts(ic, wh, dur);
1119 		} else {
1120 			mprot = ieee80211_get_cts_to_self(ic, dur);
1121 		}
1122 		if (mprot == NULL) {
1123 			printf("%s: could not allocate protection frame\n",
1124 			    sc->sc_dev.dv_xname);
1125 			m_freem(m0);
1126 			return ENOBUFS;
1127 		}
1128 
1129 		data = &sc->tx_data[sc->tx_cur];
1130 		desc = (struct ural_tx_desc *)data->buf;
1131 
1132 		/* avoid multiple free() of the same node for each fragment */
1133 		data->ni = ieee80211_ref_node(ni);
1134 
1135 		m_copydata(mprot, 0, mprot->m_pkthdr.len,
1136 		    data->buf + RAL_TX_DESC_SIZE);
1137 		ural_setup_tx_desc(sc, desc,
1138 		    (needrts ? RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7),
1139 		    mprot->m_pkthdr.len, protrate);
1140 
1141 		/* no roundup necessary here */
1142 		xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len;
1143 
1144 		/* XXX may want to pass the protection frame to BPF */
1145 
1146 		/* mbuf is no longer needed */
1147 		m_freem(mprot);
1148 
1149 		usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf,
1150 		    xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
1151 		    RAL_TX_TIMEOUT, ural_txeof);
1152 		error = usbd_transfer(data->xfer);
1153 		if (error != 0 && error != USBD_IN_PROGRESS) {
1154 			m_freem(m0);
1155 			return error;
1156 		}
1157 
1158 		sc->tx_queued++;
1159 		sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1160 
1161 		flags |= RAL_TX_IFS_SIFS;
1162 	}
1163 
1164 	data = &sc->tx_data[sc->tx_cur];
1165 	desc = (struct ural_tx_desc *)data->buf;
1166 
1167 	data->ni = ni;
1168 
1169 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1170 		flags |= RAL_TX_NEED_ACK;
1171 		flags |= RAL_TX_RETRY(7);
1172 
1173 		dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate),
1174 		    ic->ic_flags) + RAL_SIFS;
1175 		*(uint16_t *)wh->i_dur = htole16(dur);
1176 
1177 #ifndef IEEE80211_STA_ONLY
1178 		/* tell hardware to set timestamp in probe responses */
1179 		if ((wh->i_fc[0] &
1180 		    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
1181 		    (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
1182 			flags |= RAL_TX_TIMESTAMP;
1183 #endif
1184 	}
1185 
1186 #if NBPFILTER > 0
1187 	if (sc->sc_drvbpf != NULL) {
1188 		struct mbuf mb;
1189 		struct ural_tx_radiotap_header *tap = &sc->sc_txtap;
1190 
1191 		tap->wt_flags = 0;
1192 		tap->wt_rate = rate;
1193 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1194 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1195 		tap->wt_antenna = sc->tx_ant;
1196 
1197 		mb.m_data = (caddr_t)tap;
1198 		mb.m_len = sc->sc_txtap_len;
1199 		mb.m_next = m0;
1200 		mb.m_nextpkt = NULL;
1201 		mb.m_type = 0;
1202 		mb.m_flags = 0;
1203 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1204 	}
1205 #endif
1206 
1207 	m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE);
1208 	ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate);
1209 
1210 	/* align end on a 2-bytes boundary */
1211 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
1212 
1213 	/*
1214 	 * No space left in the last URB to store the extra 2 bytes, force
1215 	 * sending of another URB.
1216 	 */
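	/* 64 is presumably the wMaxPacketSize of the full-speed bulk pipe. */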
1217 	if ((xferlen % 64) == 0)
1218 		xferlen += 2;
1219 
1220 	DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n",
1221 	    m0->m_pkthdr.len, rate, xferlen));
1222 
1223 	/* mbuf is no longer needed */
1224 	m_freem(m0);
1225 
1226 	usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen,
1227 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof);
1228 	error = usbd_transfer(data->xfer);
1229 	if (error != 0 && error != USBD_IN_PROGRESS)
1230 		return error;
1231 
1232 	sc->tx_queued++;
1233 	sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1234 
1235 	return 0;
1236 }
1237 
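/*
 * Start output: send queued management frames first, then data frames,
 * leaving one Tx slot spare since a protected frame may consume two slots
 * (protection frame + data frame).
 */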
1238 void
1239 ural_start(struct ifnet *ifp)
1240 {
1241 	struct ural_softc *sc = ifp->if_softc;
1242 	struct ieee80211com *ic = &sc->sc_ic;
1243 	struct ieee80211_node *ni;
1244 	struct mbuf *m0;
1245 
1246 	/*
1247 	 * net80211 may still try to send management frames even if the
1248 	 * IFF_RUNNING flag is not set...
1249 	 */
1250 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1251 		return;
1252 
1253 	for (;;) {
1254 		IF_POLL(&ic->ic_mgtq, m0);
1255 		if (m0 != NULL) {
1256 			if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
1257 				ifp->if_flags |= IFF_OACTIVE;
1258 				break;
1259 			}
1260 			IF_DEQUEUE(&ic->ic_mgtq, m0);
1261 
1262 			ni = (struct ieee80211_node *)m0->m_pkthdr.rcvif;
1263 			m0->m_pkthdr.rcvif = NULL;
1264 #if NBPFILTER > 0
1265 			if (ic->ic_rawbpf != NULL)
1266 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1267 #endif
1268 			if (ural_tx_data(sc, m0, ni) != 0)
1269 				break;
1270 
1271 		} else {
1272 			if (ic->ic_state != IEEE80211_S_RUN)
1273 				break;
1274 			IFQ_POLL(&ifp->if_snd, m0);
1275 			if (m0 == NULL)
1276 				break;
1277 			if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
1278 				ifp->if_flags |= IFF_OACTIVE;
1279 				break;
1280 			}
1281 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1282 #if NBPFILTER > 0
1283 			if (ifp->if_bpf != NULL)
1284 				bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1285 #endif
1286 			m0 = ieee80211_encap(ifp, m0, &ni);
1287 			if (m0 == NULL)
1288 				continue;
1289 #if NBPFILTER > 0
1290 			if (ic->ic_rawbpf != NULL)
1291 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1292 #endif
1293 			if (ural_tx_data(sc, m0, ni) != 0) {
1294 				if (ni != NULL)
1295 					ieee80211_release_node(ic, ni);
1296 				ifp->if_oerrors++;
1297 				break;
1298 			}
1299 		}
1300 
1301 		sc->sc_tx_timer = 5;
1302 		ifp->if_timer = 1;
1303 	}
1304 }
1305 
1306 void
1307 ural_watchdog(struct ifnet *ifp)
1308 {
1309 	struct ural_softc *sc = ifp->if_softc;
1310 
1311 	ifp->if_timer = 0;
1312 
1313 	if (sc->sc_tx_timer > 0) {
1314 		if (--sc->sc_tx_timer == 0) {
1315 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1316 			/*ural_init(ifp); XXX needs a process context! */
1317 			ifp->if_oerrors++;
1318 			return;
1319 		}
1320 		ifp->if_timer = 1;
1321 	}
1322 
1323 	ieee80211_watchdog(ifp);
1324 }
1325 
1326 int
1327 ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1328 {
1329 	struct ural_softc *sc = ifp->if_softc;
1330 	struct ieee80211com *ic = &sc->sc_ic;
1331 	struct ifaddr *ifa;
1332 	struct ifreq *ifr;
1333 	int s, error = 0;
1334 
1335 	if (usbd_is_dying(sc->sc_udev))
1336 		return ENXIO;
1337 
1338 	usbd_ref_incr(sc->sc_udev);
1339 
1340 	s = splnet();
1341 
1342 	switch (cmd) {
1343 	case SIOCSIFADDR:
1344 		ifa = (struct ifaddr *)data;
1345 		ifp->if_flags |= IFF_UP;
1346 #ifdef INET
1347 		if (ifa->ifa_addr->sa_family == AF_INET)
1348 			arp_ifinit(&ic->ic_ac, ifa);
1349 #endif
1350 		/* FALLTHROUGH */
1351 	case SIOCSIFFLAGS:
1352 		if (ifp->if_flags & IFF_UP) {
1353 			if (ifp->if_flags & IFF_RUNNING)
1354 				ural_update_promisc(sc);
1355 			else
1356 				ural_init(ifp);
1357 		} else {
1358 			if (ifp->if_flags & IFF_RUNNING)
1359 				ural_stop(ifp, 1);
1360 		}
1361 		break;
1362 
1363 	case SIOCADDMULTI:
1364 	case SIOCDELMULTI:
1365 		ifr = (struct ifreq *)data;
1366 		error = (cmd == SIOCADDMULTI) ?
1367 		    ether_addmulti(ifr, &ic->ic_ac) :
1368 		    ether_delmulti(ifr, &ic->ic_ac);
1369 
1370 		if (error == ENETRESET)
1371 			error = 0;
1372 		break;
1373 
1374 	case SIOCS80211CHANNEL:
1375 		/*
1376 		 * This allows for fast channel switching in monitor mode
1377 		 * (used by kismet). In IBSS mode, we must explicitly reset
1378 		 * the interface to generate a new beacon frame.
1379 		 */
1380 		error = ieee80211_ioctl(ifp, cmd, data);
1381 		if (error == ENETRESET &&
1382 		    ic->ic_opmode == IEEE80211_M_MONITOR) {
1383 			if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1384 			    (IFF_UP | IFF_RUNNING))
1385 				ural_set_chan(sc, ic->ic_ibss_chan);
1386 			error = 0;
1387 		}
1388 		break;
1389 
1390 	default:
1391 		error = ieee80211_ioctl(ifp, cmd, data);
1392 	}
1393 
1394 	if (error == ENETRESET) {
1395 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1396 		    (IFF_UP | IFF_RUNNING))
1397 			ural_init(ifp);
1398 		error = 0;
1399 	}
1400 
1401 	splx(s);
1402 
1403 	usbd_ref_decr(sc->sc_udev);
1404 
1405 	return error;
1406 }
1407 
1408 void
1409 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len)
1410 {
1411 	usb_device_request_t req;
1412 	usbd_status error;
1413 
1414 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1415 	req.bRequest = RAL_READ_EEPROM;
1416 	USETW(req.wValue, 0);
1417 	USETW(req.wIndex, addr);
1418 	USETW(req.wLength, len);
1419 
1420 	error = usbd_do_request(sc->sc_udev, &req, buf);
1421 	if (error != 0) {
1422 		printf("%s: could not read EEPROM: %s\n",
1423 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1424 	}
1425 }
1426 
1427 uint16_t
1428 ural_read(struct ural_softc *sc, uint16_t reg)
1429 {
1430 	usb_device_request_t req;
1431 	usbd_status error;
1432 	uint16_t val;
1433 
1434 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1435 	req.bRequest = RAL_READ_MAC;
1436 	USETW(req.wValue, 0);
1437 	USETW(req.wIndex, reg);
1438 	USETW(req.wLength, sizeof (uint16_t));
1439 
1440 	error = usbd_do_request(sc->sc_udev, &req, &val);
1441 	if (error != 0) {
1442 		printf("%s: could not read MAC register: %s\n",
1443 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1444 		return 0;
1445 	}
1446 	return letoh16(val);
1447 }
1448 
1449 void
1450 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1451 {
1452 	usb_device_request_t req;
1453 	usbd_status error;
1454 
1455 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1456 	req.bRequest = RAL_READ_MULTI_MAC;
1457 	USETW(req.wValue, 0);
1458 	USETW(req.wIndex, reg);
1459 	USETW(req.wLength, len);
1460 
1461 	error = usbd_do_request(sc->sc_udev, &req, buf);
1462 	if (error != 0) {
1463 		printf("%s: could not read MAC register: %s\n",
1464 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1465 	}
1466 }
1467 
1468 void
1469 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val)
1470 {
1471 	usb_device_request_t req;
1472 	usbd_status error;
1473 
1474 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1475 	req.bRequest = RAL_WRITE_MAC;
1476 	USETW(req.wValue, val);
1477 	USETW(req.wIndex, reg);
1478 	USETW(req.wLength, 0);
1479 
1480 	error = usbd_do_request(sc->sc_udev, &req, NULL);
1481 	if (error != 0) {
1482 		printf("%s: could not write MAC register: %s\n",
1483 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1484 	}
1485 }
1486 
1487 void
1488 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1489 {
1490 	usb_device_request_t req;
1491 	usbd_status error;
1492 
1493 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1494 	req.bRequest = RAL_WRITE_MULTI_MAC;
1495 	USETW(req.wValue, 0);
1496 	USETW(req.wIndex, reg);
1497 	USETW(req.wLength, len);
1498 
1499 	error = usbd_do_request(sc->sc_udev, &req, buf);
1500 	if (error != 0) {
1501 		printf("%s: could not write MAC register: %s\n",
1502 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1503 	}
1504 }
1505 
1506 void
1507 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val)
1508 {
1509 	uint16_t tmp;
1510 	int ntries;
1511 
1512 	for (ntries = 0; ntries < 5; ntries++) {
1513 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1514 			break;
1515 	}
1516 	if (ntries == 5) {
1517 		printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname);
1518 		return;
1519 	}
1520 
1521 	tmp = reg << 8 | val;
1522 	ural_write(sc, RAL_PHY_CSR7, tmp);
1523 }
1524 
1525 uint8_t
1526 ural_bbp_read(struct ural_softc *sc, uint8_t reg)
1527 {
1528 	uint16_t val;
1529 	int ntries;
1530 
1531 	val = RAL_BBP_WRITE | reg << 8;
1532 	ural_write(sc, RAL_PHY_CSR7, val);
1533 
1534 	for (ntries = 0; ntries < 5; ntries++) {
1535 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1536 			break;
1537 	}
1538 	if (ntries == 5) {
1539 		printf("%s: could not read BBP\n", sc->sc_dev.dv_xname);
1540 		return 0;
1541 	}
1542 	return ural_read(sc, RAL_PHY_CSR7) & 0xff;
1543 }
1544 
1545 void
1546 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
1547 {
1548 	uint32_t tmp;
1549 	int ntries;
1550 
1551 	for (ntries = 0; ntries < 5; ntries++) {
1552 		if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
1553 			break;
1554 	}
1555 	if (ntries == 5) {
1556 		printf("%s: could not write to RF\n", sc->sc_dev.dv_xname);
1557 		return;
1558 	}
1559 
1560 	tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
1561 	ural_write(sc, RAL_PHY_CSR9,  tmp & 0xffff);
1562 	ural_write(sc, RAL_PHY_CSR10, tmp >> 16);
1563 
1564 	/* remember last written value in sc */
1565 	sc->rf_regs[reg] = val;
1566 
1567 	DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff));
1568 }
1569 
1570 void
1571 ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
1572 {
1573 	struct ieee80211com *ic = &sc->sc_ic;
1574 	uint8_t power, tmp;
1575 	u_int chan;
1576 
1577 	chan = ieee80211_chan2ieee(ic, c);
1578 	if (chan == 0 || chan == IEEE80211_CHAN_ANY)
1579 		return;
1580 
1581 	power = min(sc->txpow[chan - 1], 31);
1582 
1583 	DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power));
1584 
1585 	switch (sc->rf_rev) {
1586 	case RAL_RF_2522:
1587 		ural_rf_write(sc, RAL_RF1, 0x00814);
1588 		ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
1589 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1590 		break;
1591 
1592 	case RAL_RF_2523:
1593 		ural_rf_write(sc, RAL_RF1, 0x08804);
1594 		ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
1595 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
1596 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1597 		break;
1598 
1599 	case RAL_RF_2524:
1600 		ural_rf_write(sc, RAL_RF1, 0x0c808);
1601 		ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
1602 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1603 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1604 		break;
1605 
1606 	case RAL_RF_2525:
1607 		ural_rf_write(sc, RAL_RF1, 0x08808);
1608 		ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
1609 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1610 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1611 
1612 		ural_rf_write(sc, RAL_RF1, 0x08808);
1613 		ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
1614 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1615 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1616 		break;
1617 
1618 	case RAL_RF_2525E:
1619 		ural_rf_write(sc, RAL_RF1, 0x08808);
1620 		ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
1621 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1622 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
1623 		break;
1624 
1625 	case RAL_RF_2526:
1626 		ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
1627 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1628 		ural_rf_write(sc, RAL_RF1, 0x08804);
1629 
1630 		ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
1631 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1632 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1633 		break;
1634 	}
1635 
1636 	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
1637 	    ic->ic_state != IEEE80211_S_SCAN) {
1638 		/* set Japan filter bit for channel 14 */
1639 		tmp = ural_bbp_read(sc, 70);
1640 
1641 		tmp &= ~RAL_JAPAN_FILTER;
1642 		if (chan == 14)
1643 			tmp |= RAL_JAPAN_FILTER;
1644 
1645 		ural_bbp_write(sc, 70, tmp);
1646 
1647 		/* clear CRC errors */
1648 		ural_read(sc, RAL_STA_CSR0);
1649 
1650 		DELAY(1000); /* RF needs a 1ms delay here */
1651 		ural_disable_rf_tune(sc);
1652 	}
1653 }
1654 
1655 /*
1656  * Disable RF auto-tuning.
1657  */
1658 void
1659 ural_disable_rf_tune(struct ural_softc *sc)
1660 {
1661 	uint32_t tmp;
1662 
1663 	if (sc->rf_rev != RAL_RF_2523) {
1664 		tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
1665 		ural_rf_write(sc, RAL_RF1, tmp);
1666 	}
1667 
1668 	tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
1669 	ural_rf_write(sc, RAL_RF3, tmp);
1670 
1671 	DPRINTFN(2, ("disabling RF autotune\n"));
1672 }
1673 
1674 /*
1675  * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF
1676  * synchronization.
1677  */
1678 void
1679 ural_enable_tsf_sync(struct ural_softc *sc)
1680 {
1681 	struct ieee80211com *ic = &sc->sc_ic;
1682 	uint16_t logcwmin, preload, tmp;
1683 
1684 	/* first, disable TSF synchronization */
1685 	ural_write(sc, RAL_TXRX_CSR19, 0);
1686 
1687 	tmp = (16 * ic->ic_bss->ni_intval) << 4;
1688 	ural_write(sc, RAL_TXRX_CSR18, tmp);
1689 
1690 #ifndef IEEE80211_STA_ONLY
1691 	if (ic->ic_opmode == IEEE80211_M_IBSS) {
1692 		logcwmin = 2;
1693 		preload = 320;
1694 	} else
1695 #endif
1696 	{
1697 		logcwmin = 0;
1698 		preload = 6;
1699 	}
1700 	tmp = logcwmin << 12 | preload;
1701 	ural_write(sc, RAL_TXRX_CSR20, tmp);
1702 
1703 	/* finally, enable TSF synchronization */
1704 	tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN;
1705 	if (ic->ic_opmode == IEEE80211_M_STA)
1706 		tmp |= RAL_ENABLE_TSF_SYNC(1);
1707 #ifndef IEEE80211_STA_ONLY
1708 	else
1709 		tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR;
1710 #endif
1711 	ural_write(sc, RAL_TXRX_CSR19, tmp);
1712 
1713 	DPRINTF(("enabling TSF synchronization\n"));
1714 }
1715 
1716 void
1717 ural_update_slot(struct ural_softc *sc)
1718 {
1719 	struct ieee80211com *ic = &sc->sc_ic;
1720 	uint16_t slottime, sifs, eifs;
1721 
1722 	slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
1723 
1724 	/*
1725 	 * These settings may sound a bit inconsistent but this is what the
1726 	 * reference driver does.
1727 	 */
1728 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1729 		sifs = 16 - RAL_RXTX_TURNAROUND;
1730 		eifs = 364;
1731 	} else {
1732 		sifs = 10 - RAL_RXTX_TURNAROUND;
1733 		eifs = 64;
1734 	}
1735 
1736 	ural_write(sc, RAL_MAC_CSR10, slottime);
1737 	ural_write(sc, RAL_MAC_CSR11, sifs);
1738 	ural_write(sc, RAL_MAC_CSR12, eifs);
1739 }
1740 
1741 void
1742 ural_set_txpreamble(struct ural_softc *sc)
1743 {
1744 	uint16_t tmp;
1745 
1746 	tmp = ural_read(sc, RAL_TXRX_CSR10);
1747 
1748 	tmp &= ~RAL_SHORT_PREAMBLE;
1749 	if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE)
1750 		tmp |= RAL_SHORT_PREAMBLE;
1751 
1752 	ural_write(sc, RAL_TXRX_CSR10, tmp);
1753 }
1754 
1755 void
1756 ural_set_basicrates(struct ural_softc *sc)
1757 {
1758 	struct ieee80211com *ic = &sc->sc_ic;
1759 
1760 	/* update basic rate set */
1761 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1762 		/* 11b basic rates: 1, 2Mbps */
1763 		ural_write(sc, RAL_TXRX_CSR11, 0x3);
1764 	} else {
1765 		/* 11b/g basic rates: 1, 2, 5.5, 11Mbps */
1766 		ural_write(sc, RAL_TXRX_CSR11, 0xf);
1767 	}
1768 }
1769 
1770 void
1771 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid)
1772 {
1773 	uint16_t tmp;
1774 
1775 	tmp = bssid[0] | bssid[1] << 8;
1776 	ural_write(sc, RAL_MAC_CSR5, tmp);
1777 
1778 	tmp = bssid[2] | bssid[3] << 8;
1779 	ural_write(sc, RAL_MAC_CSR6, tmp);
1780 
1781 	tmp = bssid[4] | bssid[5] << 8;
1782 	ural_write(sc, RAL_MAC_CSR7, tmp);
1783 
1784 	DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid)));
1785 }
1786 
1787 void
1788 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr)
1789 {
1790 	uint16_t tmp;
1791 
1792 	tmp = addr[0] | addr[1] << 8;
1793 	ural_write(sc, RAL_MAC_CSR2, tmp);
1794 
1795 	tmp = addr[2] | addr[3] << 8;
1796 	ural_write(sc, RAL_MAC_CSR3, tmp);
1797 
1798 	tmp = addr[4] | addr[5] << 8;
1799 	ural_write(sc, RAL_MAC_CSR4, tmp);
1800 
1801 	DPRINTF(("setting MAC address to %s\n",
1802 	    ether_sprintf((uint8_t *)addr)));
1803 }
1804 
1805 void
1806 ural_update_promisc(struct ural_softc *sc)
1807 {
1808 	struct ifnet *ifp = &sc->sc_ic.ic_if;
1809 	uint16_t tmp;
1810 
1811 	tmp = ural_read(sc, RAL_TXRX_CSR2);
1812 
1813 	tmp &= ~RAL_DROP_NOT_TO_ME;
1814 	if (!(ifp->if_flags & IFF_PROMISC))
1815 		tmp |= RAL_DROP_NOT_TO_ME;
1816 
1817 	ural_write(sc, RAL_TXRX_CSR2, tmp);
1818 
1819 	DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
1820 	    "entering" : "leaving"));
1821 }
1822 
1823 const char *
1824 ural_get_rf(int rev)
1825 {
1826 	switch (rev) {
1827 	case RAL_RF_2522:	return "RT2522";
1828 	case RAL_RF_2523:	return "RT2523";
1829 	case RAL_RF_2524:	return "RT2524";
1830 	case RAL_RF_2525:	return "RT2525";
1831 	case RAL_RF_2525E:	return "RT2525e";
1832 	case RAL_RF_2526:	return "RT2526";
1833 	case RAL_RF_5222:	return "RT5222";
1834 	default:		return "unknown";
1835 	}
1836 }
1837 
1838 void
1839 ural_read_eeprom(struct ural_softc *sc)
1840 {
1841 	struct ieee80211com *ic = &sc->sc_ic;
1842 	uint16_t val;
1843 
1844 	/* retrieve MAC/BBP type */
1845 	ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2);
1846 	sc->macbbp_rev = letoh16(val);
1847 
1848 	ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
1849 	val = letoh16(val);
1850 	sc->rf_rev =   (val >> 11) & 0x7;
1851 	sc->hw_radio = (val >> 10) & 0x1;
1852 	sc->led_mode = (val >> 6)  & 0x7;
1853 	sc->rx_ant =   (val >> 4)  & 0x3;
1854 	sc->tx_ant =   (val >> 2)  & 0x3;
1855 	sc->nb_ant =   val & 0x3;
1856 
1857 	/* read MAC address */
1858 	ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6);
1859 
1860 	/* read default values for BBP registers */
1861 	ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);
1862 
1863 	/* read Tx power for all b/g channels */
1864 	ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
1865 }
1866 
1867 int
1868 ural_bbp_init(struct ural_softc *sc)
1869 {
1870 	int i, ntries;
1871 
1872 	/* wait for BBP to be ready */
1873 	for (ntries = 0; ntries < 100; ntries++) {
1874 		if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
1875 			break;
1876 		DELAY(1000);
1877 	}
1878 	if (ntries == 100) {
1879 		printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname);
1880 		return EIO;
1881 	}
1882 
1883 	/* initialize BBP registers to default values */
1884 	for (i = 0; i < nitems(ural_def_bbp); i++)
1885 		ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);
1886 
1887 #if 0
1888 	/* initialize BBP registers to values stored in EEPROM */
1889 	for (i = 0; i < 16; i++) {
1890 		if (sc->bbp_prom[i].reg == 0xff)
1891 			continue;
1892 		ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
1893 	}
1894 #endif
1895 
1896 	return 0;
1897 }
1898 
1899 void
1900 ural_set_txantenna(struct ural_softc *sc, int antenna)
1901 {
1902 	uint16_t tmp;
1903 	uint8_t tx;
1904 
1905 	tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
1906 	if (antenna == 1)
1907 		tx |= RAL_BBP_ANTA;
1908 	else if (antenna == 2)
1909 		tx |= RAL_BBP_ANTB;
1910 	else
1911 		tx |= RAL_BBP_DIVERSITY;
1912 
1913 	/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
1914 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
1915 	    sc->rf_rev == RAL_RF_5222)
1916 		tx |= RAL_BBP_FLIPIQ;
1917 
1918 	ural_bbp_write(sc, RAL_BBP_TX, tx);
1919 
1920 	/* update flags in PHY_CSR5 and PHY_CSR6 too */
1921 	tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
1922 	ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));
1923 
1924 	tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
1925 	ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
1926 }
1927 
1928 void
1929 ural_set_rxantenna(struct ural_softc *sc, int antenna)
1930 {
1931 	uint8_t rx;
1932 
1933 	rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
1934 	if (antenna == 1)
1935 		rx |= RAL_BBP_ANTA;
1936 	else if (antenna == 2)
1937 		rx |= RAL_BBP_ANTB;
1938 	else
1939 		rx |= RAL_BBP_DIVERSITY;
1940 
1941 	/* need to force no I/Q flip for RF 2525e and 2526 */
1942 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
1943 		rx &= ~RAL_BBP_FLIPIQ;
1944 
1945 	ural_bbp_write(sc, RAL_BBP_RX, rx);
1946 }
1947 
1948 int
1949 ural_init(struct ifnet *ifp)
1950 {
1951 	struct ural_softc *sc = ifp->if_softc;
1952 	struct ieee80211com *ic = &sc->sc_ic;
1953 	uint16_t tmp;
1954 	usbd_status error;
1955 	int i, ntries;
1956 
1957 	ural_stop(ifp, 0);
1958 
1959 	/* initialize MAC registers to default values */
1960 	for (i = 0; i < nitems(ural_def_mac); i++)
1961 		ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);
1962 
1963 	/* wait for BBP and RF to wake up (this can take a long time!) */
1964 	for (ntries = 0; ntries < 100; ntries++) {
1965 		tmp = ural_read(sc, RAL_MAC_CSR17);
1966 		if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
1967 		    (RAL_BBP_AWAKE | RAL_RF_AWAKE))
1968 			break;
1969 		DELAY(1000);
1970 	}
1971 	if (ntries == 100) {
1972 		printf("%s: timeout waiting for BBP/RF to wake up\n",
1973 		    sc->sc_dev.dv_xname);
1974 		error = EIO;
1975 		goto fail;
1976 	}
1977 
1978 	/* we're ready! */
1979 	ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);
1980 
1981 	/* set basic rate set (will be updated later) */
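	/* 0x153 presumably = 1/2 Mb/s CCK + 6/12/24 Mb/s OFDM rates */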
1982 	ural_write(sc, RAL_TXRX_CSR11, 0x153);
1983 
1984 	error = ural_bbp_init(sc);
1985 	if (error != 0)
1986 		goto fail;
1987 
1988 	/* set default BSS channel */
1989 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
1990 	ural_set_chan(sc, ic->ic_bss->ni_chan);
1991 
1992 	/* reading the statistics registers (STA_CSR0 to STA_CSR10) clears them */
1993 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
1994 
1995 	/* set default sensitivity */
1996 	ural_bbp_write(sc, 17, 0x48);
1997 
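	/* default to antenna A for both Tx and Rx */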
1998 	ural_set_txantenna(sc, 1);
1999 	ural_set_rxantenna(sc, 1);
2000 
2001 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
2002 	ural_set_macaddr(sc, ic->ic_myaddr);
2003 
2004 	/*
2005 	 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31).
2006 	 */
2007 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2008 		struct ieee80211_key *k = &ic->ic_nw_keys[i];
2009 		ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE,
2010 		    k->k_key, IEEE80211_KEYBUF_SIZE);
2011 	}
2012 
2013 	/*
2014 	 * Allocate xfer for AMRR statistics requests.
2015 	 */
2016 	sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev);
2017 	if (sc->amrr_xfer == NULL) {
2018 		printf("%s: could not allocate AMRR xfer\n",
2019 		    sc->sc_dev.dv_xname);
		error = ENOMEM;
2020 		goto fail;
2021 	}
2022 
2023 	/*
2024 	 * Open Tx and Rx USB bulk pipes.
2025 	 */
2026 	error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE,
2027 	    &sc->sc_tx_pipeh);
2028 	if (error != 0) {
2029 		printf("%s: could not open Tx pipe: %s\n",
2030 		    sc->sc_dev.dv_xname, usbd_errstr(error));
2031 		goto fail;
2032 	}
2033 	error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE,
2034 	    &sc->sc_rx_pipeh);
2035 	if (error != 0) {
2036 		printf("%s: could not open Rx pipe: %s\n",
2037 		    sc->sc_dev.dv_xname, usbd_errstr(error));
2038 		goto fail;
2039 	}
2040 
2041 	/*
2042 	 * Allocate Tx and Rx xfer queues.
2043 	 */
2044 	error = ural_alloc_tx_list(sc);
2045 	if (error != 0) {
2046 		printf("%s: could not allocate Tx list\n",
2047 		    sc->sc_dev.dv_xname);
2048 		goto fail;
2049 	}
2050 	error = ural_alloc_rx_list(sc);
2051 	if (error != 0) {
2052 		printf("%s: could not allocate Rx list\n",
2053 		    sc->sc_dev.dv_xname);
2054 		goto fail;
2055 	}
2056 
2057 	/*
2058 	 * Start up the receive pipe.
2059 	 */
2060 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
2061 		struct ural_rx_data *data = &sc->rx_data[i];
2062 
2063 		usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf,
2064 		    MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
2065 		error = usbd_transfer(data->xfer);
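		/* USBD_IN_PROGRESS is expected for asynchronous transfers */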
2066 		if (error != 0 && error != USBD_IN_PROGRESS) {
2067 			printf("%s: could not queue Rx transfer\n",
2068 			    sc->sc_dev.dv_xname);
2069 			goto fail;
2070 		}
2071 	}
2072 
2073 	/*
	 * Kick Rx: always drop PHY and CRC errors.  Unless in monitor mode,
	 * also drop control frames, frames with a wrong protocol version,
	 * to-DS frames (except in hostap mode) and, unless promiscuous,
	 * frames not destined to us.
	 */
2074 	tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR;
2075 	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
2076 		tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR;
2077 #ifndef IEEE80211_STA_ONLY
2078 		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
2079 #endif
2080 			tmp |= RAL_DROP_TODS;
2081 		if (!(ifp->if_flags & IFF_PROMISC))
2082 			tmp |= RAL_DROP_NOT_TO_ME;
2083 	}
2084 	ural_write(sc, RAL_TXRX_CSR2, tmp);
2085 
2086 	ifp->if_flags &= ~IFF_OACTIVE;
2087 	ifp->if_flags |= IFF_RUNNING;
2088 
2089 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
2090 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2091 	else
2092 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2093 
2094 	return 0;
2095 
2096 fail:	ural_stop(ifp, 1);
2097 	return error;
2098 }
2099 
2100 void
2101 ural_stop(struct ifnet *ifp, int disable)
2102 {
2103 	struct ural_softc *sc = ifp->if_softc;
2104 	struct ieee80211com *ic = &sc->sc_ic;
2105 
2106 	sc->sc_tx_timer = 0;
2107 	ifp->if_timer = 0;
2108 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2109 
2110 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */
2111 
2112 	/* disable Rx */
2113 	ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);
2114 
2115 	/* reset ASIC and BBP (but won't reset MAC registers!) */
2116 	ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
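	/* then take the ASIC and BBP out of reset */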
2117 	ural_write(sc, RAL_MAC_CSR1, 0);
2118 
2119 	if (sc->amrr_xfer != NULL) {
2120 		usbd_free_xfer(sc->amrr_xfer);
2121 		sc->amrr_xfer = NULL;
2122 	}
2123 	if (sc->sc_rx_pipeh != NULL) {
2124 		usbd_abort_pipe(sc->sc_rx_pipeh);
2125 		usbd_close_pipe(sc->sc_rx_pipeh);
2126 		sc->sc_rx_pipeh = NULL;
2127 	}
2128 	if (sc->sc_tx_pipeh != NULL) {
2129 		usbd_abort_pipe(sc->sc_tx_pipeh);
2130 		usbd_close_pipe(sc->sc_tx_pipeh);
2131 		sc->sc_tx_pipeh = NULL;
2132 	}
2133 
2134 	ural_free_rx_list(sc);
2135 	ural_free_tx_list(sc);
2136 }
2137 
2138 void
2139 ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2140 {
2141 	/* start with lowest Tx rate */
2142 	ni->ni_txrate = 0;
2143 }
2144 
2145 void
2146 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni)
2147 {
2148 	int i;
2149 
2150 	/* reading the statistics registers (STA_CSR0 to STA_CSR10) clears them */
2151 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
2152 
2153 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
2154 
2155 	/* set rate to a reasonable initial value: highest rate <= 72 (36 Mb/s) */
2156 	for (i = ni->ni_rates.rs_nrates - 1;
2157 	     i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72;
2158 	     i--);
2159 	ni->ni_txrate = i;
2160 
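	/* start polling Tx statistics once a second */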
2161 	if (!usbd_is_dying(sc->sc_udev))
2162 		timeout_add_sec(&sc->amrr_to, 1);
2163 }
2164 
2165 void
2166 ural_amrr_timeout(void *arg)
2167 {
2168 	struct ural_softc *sc = arg;
2169 	usb_device_request_t req;
2170 	int s;
2171 
2172 	if (usbd_is_dying(sc->sc_udev))
2173 		return;
2174 
2175 	usbd_ref_incr(sc->sc_udev);
2176 
2177 	s = splusb();
2178 
2179 	/*
2180 	 * Asynchronously read the statistics registers (cleared by read);
	 * ural_amrr_update() is invoked on completion.
2181 	 */
2182 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
2183 	req.bRequest = RAL_READ_MULTI_MAC;
2184 	USETW(req.wValue, 0);
2185 	USETW(req.wIndex, RAL_STA_CSR0);
2186 	USETW(req.wLength, sizeof sc->sta);
2187 
2188 	usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
2189 	    USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
2190 	    ural_amrr_update);
2191 	(void)usbd_transfer(sc->amrr_xfer);
2192 
2193 	splx(s);
2194 
2195 	usbd_ref_decr(sc->sc_udev);
2196 }
2197 
2198 void
2199 ural_amrr_update(usbd_xfer_handle xfer, usbd_private_handle priv,
2200     usbd_status status)
2201 {
2202 	struct ural_softc *sc = (struct ural_softc *)priv;
2203 	struct ifnet *ifp = &sc->sc_ic.ic_if;
2204 
2205 	if (status != USBD_NORMAL_COMPLETION) {
2206 		printf("%s: could not retrieve Tx statistics - cancelling "
2207 		    "automatic rate control\n", sc->sc_dev.dv_xname);
2208 		return;
2209 	}
2210 
2211 	/* count TX retry-fail as Tx errors */
2212 	ifp->if_oerrors += letoh16(sc->sta[9]);
2213 
2214 	sc->amn.amn_retrycnt =
2215 	    letoh16(sc->sta[7]) +	/* TX one-retry ok count */
2216 	    letoh16(sc->sta[8]) +	/* TX more-retry ok count */
2217 	    letoh16(sc->sta[9]);	/* TX retry-fail count */
2218 
2219 	sc->amn.amn_txcnt =
2220 	    sc->amn.amn_retrycnt +
2221 	    letoh16(sc->sta[6]);	/* TX no-retry ok count */
2222 
2223 	ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);
2224 
2225 	if (!usbd_is_dying(sc->sc_udev))
2226 		timeout_add_sec(&sc->amrr_to, 1);
2227 }
2228 
2229 int
2230 ural_activate(struct device *self, int act)
2231 {
2232 	struct ural_softc *sc = (struct ural_softc *)self;
2233 
2234 	switch (act) {
2235 	case DVACT_DEACTIVATE:
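		/* mark device as dying; timeouts check usbd_is_dying() */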
2236 		usbd_deactivate(sc->sc_udev);
2237 		break;
2238 	}
2239 
2240 	return 0;
2241 }
2242