1 /*	$OpenBSD: if_ral.c,v 1.140 2016/07/20 10:24:43 stsp Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005, 2006
5  *	Damien Bergamini <damien.bergamini@free.fr>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Ralink Technology RT2500USB chipset driver
22  * http://www.ralinktech.com.tw/
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/timeout.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 
38 #include <machine/intr.h>
39 
40 #if NBPFILTER > 0
41 #include <net/bpf.h>
42 #endif
43 #include <net/if.h>
44 #include <net/if_dl.h>
45 #include <net/if_media.h>
46 
47 #include <netinet/in.h>
48 #include <netinet/if_ether.h>
49 
50 #include <net80211/ieee80211_var.h>
51 #include <net80211/ieee80211_amrr.h>
52 #include <net80211/ieee80211_radiotap.h>
53 
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usbdi.h>
56 #include <dev/usb/usbdi_util.h>
57 #include <dev/usb/usbdevs.h>
58 
59 #include <dev/usb/if_ralreg.h>
60 #include <dev/usb/if_ralvar.h>
61 
62 #ifdef URAL_DEBUG
63 #define DPRINTF(x)	do { if (ural_debug) printf x; } while (0)
64 #define DPRINTFN(n, x)	do { if (ural_debug >= (n)) printf x; } while (0)
65 int ural_debug = 0;
66 #else
67 #define DPRINTF(x)
68 #define DPRINTFN(n, x)
69 #endif
70 
71 /* various supported device vendors/products */
72 static const struct usb_devno ural_devs[] = {
73 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570 },
74 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570_2 },
75 	{ USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D7050 },
76 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54G },
77 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
78 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_HU200TS },
79 	{ USB_VENDOR_CONCEPTRONIC2,	USB_PRODUCT_CONCEPTRONIC2_C54RU },
80 	{ USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_RT2570 },
81 	{ USB_VENDOR_GIGABYTE,		USB_PRODUCT_GIGABYTE_GNWBKG },
82 	{ USB_VENDOR_GUILLEMOT,		USB_PRODUCT_GUILLEMOT_HWGUSB254 },
83 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54 },
84 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54AI },
85 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54YB },
86 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_NINWIFI },
87 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570 },
88 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_2 },
89 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_3 },
90 	{ USB_VENDOR_NOVATECH,		USB_PRODUCT_NOVATECH_NV902W },
91 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570 },
92 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_2 },
93 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_3 },
94 	{ USB_VENDOR_SPHAIRON,		USB_PRODUCT_SPHAIRON_UB801R },
95 	{ USB_VENDOR_SURECOM,		USB_PRODUCT_SURECOM_RT2570 },
96 	{ USB_VENDOR_VTECH,		USB_PRODUCT_VTECH_RT2570 },
97 	{ USB_VENDOR_ZINWELL,		USB_PRODUCT_ZINWELL_RT2570 }
98 };
99 
100 int		ural_alloc_tx_list(struct ural_softc *);
101 void		ural_free_tx_list(struct ural_softc *);
102 int		ural_alloc_rx_list(struct ural_softc *);
103 void		ural_free_rx_list(struct ural_softc *);
104 int		ural_media_change(struct ifnet *);
105 void		ural_next_scan(void *);
106 void		ural_task(void *);
107 int		ural_newstate(struct ieee80211com *, enum ieee80211_state,
108 		    int);
109 void		ural_txeof(struct usbd_xfer *, void *, usbd_status);
110 void		ural_rxeof(struct usbd_xfer *, void *, usbd_status);
111 #if NBPFILTER > 0
112 uint8_t		ural_rxrate(const struct ural_rx_desc *);
113 #endif
114 int		ural_ack_rate(struct ieee80211com *, int);
115 uint16_t	ural_txtime(int, int, uint32_t);
116 uint8_t		ural_plcp_signal(int);
117 void		ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
118 		    uint32_t, int, int);
119 #ifndef IEEE80211_STA_ONLY
120 int		ural_tx_bcn(struct ural_softc *, struct mbuf *,
121 		    struct ieee80211_node *);
122 #endif
123 int		ural_tx_data(struct ural_softc *, struct mbuf *,
124 		    struct ieee80211_node *);
125 void		ural_start(struct ifnet *);
126 void		ural_watchdog(struct ifnet *);
127 int		ural_ioctl(struct ifnet *, u_long, caddr_t);
128 void		ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
129 uint16_t	ural_read(struct ural_softc *, uint16_t);
130 void		ural_read_multi(struct ural_softc *, uint16_t, void *, int);
131 void		ural_write(struct ural_softc *, uint16_t, uint16_t);
132 void		ural_write_multi(struct ural_softc *, uint16_t, void *, int);
133 void		ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
134 uint8_t		ural_bbp_read(struct ural_softc *, uint8_t);
135 void		ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
136 void		ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
137 void		ural_disable_rf_tune(struct ural_softc *);
138 void		ural_enable_tsf_sync(struct ural_softc *);
139 void		ural_update_slot(struct ural_softc *);
140 void		ural_set_txpreamble(struct ural_softc *);
141 void		ural_set_basicrates(struct ural_softc *);
142 void		ural_set_bssid(struct ural_softc *, const uint8_t *);
143 void		ural_set_macaddr(struct ural_softc *, const uint8_t *);
144 void		ural_update_promisc(struct ural_softc *);
145 const char	*ural_get_rf(int);
146 void		ural_read_eeprom(struct ural_softc *);
147 int		ural_bbp_init(struct ural_softc *);
148 void		ural_set_txantenna(struct ural_softc *, int);
149 void		ural_set_rxantenna(struct ural_softc *, int);
150 int		ural_init(struct ifnet *);
151 void		ural_stop(struct ifnet *, int);
152 void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
153 		    int);
154 void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
155 void		ural_amrr_timeout(void *);
156 void		ural_amrr_update(struct usbd_xfer *, void *,
157 		    usbd_status status);
158 
159 static const struct {
160 	uint16_t	reg;
161 	uint16_t	val;
162 } ural_def_mac[] = {
163 	RAL_DEF_MAC
164 };
165 
166 static const struct {
167 	uint8_t	reg;
168 	uint8_t	val;
169 } ural_def_bbp[] = {
170 	RAL_DEF_BBP
171 };
172 
173 static const uint32_t ural_rf2522_r2[] =    RAL_RF2522_R2;
174 static const uint32_t ural_rf2523_r2[] =    RAL_RF2523_R2;
175 static const uint32_t ural_rf2524_r2[] =    RAL_RF2524_R2;
176 static const uint32_t ural_rf2525_r2[] =    RAL_RF2525_R2;
177 static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
178 static const uint32_t ural_rf2525e_r2[] =   RAL_RF2525E_R2;
179 static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
180 static const uint32_t ural_rf2526_r2[] =    RAL_RF2526_R2;
181 
182 int ural_match(struct device *, void *, void *);
183 void ural_attach(struct device *, struct device *, void *);
184 int ural_detach(struct device *, int);
185 
186 struct cfdriver ural_cd = {
187 	NULL, "ural", DV_IFNET
188 };
189 
190 const struct cfattach ural_ca = {
191 	sizeof(struct ural_softc), ural_match, ural_attach, ural_detach
192 };
193 
194 int
195 ural_match(struct device *parent, void *match, void *aux)
196 {
197 	struct usb_attach_arg *uaa = aux;
198 
199 	if (uaa->iface != NULL)
200 		return UMATCH_NONE;
201 
202 	return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
203 	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
204 }
205 
206 void
207 ural_attach(struct device *parent, struct device *self, void *aux)
208 {
209 	struct ural_softc *sc = (struct ural_softc *)self;
210 	struct usb_attach_arg *uaa = aux;
211 	struct ieee80211com *ic = &sc->sc_ic;
212 	struct ifnet *ifp = &ic->ic_if;
213 	usb_interface_descriptor_t *id;
214 	usb_endpoint_descriptor_t *ed;
215 	usbd_status error;
216 	int i;
217 
218 	sc->sc_udev = uaa->device;
219 
220 	if (usbd_set_config_no(sc->sc_udev, RAL_CONFIG_NO, 0) != 0) {
221 		printf("%s: could not set configuration no\n",
222 		    sc->sc_dev.dv_xname);
223 		return;
224 	}
225 
226 	/* get the first interface handle */
227 	error = usbd_device2interface_handle(sc->sc_udev, RAL_IFACE_INDEX,
228 	    &sc->sc_iface);
229 	if (error != 0) {
230 		printf("%s: could not get interface handle\n",
231 		    sc->sc_dev.dv_xname);
232 		return;
233 	}
234 
235 	/*
236 	 * Find endpoints.
237 	 */
238 	id = usbd_get_interface_descriptor(sc->sc_iface);
239 
240 	sc->sc_rx_no = sc->sc_tx_no = -1;
241 	for (i = 0; i < id->bNumEndpoints; i++) {
242 		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
243 		if (ed == NULL) {
244 			printf("%s: no endpoint descriptor for iface %d\n",
245 			    sc->sc_dev.dv_xname, i);
246 			return;
247 		}
248 
249 		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
250 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
251 			sc->sc_rx_no = ed->bEndpointAddress;
252 		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
253 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
254 			sc->sc_tx_no = ed->bEndpointAddress;
255 	}
256 	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
257 		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
258 		return;
259 	}
260 
261 	usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC);
262 	timeout_set(&sc->scan_to, ural_next_scan, sc);
263 
264 	sc->amrr.amrr_min_success_threshold =  1;
265 	sc->amrr.amrr_max_success_threshold = 10;
266 	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);
267 
268 	/* retrieve RT2570 rev. no */
269 	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);
270 
271 	/* retrieve MAC address and various other things from EEPROM */
272 	ural_read_eeprom(sc);
273 
274 	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
275 	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
276 	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));
277 
278 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
279 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
280 	ic->ic_state = IEEE80211_S_INIT;
281 
282 	/* set device capabilities */
283 	ic->ic_caps =
284 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
285 #ifndef IEEE80211_STA_ONLY
286 	    IEEE80211_C_IBSS |		/* IBSS mode supported */
287 	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
288 #endif
289 	    IEEE80211_C_TXPMGT |	/* tx power management */
290 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
291 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
292 	    IEEE80211_C_WEP |		/* s/w WEP */
293 	    IEEE80211_C_RSN;		/* WPA/RSN */
294 
295 	/* set supported .11b and .11g rates */
296 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
297 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
298 
299 	/* set supported .11b and .11g channels (1 through 14) */
300 	for (i = 1; i <= 14; i++) {
301 		ic->ic_channels[i].ic_freq =
302 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
303 		ic->ic_channels[i].ic_flags =
304 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
305 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
306 	}
307 
308 	ifp->if_softc = sc;
309 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
310 	ifp->if_ioctl = ural_ioctl;
311 	ifp->if_start = ural_start;
312 	ifp->if_watchdog = ural_watchdog;
313 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
314 
315 	if_attach(ifp);
316 	ieee80211_ifattach(ifp);
317 	ic->ic_newassoc = ural_newassoc;
318 
319 	/* override state transition machine */
320 	sc->sc_newstate = ic->ic_newstate;
321 	ic->ic_newstate = ural_newstate;
322 	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);
323 
324 #if NBPFILTER > 0
325 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
326 	    sizeof (struct ieee80211_frame) + 64);
327 
328 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
329 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
330 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);
331 
332 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
333 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
334 	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
335 #endif
336 }
337 
338 int
339 ural_detach(struct device *self, int flags)
340 {
341 	struct ural_softc *sc = (struct ural_softc *)self;
342 	struct ifnet *ifp = &sc->sc_ic.ic_if;
343 	int s;
344 
345 	s = splusb();
346 
347 	if (timeout_initialized(&sc->scan_to))
348 		timeout_del(&sc->scan_to);
349 	if (timeout_initialized(&sc->amrr_to))
350 		timeout_del(&sc->amrr_to);
351 
352 	usb_rem_wait_task(sc->sc_udev, &sc->sc_task);
353 
354 	usbd_ref_wait(sc->sc_udev);
355 
356 	if (ifp->if_softc != NULL) {
357 		ieee80211_ifdetach(ifp);	/* free all nodes */
358 		if_detach(ifp);
359 	}
360 
361 	if (sc->amrr_xfer != NULL) {
362 		usbd_free_xfer(sc->amrr_xfer);
363 		sc->amrr_xfer = NULL;
364 	}
365 
366 	if (sc->sc_rx_pipeh != NULL) {
367 		usbd_abort_pipe(sc->sc_rx_pipeh);
368 		usbd_close_pipe(sc->sc_rx_pipeh);
369 	}
370 
371 	if (sc->sc_tx_pipeh != NULL) {
372 		usbd_abort_pipe(sc->sc_tx_pipeh);
373 		usbd_close_pipe(sc->sc_tx_pipeh);
374 	}
375 
376 	ural_free_rx_list(sc);
377 	ural_free_tx_list(sc);
378 
379 	splx(s);
380 
381 	return 0;
382 }
383 
384 int
385 ural_alloc_tx_list(struct ural_softc *sc)
386 {
387 	int i, error;
388 
389 	sc->tx_cur = sc->tx_queued = 0;
390 
391 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
392 		struct ural_tx_data *data = &sc->tx_data[i];
393 
394 		data->sc = sc;
395 
396 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
397 		if (data->xfer == NULL) {
398 			printf("%s: could not allocate tx xfer\n",
399 			    sc->sc_dev.dv_xname);
400 			error = ENOMEM;
401 			goto fail;
402 		}
403 		data->buf = usbd_alloc_buffer(data->xfer,
404 		    RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
405 		if (data->buf == NULL) {
406 			printf("%s: could not allocate tx buffer\n",
407 			    sc->sc_dev.dv_xname);
408 			error = ENOMEM;
409 			goto fail;
410 		}
411 	}
412 
413 	return 0;
414 
415 fail:	ural_free_tx_list(sc);
416 	return error;
417 }
418 
419 void
420 ural_free_tx_list(struct ural_softc *sc)
421 {
422 	int i;
423 
424 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
425 		struct ural_tx_data *data = &sc->tx_data[i];
426 
427 		if (data->xfer != NULL) {
428 			usbd_free_xfer(data->xfer);
429 			data->xfer = NULL;
430 		}
431 		/*
432 		 * The node has already been freed at that point so don't call
433 		 * ieee80211_release_node() here.
434 		 */
435 		data->ni = NULL;
436 	}
437 }
438 
439 int
440 ural_alloc_rx_list(struct ural_softc *sc)
441 {
442 	int i, error;
443 
444 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
445 		struct ural_rx_data *data = &sc->rx_data[i];
446 
447 		data->sc = sc;
448 
449 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
450 		if (data->xfer == NULL) {
451 			printf("%s: could not allocate rx xfer\n",
452 			    sc->sc_dev.dv_xname);
453 			error = ENOMEM;
454 			goto fail;
455 		}
456 		if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
457 			printf("%s: could not allocate rx buffer\n",
458 			    sc->sc_dev.dv_xname);
459 			error = ENOMEM;
460 			goto fail;
461 		}
462 
463 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
464 		if (data->m == NULL) {
465 			printf("%s: could not allocate rx mbuf\n",
466 			    sc->sc_dev.dv_xname);
467 			error = ENOMEM;
468 			goto fail;
469 		}
470 		MCLGET(data->m, M_DONTWAIT);
471 		if (!(data->m->m_flags & M_EXT)) {
472 			printf("%s: could not allocate rx mbuf cluster\n",
473 			    sc->sc_dev.dv_xname);
474 			error = ENOMEM;
475 			goto fail;
476 		}
477 		data->buf = mtod(data->m, uint8_t *);
478 	}
479 
480 	return 0;
481 
482 fail:	ural_free_rx_list(sc);
483 	return error;
484 }
485 
486 void
487 ural_free_rx_list(struct ural_softc *sc)
488 {
489 	int i;
490 
491 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
492 		struct ural_rx_data *data = &sc->rx_data[i];
493 
494 		if (data->xfer != NULL) {
495 			usbd_free_xfer(data->xfer);
496 			data->xfer = NULL;
497 		}
498 		if (data->m != NULL) {
499 			m_freem(data->m);
500 			data->m = NULL;
501 		}
502 	}
503 }
504 
505 int
506 ural_media_change(struct ifnet *ifp)
507 {
508 	int error;
509 
510 	error = ieee80211_media_change(ifp);
511 	if (error != ENETRESET)
512 		return error;
513 
514 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
515 		ural_init(ifp);
516 
517 	return 0;
518 }
519 
520 /*
521  * This function is called periodically (every 200ms) during scanning to
522  * switch from one channel to another.
523  */
524 void
525 ural_next_scan(void *arg)
526 {
527 	struct ural_softc *sc = arg;
528 	struct ieee80211com *ic = &sc->sc_ic;
529 	struct ifnet *ifp = &ic->ic_if;
530 
531 	if (usbd_is_dying(sc->sc_udev))
532 		return;
533 
534 	usbd_ref_incr(sc->sc_udev);
535 
536 	if (ic->ic_state == IEEE80211_S_SCAN)
537 		ieee80211_next_scan(ifp);
538 
539 	usbd_ref_decr(sc->sc_udev);
540 }
541 
542 void
543 ural_task(void *arg)
544 {
545 	struct ural_softc *sc = arg;
546 	struct ieee80211com *ic = &sc->sc_ic;
547 	enum ieee80211_state ostate;
548 	struct ieee80211_node *ni;
549 
550 	if (usbd_is_dying(sc->sc_udev))
551 		return;
552 
553 	ostate = ic->ic_state;
554 
555 	switch (sc->sc_state) {
556 	case IEEE80211_S_INIT:
557 		if (ostate == IEEE80211_S_RUN) {
558 			/* abort TSF synchronization */
559 			ural_write(sc, RAL_TXRX_CSR19, 0);
560 
561 			/* force tx led to stop blinking */
562 			ural_write(sc, RAL_MAC_CSR20, 0);
563 		}
564 		break;
565 
566 	case IEEE80211_S_SCAN:
567 		ural_set_chan(sc, ic->ic_bss->ni_chan);
568 		if (!usbd_is_dying(sc->sc_udev))
569 			timeout_add_msec(&sc->scan_to, 200);
570 		break;
571 
572 	case IEEE80211_S_AUTH:
573 		ural_set_chan(sc, ic->ic_bss->ni_chan);
574 		break;
575 
576 	case IEEE80211_S_ASSOC:
577 		ural_set_chan(sc, ic->ic_bss->ni_chan);
578 		break;
579 
580 	case IEEE80211_S_RUN:
581 		ural_set_chan(sc, ic->ic_bss->ni_chan);
582 
583 		ni = ic->ic_bss;
584 
585 		if (ic->ic_opmode != IEEE80211_M_MONITOR) {
586 			ural_update_slot(sc);
587 			ural_set_txpreamble(sc);
588 			ural_set_basicrates(sc);
589 			ural_set_bssid(sc, ni->ni_bssid);
590 		}
591 
592 #ifndef IEEE80211_STA_ONLY
593 		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
594 		    ic->ic_opmode == IEEE80211_M_IBSS) {
595 			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
596 			if (m == NULL) {
597 				printf("%s: could not allocate beacon\n",
598 				    sc->sc_dev.dv_xname);
599 				return;
600 			}
601 
602 			if (ural_tx_bcn(sc, m, ni) != 0) {
603 				m_freem(m);
604 				printf("%s: could not transmit beacon\n",
605 				    sc->sc_dev.dv_xname);
606 				return;
607 			}
608 
609 			/* beacon is no longer needed */
610 			m_freem(m);
611 		}
612 #endif
613 
614 		/* make tx led blink on tx (controlled by ASIC) */
615 		ural_write(sc, RAL_MAC_CSR20, 1);
616 
617 		if (ic->ic_opmode != IEEE80211_M_MONITOR)
618 			ural_enable_tsf_sync(sc);
619 
620 		if (ic->ic_opmode == IEEE80211_M_STA) {
621 			/* fake a join to init the tx rate */
622 			ural_newassoc(ic, ic->ic_bss, 1);
623 
624 			/* enable automatic rate control in STA mode */
625 			if (ic->ic_fixed_rate == -1)
626 				ural_amrr_start(sc, ic->ic_bss);
627 		}
628 
629 		break;
630 	}
631 
632 	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
633 }
634 
635 int
636 ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
637 {
638 	struct ural_softc *sc = ic->ic_if.if_softc;
639 
640 	usb_rem_task(sc->sc_udev, &sc->sc_task);
641 	timeout_del(&sc->scan_to);
642 	timeout_del(&sc->amrr_to);
643 
644 	/* do it in a process context */
645 	sc->sc_state = nstate;
646 	sc->sc_arg = arg;
647 	usb_add_task(sc->sc_udev, &sc->sc_task);
648 	return 0;
649 }
650 
651 /* quickly determine if a given rate is CCK or OFDM */
652 #define RAL_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
653 
654 #define RAL_ACK_SIZE	14	/* 10 + 4(FCS) */
655 #define RAL_CTS_SIZE	14	/* 10 + 4(FCS) */
656 
657 #define RAL_SIFS		10	/* us */
658 
659 #define RAL_RXTX_TURNAROUND	5	/* us */
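/*
 * Editor's note: throughout this driver, rate values are expressed in the
 * IEEE 802.11 unit of 500 kb/s, so 2 = 1 Mb/s, 4 = 2 Mb/s, 11 = 5.5 Mb/s and
 * 22 = 11 Mb/s (the CCK rates), while 12/18/24/36/48/72/96/108 = 6..54 Mb/s
 * (the OFDM rates).  RAL_RATE_IS_OFDM() relies on this encoding: every value
 * of 12 or more is OFDM except 22, which is 11 Mb/s CCK.
 */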
660 
661 void
662 ural_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
663 {
664 	struct ural_tx_data *data = priv;
665 	struct ural_softc *sc = data->sc;
666 	struct ieee80211com *ic = &sc->sc_ic;
667 	struct ifnet *ifp = &ic->ic_if;
668 	int s;
669 
670 	if (status != USBD_NORMAL_COMPLETION) {
671 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
672 			return;
673 
674 		printf("%s: could not transmit buffer: %s\n",
675 		    sc->sc_dev.dv_xname, usbd_errstr(status));
676 
677 		if (status == USBD_STALLED)
678 			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);
679 
680 		ifp->if_oerrors++;
681 		return;
682 	}
683 
684 	s = splnet();
685 
686 	ieee80211_release_node(ic, data->ni);
687 	data->ni = NULL;
688 
689 	sc->tx_queued--;
690 	ifp->if_opackets++;
691 
692 	DPRINTFN(10, ("tx done\n"));
693 
694 	sc->sc_tx_timer = 0;
695 	ifq_clr_oactive(&ifp->if_snd);
696 	ural_start(ifp);
697 
698 	splx(s);
699 }
700 
701 void
702 ural_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
703 {
704 	struct ural_rx_data *data = priv;
705 	struct ural_softc *sc = data->sc;
706 	struct ieee80211com *ic = &sc->sc_ic;
707 	struct ifnet *ifp = &ic->ic_if;
708 	const struct ural_rx_desc *desc;
709 	struct ieee80211_frame *wh;
710 	struct ieee80211_rxinfo rxi;
711 	struct ieee80211_node *ni;
712 	struct mbuf *mnew, *m;
713 	int s, len;
714 
715 	if (status != USBD_NORMAL_COMPLETION) {
716 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
717 			return;
718 
719 		if (status == USBD_STALLED)
720 			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
721 		goto skip;
722 	}
723 
724 	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);
725 
726 	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
727 		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
728 		    len));
729 		ifp->if_ierrors++;
730 		goto skip;
731 	}
732 
733 	/* rx descriptor is located at the end */
734 	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);
735 
736 	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
737 		/*
738 		 * This should not happen since we did not request to receive
739 		 * those frames when we filled RAL_TXRX_CSR2.
740 		 */
741 		DPRINTFN(5, ("PHY or CRC error\n"));
742 		ifp->if_ierrors++;
743 		goto skip;
744 	}
745 
746 	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
747 	if (mnew == NULL) {
748 		printf("%s: could not allocate rx mbuf\n",
749 		    sc->sc_dev.dv_xname);
750 		ifp->if_ierrors++;
751 		goto skip;
752 	}
753 	MCLGET(mnew, M_DONTWAIT);
754 	if (!(mnew->m_flags & M_EXT)) {
755 		printf("%s: could not allocate rx mbuf cluster\n",
756 		    sc->sc_dev.dv_xname);
757 		m_freem(mnew);
758 		ifp->if_ierrors++;
759 		goto skip;
760 	}
761 	m = data->m;
762 	data->m = mnew;
763 	data->buf = mtod(data->m, uint8_t *);
764 
765 	/* finalize mbuf */
766 	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;
767 
768 	s = splnet();
769 
770 #if NBPFILTER > 0
771 	if (sc->sc_drvbpf != NULL) {
772 		struct mbuf mb;
773 		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;
774 
775 		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
776 		tap->wr_rate = ural_rxrate(desc);
777 		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
778 		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
779 		tap->wr_antenna = sc->rx_ant;
780 		tap->wr_antsignal = desc->rssi;
781 
782 		mb.m_data = (caddr_t)tap;
783 		mb.m_len = sc->sc_rxtap_len;
784 		mb.m_next = m;
785 		mb.m_nextpkt = NULL;
786 		mb.m_type = 0;
787 		mb.m_flags = 0;
788 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
789 	}
790 #endif
791 	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */
792 
793 	wh = mtod(m, struct ieee80211_frame *);
794 	ni = ieee80211_find_rxnode(ic, wh);
795 
796 	/* send the frame to the 802.11 layer */
797 	rxi.rxi_flags = 0;
798 	rxi.rxi_rssi = desc->rssi;
799 	rxi.rxi_tstamp = 0;	/* unused */
800 	ieee80211_input(ifp, m, ni, &rxi);
801 
802 	/* node is no longer needed */
803 	ieee80211_release_node(ic, ni);
804 
805 	splx(s);
806 
807 	DPRINTFN(15, ("rx done\n"));
808 
809 skip:	/* setup a new transfer */
810 	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
811 	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
812 	(void)usbd_transfer(xfer);
813 }
814 
815 /*
816  * This function is only used by the Rx radiotap code. It returns the rate at
817  * which a given frame was received.
818  */
819 #if NBPFILTER > 0
820 uint8_t
821 ural_rxrate(const struct ural_rx_desc *desc)
822 {
823 	if (letoh32(desc->flags) & RAL_RX_OFDM) {
824 		/* reverse function of ural_plcp_signal */
825 		switch (desc->rate) {
826 		case 0xb:	return 12;
827 		case 0xf:	return 18;
828 		case 0xa:	return 24;
829 		case 0xe:	return 36;
830 		case 0x9:	return 48;
831 		case 0xd:	return 72;
832 		case 0x8:	return 96;
833 		case 0xc:	return 108;
834 		}
835 	} else {
836 		if (desc->rate == 10)
837 			return 2;
838 		if (desc->rate == 20)
839 			return 4;
840 		if (desc->rate == 55)
841 			return 11;
842 		if (desc->rate == 110)
843 			return 22;
844 	}
 845 	return 2;	/* should not get here */
846 }
847 #endif
848 
849 /*
850  * Return the expected ack rate for a frame transmitted at rate `rate'.
851  */
852 int
853 ural_ack_rate(struct ieee80211com *ic, int rate)
854 {
855 	switch (rate) {
856 	/* CCK rates */
857 	case 2:
858 		return 2;
859 	case 4:
860 	case 11:
861 	case 22:
862 		return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;
863 
864 	/* OFDM rates */
865 	case 12:
866 	case 18:
867 		return 12;
868 	case 24:
869 	case 36:
870 		return 24;
871 	case 48:
872 	case 72:
873 	case 96:
874 	case 108:
875 		return 48;
876 	}
877 
878 	/* default to 1Mbps */
879 	return 2;
880 }
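/*
 * Editor's note: the table above approximates the 802.11 control-response
 * rule (answer at the highest basic rate not faster than the received
 * frame): OFDM frames are acknowledged at the nearest mandatory OFDM rate
 * below them (6, 12 or 24 Mb/s), while CCK frames above 1 Mb/s are
 * acknowledged at 2 Mb/s in pure 11b mode and at their own rate otherwise.
 */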
881 
882 /*
883  * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
884  * The function automatically determines the operating mode depending on the
885  * given rate. `flags' indicates whether short preamble is in use or not.
886  */
887 uint16_t
888 ural_txtime(int len, int rate, uint32_t flags)
889 {
890 	uint16_t txtime;
891 
892 	if (RAL_RATE_IS_OFDM(rate)) {
893 		/* IEEE Std 802.11g-2003, pp. 44 */
894 		txtime = (8 + 4 * len + 3 + rate - 1) / rate;
895 		txtime = 16 + 4 + 4 * txtime + 6;
896 	} else {
897 		/* IEEE Std 802.11b-1999, pp. 28 */
898 		txtime = (16 * len + rate - 1) / rate;
899 		if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
900 			txtime +=  72 + 24;
901 		else
902 			txtime += 144 + 48;
903 	}
904 	return txtime;
905 }
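/*
 * Editor's note, worked example: a 14-byte ACK at 2 Mb/s (rate = 4) with a
 * long preamble takes (16 * 14) / 4 = 56 us of payload time plus 144 + 48 us
 * of preamble and PLCP header, i.e. 248 us.  The same ACK at 6 Mb/s
 * (rate = 12) needs (8 + 4*14 + 3 + 11) / 12 = 6 OFDM symbols, giving
 * 16 + 4 + 4*6 + 6 = 50 us.
 */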
906 
907 uint8_t
908 ural_plcp_signal(int rate)
909 {
910 	switch (rate) {
911 	/* CCK rates (returned values are device-dependent) */
912 	case 2:		return 0x0;
913 	case 4:		return 0x1;
914 	case 11:	return 0x2;
915 	case 22:	return 0x3;
916 
917 	/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
918 	case 12:	return 0xb;
919 	case 18:	return 0xf;
920 	case 24:	return 0xa;
921 	case 36:	return 0xe;
922 	case 48:	return 0x9;
923 	case 72:	return 0xd;
924 	case 96:	return 0x8;
925 	case 108:	return 0xc;
926 
 927 	/* unsupported rates (should not get here) */
928 	default:	return 0xff;
929 	}
930 }
931 
932 void
933 ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
934     uint32_t flags, int len, int rate)
935 {
936 	struct ieee80211com *ic = &sc->sc_ic;
937 	uint16_t plcp_length;
938 	int remainder;
939 
940 	desc->flags = htole32(flags);
941 	desc->flags |= htole32(len << 16);
942 
943 	desc->wme = htole16(
944 	    RAL_AIFSN(2) |
945 	    RAL_LOGCWMIN(3) |
946 	    RAL_LOGCWMAX(5));
947 
948 	/* setup PLCP fields */
949 	desc->plcp_signal  = ural_plcp_signal(rate);
950 	desc->plcp_service = 4;
951 
952 	len += IEEE80211_CRC_LEN;
953 	if (RAL_RATE_IS_OFDM(rate)) {
954 		desc->flags |= htole32(RAL_TX_OFDM);
955 
956 		plcp_length = len & 0xfff;
957 		desc->plcp_length_hi = plcp_length >> 6;
958 		desc->plcp_length_lo = plcp_length & 0x3f;
959 	} else {
960 		plcp_length = (16 * len + rate - 1) / rate;
961 		if (rate == 22) {
962 			remainder = (16 * len) % 22;
963 			if (remainder != 0 && remainder < 7)
964 				desc->plcp_service |= RAL_PLCP_LENGEXT;
965 		}
966 		desc->plcp_length_hi = plcp_length >> 8;
967 		desc->plcp_length_lo = plcp_length & 0xff;
968 
969 		if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
970 			desc->plcp_signal |= 0x08;
971 	}
972 
973 	desc->iv = 0;
974 	desc->eiv = 0;
975 }
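/*
 * Editor's note, worked example: for an 11 Mb/s (rate = 22) CCK frame of
 * 1024 bytes including FCS, plcp_length = (16*1024 + 21) / 22 = 745 us and
 * (16*1024) % 22 = 16, so the length-extension bit stays clear.  The
 * RAL_PLCP_LENGEXT bit is only set when the remainder falls in 1..6, which
 * lets the receiver recover the exact octet count from the rounded-up
 * microsecond length field at 11 Mb/s.
 */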
976 
977 #define RAL_TX_TIMEOUT	5000
978 
979 #ifndef IEEE80211_STA_ONLY
980 int
981 ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
982 {
983 	struct ural_tx_desc *desc;
984 	struct usbd_xfer *xfer;
985 	usbd_status error;
986 	uint8_t cmd = 0;
987 	uint8_t *buf;
988 	int xferlen, rate = 2;
989 
990 	xfer = usbd_alloc_xfer(sc->sc_udev);
991 	if (xfer == NULL)
992 		return ENOMEM;
993 
994 	/* xfer length needs to be a multiple of two! */
995 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
996 
997 	buf = usbd_alloc_buffer(xfer, xferlen);
998 	if (buf == NULL) {
999 		usbd_free_xfer(xfer);
1000 		return ENOMEM;
1001 	}
1002 
1003 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd,
1004 	    USBD_FORCE_SHORT_XFER | USBD_SYNCHRONOUS, RAL_TX_TIMEOUT, NULL);
1005 
1006 	error = usbd_transfer(xfer);
1007 	if (error != 0) {
1008 		usbd_free_xfer(xfer);
1009 		return error;
1010 	}
1011 
1012 	desc = (struct ural_tx_desc *)buf;
1013 
1014 	m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE);
1015 	ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP,
1016 	    m0->m_pkthdr.len, rate);
1017 
1018 	DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n",
1019 	    m0->m_pkthdr.len, rate, xferlen));
1020 
1021 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen,
1022 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY | USBD_SYNCHRONOUS,
1023 	    RAL_TX_TIMEOUT, NULL);
1024 
1025 	error = usbd_transfer(xfer);
1026 	usbd_free_xfer(xfer);
1027 
1028 	return error;
1029 }
1030 #endif
1031 
1032 int
1033 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
1034 {
1035 	struct ieee80211com *ic = &sc->sc_ic;
1036 	struct ural_tx_desc *desc;
1037 	struct ural_tx_data *data;
1038 	struct ieee80211_frame *wh;
1039 	struct ieee80211_key *k;
1040 	uint32_t flags = RAL_TX_NEWSEQ;
1041 	uint16_t dur;
1042 	usbd_status error;
1043 	int rate, xferlen, pktlen, needrts = 0, needcts = 0;
1044 
1045 	wh = mtod(m0, struct ieee80211_frame *);
1046 
1047 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1048 		k = ieee80211_get_txkey(ic, wh, ni);
1049 
1050 		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
1051 			return ENOBUFS;
1052 
1053 		/* packet header may have moved, reset our local pointer */
1054 		wh = mtod(m0, struct ieee80211_frame *);
1055 	}
1056 
1057 	/* compute actual packet length (including CRC and crypto overhead) */
1058 	pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
1059 
 1060 	/* pick a rate */
1061 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
1062 	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1063 	     IEEE80211_FC0_TYPE_MGT)) {
1064 		/* mgmt/multicast frames are sent at the lowest avail. rate */
1065 		rate = ni->ni_rates.rs_rates[0];
1066 	} else if (ic->ic_fixed_rate != -1) {
1067 		rate = ic->ic_sup_rates[ic->ic_curmode].
1068 		    rs_rates[ic->ic_fixed_rate];
1069 	} else
 1070 		rate = ni->ni_rates.rs_rates[ni->ni_txrate];
1071 	if (rate == 0)
1072 		rate = 2;	/* XXX should not happen */
1073 	rate &= IEEE80211_RATE_VAL;
1074 
1075 	/* check if RTS/CTS or CTS-to-self protection must be used */
1076 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1077 		/* multicast frames are not sent at OFDM rates in 802.11b/g */
1078 		if (pktlen > ic->ic_rtsthreshold) {
1079 			needrts = 1;	/* RTS/CTS based on frame length */
1080 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1081 		    RAL_RATE_IS_OFDM(rate)) {
1082 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
1083 				needcts = 1;	/* CTS-to-self */
1084 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
1085 				needrts = 1;	/* RTS/CTS */
1086 		}
1087 	}
1088 	if (needrts || needcts) {
1089 		struct mbuf *mprot;
1090 		int protrate, ackrate;
1091 		uint16_t dur;
1092 
1093 		protrate = 2;
1094 		ackrate  = ural_ack_rate(ic, rate);
1095 
1096 		dur = ural_txtime(pktlen, rate, ic->ic_flags) +
1097 		      ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) +
1098 		      2 * RAL_SIFS;
1099 		if (needrts) {
1100 			dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic,
1101 			    protrate), ic->ic_flags) + RAL_SIFS;
1102 			mprot = ieee80211_get_rts(ic, wh, dur);
1103 		} else {
1104 			mprot = ieee80211_get_cts_to_self(ic, dur);
1105 		}
1106 		if (mprot == NULL) {
1107 			printf("%s: could not allocate protection frame\n",
1108 			    sc->sc_dev.dv_xname);
1109 			m_freem(m0);
1110 			return ENOBUFS;
1111 		}
1112 
1113 		data = &sc->tx_data[sc->tx_cur];
1114 		desc = (struct ural_tx_desc *)data->buf;
1115 
1116 		/* avoid multiple free() of the same node for each fragment */
1117 		data->ni = ieee80211_ref_node(ni);
1118 
1119 		m_copydata(mprot, 0, mprot->m_pkthdr.len,
1120 		    data->buf + RAL_TX_DESC_SIZE);
1121 		ural_setup_tx_desc(sc, desc,
1122 		    (needrts ? RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7),
1123 		    mprot->m_pkthdr.len, protrate);
1124 
1125 		/* no roundup necessary here */
1126 		xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len;
1127 
1128 		/* XXX may want to pass the protection frame to BPF */
1129 
1130 		/* mbuf is no longer needed */
1131 		m_freem(mprot);
1132 
1133 		usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf,
1134 		    xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
1135 		    RAL_TX_TIMEOUT, ural_txeof);
1136 		error = usbd_transfer(data->xfer);
1137 		if (error != 0 && error != USBD_IN_PROGRESS) {
1138 			m_freem(m0);
1139 			return error;
1140 		}
1141 
1142 		sc->tx_queued++;
1143 		sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1144 
1145 		flags |= RAL_TX_IFS_SIFS;
1146 	}
1147 
1148 	data = &sc->tx_data[sc->tx_cur];
1149 	desc = (struct ural_tx_desc *)data->buf;
1150 
1151 	data->ni = ni;
1152 
1153 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1154 		flags |= RAL_TX_NEED_ACK;
1155 		flags |= RAL_TX_RETRY(7);
1156 
1157 		dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate),
1158 		    ic->ic_flags) + RAL_SIFS;
1159 		*(uint16_t *)wh->i_dur = htole16(dur);
1160 
1161 #ifndef IEEE80211_STA_ONLY
1162 		/* tell hardware to set timestamp in probe responses */
1163 		if ((wh->i_fc[0] &
1164 		    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
1165 		    (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
1166 			flags |= RAL_TX_TIMESTAMP;
1167 #endif
1168 	}
1169 
1170 #if NBPFILTER > 0
1171 	if (sc->sc_drvbpf != NULL) {
1172 		struct mbuf mb;
1173 		struct ural_tx_radiotap_header *tap = &sc->sc_txtap;
1174 
1175 		tap->wt_flags = 0;
1176 		tap->wt_rate = rate;
1177 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1178 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1179 		tap->wt_antenna = sc->tx_ant;
1180 
1181 		mb.m_data = (caddr_t)tap;
1182 		mb.m_len = sc->sc_txtap_len;
1183 		mb.m_next = m0;
1184 		mb.m_nextpkt = NULL;
1185 		mb.m_type = 0;
1186 		mb.m_flags = 0;
1187 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1188 	}
1189 #endif
1190 
1191 	m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE);
1192 	ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate);
1193 
 1194 	/* align end on a 2-byte boundary */
1195 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
1196 
1197 	/*
1198 	 * No space left in the last URB to store the extra 2 bytes, force
1199 	 * sending of another URB.
1200 	 */
1201 	if ((xferlen % 64) == 0)
1202 		xferlen += 2;
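	/*
	 * Editor's note: 64 bytes is the maximum packet size of a full-speed
	 * USB bulk endpoint, so a transfer whose length is an exact multiple
	 * of 64 would end without a short packet; the two extra bytes added
	 * above presumably force one more (short) USB packet so the chip can
	 * detect the end of the frame, per the comment above.
	 */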
1203 
1204 	DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n",
1205 	    m0->m_pkthdr.len, rate, xferlen));
1206 
1207 	/* mbuf is no longer needed */
1208 	m_freem(m0);
1209 
1210 	usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen,
1211 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof);
1212 	error = usbd_transfer(data->xfer);
1213 	if (error != 0 && error != USBD_IN_PROGRESS)
1214 		return error;
1215 
1216 	sc->tx_queued++;
1217 	sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1218 
1219 	return 0;
1220 }
1221 
1222 void
1223 ural_start(struct ifnet *ifp)
1224 {
1225 	struct ural_softc *sc = ifp->if_softc;
1226 	struct ieee80211com *ic = &sc->sc_ic;
1227 	struct ieee80211_node *ni;
1228 	struct mbuf *m0;
1229 
1230 	/*
1231 	 * net80211 may still try to send management frames even if the
1232 	 * IFF_RUNNING flag is not set...
1233 	 */
1234 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1235 		return;
1236 
1237 	for (;;) {
1238 		if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
1239 			ifq_set_oactive(&ifp->if_snd);
1240 			break;
1241 		}
1242 
1243 		m0 = mq_dequeue(&ic->ic_mgtq);
1244 		if (m0 != NULL) {
1245 			ni = m0->m_pkthdr.ph_cookie;
1246 #if NBPFILTER > 0
1247 			if (ic->ic_rawbpf != NULL)
1248 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1249 #endif
1250 			if (ural_tx_data(sc, m0, ni) != 0)
1251 				break;
1252 
1253 		} else {
1254 			if (ic->ic_state != IEEE80211_S_RUN)
1255 				break;
1256 
1257 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1258 			if (m0 == NULL)
1259 				break;
1260 #if NBPFILTER > 0
1261 			if (ifp->if_bpf != NULL)
1262 				bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1263 #endif
1264 			m0 = ieee80211_encap(ifp, m0, &ni);
1265 			if (m0 == NULL)
1266 				continue;
1267 #if NBPFILTER > 0
1268 			if (ic->ic_rawbpf != NULL)
1269 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1270 #endif
1271 			if (ural_tx_data(sc, m0, ni) != 0) {
1272 				if (ni != NULL)
1273 					ieee80211_release_node(ic, ni);
1274 				ifp->if_oerrors++;
1275 				break;
1276 			}
1277 		}
1278 
1279 		sc->sc_tx_timer = 5;
1280 		ifp->if_timer = 1;
1281 	}
1282 }
1283 
1284 void
1285 ural_watchdog(struct ifnet *ifp)
1286 {
1287 	struct ural_softc *sc = ifp->if_softc;
1288 
1289 	ifp->if_timer = 0;
1290 
1291 	if (sc->sc_tx_timer > 0) {
1292 		if (--sc->sc_tx_timer == 0) {
1293 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1294 			/*ural_init(ifp); XXX needs a process context! */
1295 			ifp->if_oerrors++;
1296 			return;
1297 		}
1298 		ifp->if_timer = 1;
1299 	}
1300 
1301 	ieee80211_watchdog(ifp);
1302 }
1303 
1304 int
1305 ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1306 {
1307 	struct ural_softc *sc = ifp->if_softc;
1308 	struct ieee80211com *ic = &sc->sc_ic;
1309 	struct ifreq *ifr;
1310 	int s, error = 0;
1311 
1312 	if (usbd_is_dying(sc->sc_udev))
1313 		return ENXIO;
1314 
1315 	usbd_ref_incr(sc->sc_udev);
1316 
1317 	s = splnet();
1318 
1319 	switch (cmd) {
1320 	case SIOCSIFADDR:
1321 		ifp->if_flags |= IFF_UP;
1322 		/* FALLTHROUGH */
1323 	case SIOCSIFFLAGS:
1324 		if (ifp->if_flags & IFF_UP) {
1325 			if (ifp->if_flags & IFF_RUNNING)
1326 				ural_update_promisc(sc);
1327 			else
1328 				ural_init(ifp);
1329 		} else {
1330 			if (ifp->if_flags & IFF_RUNNING)
1331 				ural_stop(ifp, 1);
1332 		}
1333 		break;
1334 
1335 	case SIOCADDMULTI:
1336 	case SIOCDELMULTI:
1337 		ifr = (struct ifreq *)data;
1338 		error = (cmd == SIOCADDMULTI) ?
1339 		    ether_addmulti(ifr, &ic->ic_ac) :
1340 		    ether_delmulti(ifr, &ic->ic_ac);
1341 
1342 		if (error == ENETRESET)
1343 			error = 0;
1344 		break;
1345 
1346 	case SIOCS80211CHANNEL:
1347 		/*
1348 		 * This allows for fast channel switching in monitor mode
1349 		 * (used by kismet). In IBSS mode, we must explicitly reset
1350 		 * the interface to generate a new beacon frame.
1351 		 */
1352 		error = ieee80211_ioctl(ifp, cmd, data);
1353 		if (error == ENETRESET &&
1354 		    ic->ic_opmode == IEEE80211_M_MONITOR) {
1355 			if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1356 			    (IFF_UP | IFF_RUNNING))
1357 				ural_set_chan(sc, ic->ic_ibss_chan);
1358 			error = 0;
1359 		}
1360 		break;
1361 
1362 	default:
1363 		error = ieee80211_ioctl(ifp, cmd, data);
1364 	}
1365 
1366 	if (error == ENETRESET) {
1367 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1368 		    (IFF_UP | IFF_RUNNING))
1369 			ural_init(ifp);
1370 		error = 0;
1371 	}
1372 
1373 	splx(s);
1374 
1375 	usbd_ref_decr(sc->sc_udev);
1376 
1377 	return error;
1378 }
1379 
1380 void
1381 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len)
1382 {
1383 	usb_device_request_t req;
1384 	usbd_status error;
1385 
1386 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1387 	req.bRequest = RAL_READ_EEPROM;
1388 	USETW(req.wValue, 0);
1389 	USETW(req.wIndex, addr);
1390 	USETW(req.wLength, len);
1391 
1392 	error = usbd_do_request(sc->sc_udev, &req, buf);
1393 	if (error != 0) {
1394 		printf("%s: could not read EEPROM: %s\n",
1395 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1396 	}
1397 }
1398 
1399 uint16_t
1400 ural_read(struct ural_softc *sc, uint16_t reg)
1401 {
1402 	usb_device_request_t req;
1403 	usbd_status error;
1404 	uint16_t val;
1405 
1406 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1407 	req.bRequest = RAL_READ_MAC;
1408 	USETW(req.wValue, 0);
1409 	USETW(req.wIndex, reg);
1410 	USETW(req.wLength, sizeof (uint16_t));
1411 
1412 	error = usbd_do_request(sc->sc_udev, &req, &val);
1413 	if (error != 0) {
1414 		printf("%s: could not read MAC register: %s\n",
1415 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1416 		return 0;
1417 	}
1418 	return letoh16(val);
1419 }
1420 
1421 void
1422 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1423 {
1424 	usb_device_request_t req;
1425 	usbd_status error;
1426 
1427 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1428 	req.bRequest = RAL_READ_MULTI_MAC;
1429 	USETW(req.wValue, 0);
1430 	USETW(req.wIndex, reg);
1431 	USETW(req.wLength, len);
1432 
1433 	error = usbd_do_request(sc->sc_udev, &req, buf);
1434 	if (error != 0) {
1435 		printf("%s: could not read MAC register: %s\n",
1436 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1437 	}
1438 }
1439 
1440 void
1441 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val)
1442 {
1443 	usb_device_request_t req;
1444 	usbd_status error;
1445 
1446 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1447 	req.bRequest = RAL_WRITE_MAC;
1448 	USETW(req.wValue, val);
1449 	USETW(req.wIndex, reg);
1450 	USETW(req.wLength, 0);
1451 
1452 	error = usbd_do_request(sc->sc_udev, &req, NULL);
1453 	if (error != 0) {
1454 		printf("%s: could not write MAC register: %s\n",
1455 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1456 	}
1457 }
1458 
1459 void
1460 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1461 {
1462 	usb_device_request_t req;
1463 	usbd_status error;
1464 
1465 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1466 	req.bRequest = RAL_WRITE_MULTI_MAC;
1467 	USETW(req.wValue, 0);
1468 	USETW(req.wIndex, reg);
1469 	USETW(req.wLength, len);
1470 
1471 	error = usbd_do_request(sc->sc_udev, &req, buf);
1472 	if (error != 0) {
1473 		printf("%s: could not write MAC register: %s\n",
1474 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1475 	}
1476 }
1477 
1478 void
1479 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val)
1480 {
1481 	uint16_t tmp;
1482 	int ntries;
1483 
1484 	for (ntries = 0; ntries < 5; ntries++) {
1485 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1486 			break;
1487 	}
1488 	if (ntries == 5) {
1489 		printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname);
1490 		return;
1491 	}
1492 
1493 	tmp = reg << 8 | val;
1494 	ural_write(sc, RAL_PHY_CSR7, tmp);
1495 }
1496 
1497 uint8_t
1498 ural_bbp_read(struct ural_softc *sc, uint8_t reg)
1499 {
1500 	uint16_t val;
1501 	int ntries;
1502 
1503 	val = RAL_BBP_WRITE | reg << 8;
1504 	ural_write(sc, RAL_PHY_CSR7, val);
1505 
1506 	for (ntries = 0; ntries < 5; ntries++) {
1507 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1508 			break;
1509 	}
1510 	if (ntries == 5) {
1511 		printf("%s: could not read BBP\n", sc->sc_dev.dv_xname);
1512 		return 0;
1513 	}
1514 	return ural_read(sc, RAL_PHY_CSR7) & 0xff;
1515 }
1516 
1517 void
1518 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
1519 {
1520 	uint32_t tmp;
1521 	int ntries;
1522 
1523 	for (ntries = 0; ntries < 5; ntries++) {
1524 		if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
1525 			break;
1526 	}
1527 	if (ntries == 5) {
1528 		printf("%s: could not write to RF\n", sc->sc_dev.dv_xname);
1529 		return;
1530 	}
1531 
1532 	tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
1533 	ural_write(sc, RAL_PHY_CSR9,  tmp & 0xffff);
1534 	ural_write(sc, RAL_PHY_CSR10, tmp >> 16);
1535 
1536 	/* remember last written value in sc */
1537 	sc->rf_regs[reg] = val;
1538 
1539 	DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff));
1540 }
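/*
 * Editor's note: as the code above shows, an RF register write is packed
 * into a single word with the 2-bit register index in bits 0-1, the 20-bit
 * value in bits 2-21 and the RAL_RF_BUSY/RAL_RF_20BIT control flags on top;
 * the low 16 bits are written through PHY_CSR9 and the upper bits through
 * PHY_CSR10, which presumably kicks off the serial transfer to the RF chip.
 */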
1541 
1542 void
1543 ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
1544 {
1545 	struct ieee80211com *ic = &sc->sc_ic;
1546 	uint8_t power, tmp;
1547 	u_int chan;
1548 
1549 	chan = ieee80211_chan2ieee(ic, c);
1550 	if (chan == 0 || chan == IEEE80211_CHAN_ANY)
1551 		return;
1552 
1553 	power = min(sc->txpow[chan - 1], 31);
1554 
1555 	DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power));
1556 
1557 	switch (sc->rf_rev) {
1558 	case RAL_RF_2522:
1559 		ural_rf_write(sc, RAL_RF1, 0x00814);
1560 		ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
1561 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1562 		break;
1563 
1564 	case RAL_RF_2523:
1565 		ural_rf_write(sc, RAL_RF1, 0x08804);
1566 		ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
1567 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
1568 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1569 		break;
1570 
1571 	case RAL_RF_2524:
1572 		ural_rf_write(sc, RAL_RF1, 0x0c808);
1573 		ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
1574 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1575 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1576 		break;
1577 
1578 	case RAL_RF_2525:
1579 		ural_rf_write(sc, RAL_RF1, 0x08808);
1580 		ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
1581 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1582 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1583 
1584 		ural_rf_write(sc, RAL_RF1, 0x08808);
1585 		ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
1586 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1587 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1588 		break;
1589 
1590 	case RAL_RF_2525E:
1591 		ural_rf_write(sc, RAL_RF1, 0x08808);
1592 		ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
1593 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1594 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
1595 		break;
1596 
1597 	case RAL_RF_2526:
1598 		ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
1599 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1600 		ural_rf_write(sc, RAL_RF1, 0x08804);
1601 
1602 		ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
1603 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1604 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1605 		break;
1606 	}
1607 
1608 	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
1609 	    ic->ic_state != IEEE80211_S_SCAN) {
1610 		/* set Japan filter bit for channel 14 */
1611 		tmp = ural_bbp_read(sc, 70);
1612 
1613 		tmp &= ~RAL_JAPAN_FILTER;
1614 		if (chan == 14)
1615 			tmp |= RAL_JAPAN_FILTER;
1616 
1617 		ural_bbp_write(sc, 70, tmp);
1618 
1619 		/* clear CRC errors */
1620 		ural_read(sc, RAL_STA_CSR0);
1621 
1622 		DELAY(1000); /* RF needs a 1ms delay here */
1623 		ural_disable_rf_tune(sc);
1624 	}
1625 }
1626 
1627 /*
1628  * Disable RF auto-tuning.
1629  */
1630 void
1631 ural_disable_rf_tune(struct ural_softc *sc)
1632 {
1633 	uint32_t tmp;
1634 
1635 	if (sc->rf_rev != RAL_RF_2523) {
1636 		tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
1637 		ural_rf_write(sc, RAL_RF1, tmp);
1638 	}
1639 
1640 	tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
1641 	ural_rf_write(sc, RAL_RF3, tmp);
1642 
1643 	DPRINTFN(2, ("disabling RF autotune\n"));
1644 }
1645 
1646 /*
1647  * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF
1648  * synchronization.
1649  */
1650 void
1651 ural_enable_tsf_sync(struct ural_softc *sc)
1652 {
1653 	struct ieee80211com *ic = &sc->sc_ic;
1654 	uint16_t logcwmin, preload, tmp;
1655 
1656 	/* first, disable TSF synchronization */
1657 	ural_write(sc, RAL_TXRX_CSR19, 0);
1658 
1659 	tmp = (16 * ic->ic_bss->ni_intval) << 4;
1660 	ural_write(sc, RAL_TXRX_CSR18, tmp);
1661 
1662 #ifndef IEEE80211_STA_ONLY
1663 	if (ic->ic_opmode == IEEE80211_M_IBSS) {
1664 		logcwmin = 2;
1665 		preload = 320;
1666 	} else
1667 #endif
1668 	{
1669 		logcwmin = 0;
1670 		preload = 6;
1671 	}
1672 	tmp = logcwmin << 12 | preload;
1673 	ural_write(sc, RAL_TXRX_CSR20, tmp);
1674 
1675 	/* finally, enable TSF synchronization */
1676 	tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN;
1677 	if (ic->ic_opmode == IEEE80211_M_STA)
1678 		tmp |= RAL_ENABLE_TSF_SYNC(1);
1679 #ifndef IEEE80211_STA_ONLY
1680 	else
1681 		tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR;
1682 #endif
1683 	ural_write(sc, RAL_TXRX_CSR19, tmp);
1684 
1685 	DPRINTF(("enabling TSF synchronization\n"));
1686 }
1687 
1688 void
1689 ural_update_slot(struct ural_softc *sc)
1690 {
1691 	struct ieee80211com *ic = &sc->sc_ic;
1692 	uint16_t slottime, sifs, eifs;
1693 
1694 	slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
1695 
1696 	/*
1697 	 * These settings may sound a bit inconsistent but this is what the
1698 	 * reference driver does.
1699 	 */
1700 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1701 		sifs = 16 - RAL_RXTX_TURNAROUND;
1702 		eifs = 364;
1703 	} else {
1704 		sifs = 10 - RAL_RXTX_TURNAROUND;
1705 		eifs = 64;
1706 	}
1707 
1708 	ural_write(sc, RAL_MAC_CSR10, slottime);
1709 	ural_write(sc, RAL_MAC_CSR11, sifs);
1710 	ural_write(sc, RAL_MAC_CSR12, eifs);
1711 }
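/*
 * Editor's note: the nominal 802.11 SIFS in the 2.4 GHz band is 10 us for
 * both 11b and 11g; the 16 us figure used above in 11b mode (before the
 * Rx/Tx turnaround is subtracted) follows the vendor reference driver
 * rather than the standard, as the comment in the function concedes.
 */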
1712 
1713 void
1714 ural_set_txpreamble(struct ural_softc *sc)
1715 {
1716 	uint16_t tmp;
1717 
1718 	tmp = ural_read(sc, RAL_TXRX_CSR10);
1719 
1720 	tmp &= ~RAL_SHORT_PREAMBLE;
1721 	if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE)
1722 		tmp |= RAL_SHORT_PREAMBLE;
1723 
1724 	ural_write(sc, RAL_TXRX_CSR10, tmp);
1725 }
1726 
1727 void
1728 ural_set_basicrates(struct ural_softc *sc)
1729 {
1730 	struct ieee80211com *ic = &sc->sc_ic;
1731 
1732 	/* update basic rate set */
1733 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1734 		/* 11b basic rates: 1, 2Mbps */
1735 		ural_write(sc, RAL_TXRX_CSR11, 0x3);
1736 	} else {
1737 		/* 11b/g basic rates: 1, 2, 5.5, 11Mbps */
1738 		ural_write(sc, RAL_TXRX_CSR11, 0xf);
1739 	}
1740 }
1741 
1742 void
1743 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid)
1744 {
1745 	uint16_t tmp;
1746 
1747 	tmp = bssid[0] | bssid[1] << 8;
1748 	ural_write(sc, RAL_MAC_CSR5, tmp);
1749 
1750 	tmp = bssid[2] | bssid[3] << 8;
1751 	ural_write(sc, RAL_MAC_CSR6, tmp);
1752 
1753 	tmp = bssid[4] | bssid[5] << 8;
1754 	ural_write(sc, RAL_MAC_CSR7, tmp);
1755 
1756 	DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid)));
1757 }
1758 
1759 void
1760 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr)
1761 {
1762 	uint16_t tmp;
1763 
1764 	tmp = addr[0] | addr[1] << 8;
1765 	ural_write(sc, RAL_MAC_CSR2, tmp);
1766 
1767 	tmp = addr[2] | addr[3] << 8;
1768 	ural_write(sc, RAL_MAC_CSR3, tmp);
1769 
1770 	tmp = addr[4] | addr[5] << 8;
1771 	ural_write(sc, RAL_MAC_CSR4, tmp);
1772 
1773 	DPRINTF(("setting MAC address to %s\n",
1774 	    ether_sprintf((uint8_t *)addr)));
1775 }
1776 
1777 void
1778 ural_update_promisc(struct ural_softc *sc)
1779 {
1780 	struct ifnet *ifp = &sc->sc_ic.ic_if;
1781 	uint16_t tmp;
1782 
1783 	tmp = ural_read(sc, RAL_TXRX_CSR2);
1784 
1785 	tmp &= ~RAL_DROP_NOT_TO_ME;
1786 	if (!(ifp->if_flags & IFF_PROMISC))
1787 		tmp |= RAL_DROP_NOT_TO_ME;
1788 
1789 	ural_write(sc, RAL_TXRX_CSR2, tmp);
1790 
1791 	DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
1792 	    "entering" : "leaving"));
1793 }
1794 
1795 const char *
1796 ural_get_rf(int rev)
1797 {
1798 	switch (rev) {
1799 	case RAL_RF_2522:	return "RT2522";
1800 	case RAL_RF_2523:	return "RT2523";
1801 	case RAL_RF_2524:	return "RT2524";
1802 	case RAL_RF_2525:	return "RT2525";
1803 	case RAL_RF_2525E:	return "RT2525e";
1804 	case RAL_RF_2526:	return "RT2526";
1805 	case RAL_RF_5222:	return "RT5222";
1806 	default:		return "unknown";
1807 	}
1808 }
1809 
1810 void
1811 ural_read_eeprom(struct ural_softc *sc)
1812 {
1813 	struct ieee80211com *ic = &sc->sc_ic;
1814 	uint16_t val;
1815 
1816 	/* retrieve MAC/BBP type */
1817 	ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2);
1818 	sc->macbbp_rev = letoh16(val);
1819 
1820 	ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
1821 	val = letoh16(val);
1822 	sc->rf_rev =   (val >> 11) & 0x7;
1823 	sc->hw_radio = (val >> 10) & 0x1;
1824 	sc->led_mode = (val >> 6)  & 0x7;
1825 	sc->rx_ant =   (val >> 4)  & 0x3;
1826 	sc->tx_ant =   (val >> 2)  & 0x3;
1827 	sc->nb_ant =   val & 0x3;
1828 
1829 	/* read MAC address */
1830 	ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6);
1831 
1832 	/* read default values for BBP registers */
1833 	ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);
1834 
1835 	/* read Tx power for all b/g channels */
1836 	ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
1837 }
1838 
1839 int
1840 ural_bbp_init(struct ural_softc *sc)
1841 {
1842 	int i, ntries;
1843 
1844 	/* wait for BBP to be ready */
1845 	for (ntries = 0; ntries < 100; ntries++) {
1846 		if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
1847 			break;
1848 		DELAY(1000);
1849 	}
1850 	if (ntries == 100) {
1851 		printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname);
1852 		return EIO;
1853 	}
1854 
1855 	/* initialize BBP registers to default values */
1856 	for (i = 0; i < nitems(ural_def_bbp); i++)
1857 		ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);
1858 
1859 #if 0
1860 	/* initialize BBP registers to values stored in EEPROM */
1861 	for (i = 0; i < 16; i++) {
1862 		if (sc->bbp_prom[i].reg == 0xff)
1863 			continue;
1864 		ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
1865 	}
1866 #endif
1867 
1868 	return 0;
1869 }
1870 
1871 void
1872 ural_set_txantenna(struct ural_softc *sc, int antenna)
1873 {
1874 	uint16_t tmp;
1875 	uint8_t tx;
1876 
1877 	tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
1878 	if (antenna == 1)
1879 		tx |= RAL_BBP_ANTA;
1880 	else if (antenna == 2)
1881 		tx |= RAL_BBP_ANTB;
1882 	else
1883 		tx |= RAL_BBP_DIVERSITY;
1884 
1885 	/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
1886 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
1887 	    sc->rf_rev == RAL_RF_5222)
1888 		tx |= RAL_BBP_FLIPIQ;
1889 
1890 	ural_bbp_write(sc, RAL_BBP_TX, tx);
1891 
1892 	/* update flags in PHY_CSR5 and PHY_CSR6 too */
1893 	tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
1894 	ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));
1895 
1896 	tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
1897 	ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
1898 }
1899 
1900 void
1901 ural_set_rxantenna(struct ural_softc *sc, int antenna)
1902 {
1903 	uint8_t rx;
1904 
1905 	rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
1906 	if (antenna == 1)
1907 		rx |= RAL_BBP_ANTA;
1908 	else if (antenna == 2)
1909 		rx |= RAL_BBP_ANTB;
1910 	else
1911 		rx |= RAL_BBP_DIVERSITY;
1912 
1913 	/* need to force no I/Q flip for RF 2525e and 2526 */
1914 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
1915 		rx &= ~RAL_BBP_FLIPIQ;
1916 
1917 	ural_bbp_write(sc, RAL_BBP_RX, rx);
1918 }
1919 
1920 int
1921 ural_init(struct ifnet *ifp)
1922 {
1923 	struct ural_softc *sc = ifp->if_softc;
1924 	struct ieee80211com *ic = &sc->sc_ic;
1925 	uint16_t tmp;
1926 	usbd_status error;
1927 	int i, ntries;
1928 
1929 	ural_stop(ifp, 0);
1930 
1931 	/* initialize MAC registers to default values */
1932 	for (i = 0; i < nitems(ural_def_mac); i++)
1933 		ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);
1934 
1935 	/* wait for BBP and RF to wake up (this can take a long time!) */
1936 	for (ntries = 0; ntries < 100; ntries++) {
1937 		tmp = ural_read(sc, RAL_MAC_CSR17);
1938 		if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
1939 		    (RAL_BBP_AWAKE | RAL_RF_AWAKE))
1940 			break;
1941 		DELAY(1000);
1942 	}
1943 	if (ntries == 100) {
1944 		printf("%s: timeout waiting for BBP/RF to wake up\n",
1945 		    sc->sc_dev.dv_xname);
1946 		error = EIO;
1947 		goto fail;
1948 	}
1949 
1950 	/* we're ready! */
1951 	ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);
1952 
1953 	/* set basic rate set (will be updated later) */
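	/*
	 * (0x153 is presumably a default rate bitmap; the real basic rate
	 * set is programmed once the operating mode and channel are known)
	 */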
1954 	ural_write(sc, RAL_TXRX_CSR11, 0x153);
1955 
1956 	error = ural_bbp_init(sc);
1957 	if (error != 0)
1958 		goto fail;
1959 
1960 	/* set default BSS channel */
1961 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
1962 	ural_set_chan(sc, ic->ic_bss->ni_chan);
1963 
1964 	/* clear statistic registers (STA_CSR0 to STA_CSR10) */
1965 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
1966 
1967 	/* set default sensitivity */
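	/* (BBP register 17 is believed to hold the Rx sensitivity threshold) */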
1968 	ural_bbp_write(sc, 17, 0x48);
1969 
1970 	ural_set_txantenna(sc, 1);
1971 	ural_set_rxantenna(sc, 1);
1972 
1973 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
1974 	ural_set_macaddr(sc, ic->ic_myaddr);
1975 
1976 	/*
1977 	 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31).
1978 	 */
1979 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1980 		struct ieee80211_key *k = &ic->ic_nw_keys[i];
1981 		ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE,
1982 		    k->k_key, IEEE80211_KEYBUF_SIZE);
1983 	}
1984 
1985 	/*
1986 	 * Allocate xfer for AMRR statistics requests.
1987 	 */
1988 	sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev);
1989 	if (sc->amrr_xfer == NULL) {
1990 		printf("%s: could not allocate AMRR xfer\n",
1991 		    sc->sc_dev.dv_xname);
1992 		error = ENOMEM;	/* otherwise a failed allocation is reported as success */
		goto fail;
1993 	}
1994 
1995 	/*
1996 	 * Open Tx and Rx USB bulk pipes.
1997 	 */
1998 	error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE,
1999 	    &sc->sc_tx_pipeh);
2000 	if (error != 0) {
2001 		printf("%s: could not open Tx pipe: %s\n",
2002 		    sc->sc_dev.dv_xname, usbd_errstr(error));
2003 		goto fail;
2004 	}
2005 	error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE,
2006 	    &sc->sc_rx_pipeh);
2007 	if (error != 0) {
2008 		printf("%s: could not open Rx pipe: %s\n",
2009 		    sc->sc_dev.dv_xname, usbd_errstr(error));
2010 		goto fail;
2011 	}
2012 
2013 	/*
2014 	 * Allocate Tx and Rx xfer queues.
2015 	 */
2016 	error = ural_alloc_tx_list(sc);
2017 	if (error != 0) {
2018 		printf("%s: could not allocate Tx list\n",
2019 		    sc->sc_dev.dv_xname);
2020 		goto fail;
2021 	}
2022 	error = ural_alloc_rx_list(sc);
2023 	if (error != 0) {
2024 		printf("%s: could not allocate Rx list\n",
2025 		    sc->sc_dev.dv_xname);
2026 		goto fail;
2027 	}
2028 
2029 	/*
2030 	 * Start up the receive pipe.
2031 	 */
2032 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
2033 		struct ural_rx_data *data = &sc->rx_data[i];
2034 
2035 		usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf,
2036 		    MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
2037 		error = usbd_transfer(data->xfer);
2038 		if (error != 0 && error != USBD_IN_PROGRESS) {
2039 			printf("%s: could not queue Rx transfer\n",
2040 			    sc->sc_dev.dv_xname);
2041 			goto fail;
2042 		}
2043 	}
2044 
2045 	/* kick Rx */
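	/*
	 * Always drop PHY/CRC-damaged frames.  Unless monitoring, also drop
	 * control frames, bad-version frames, to-DS frames (except in hostap
	 * mode) and, unless the interface is promiscuous, frames not
	 * addressed to us.
	 */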
2046 	tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR;
2047 	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
2048 		tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR;
2049 #ifndef IEEE80211_STA_ONLY
2050 		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
2051 #endif
2052 			tmp |= RAL_DROP_TODS;
2053 		if (!(ifp->if_flags & IFF_PROMISC))
2054 			tmp |= RAL_DROP_NOT_TO_ME;
2055 	}
2056 	ural_write(sc, RAL_TXRX_CSR2, tmp);
2057 
2058 	ifq_clr_oactive(&ifp->if_snd);
2059 	ifp->if_flags |= IFF_RUNNING;
2060 
2061 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
2062 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2063 	else
2064 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2065 
2066 	return 0;
2067 
2068 fail:	ural_stop(ifp, 1);
2069 	return error;
2070 }
2071 
2072 void
2073 ural_stop(struct ifnet *ifp, int disable)
2074 {
2075 	struct ural_softc *sc = ifp->if_softc;
2076 	struct ieee80211com *ic = &sc->sc_ic;
2077 
2078 	sc->sc_tx_timer = 0;
2079 	ifp->if_timer = 0;
2080 	ifp->if_flags &= ~IFF_RUNNING;
2081 	ifq_clr_oactive(&ifp->if_snd);
2082 
2083 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */
2084 
2085 	/* disable Rx */
2086 	ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);
2087 
2088 	/* reset ASIC and BBP (but won't reset MAC registers!) */
2089 	ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
2090 	ural_write(sc, RAL_MAC_CSR1, 0);
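	/* (the second write releases the reset, leaving the chip idle) */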
2091 
2092 	if (sc->amrr_xfer != NULL) {
2093 		usbd_free_xfer(sc->amrr_xfer);
2094 		sc->amrr_xfer = NULL;
2095 	}
2096 	if (sc->sc_rx_pipeh != NULL) {
2097 		usbd_abort_pipe(sc->sc_rx_pipeh);
2098 		usbd_close_pipe(sc->sc_rx_pipeh);
2099 		sc->sc_rx_pipeh = NULL;
2100 	}
2101 	if (sc->sc_tx_pipeh != NULL) {
2102 		usbd_abort_pipe(sc->sc_tx_pipeh);
2103 		usbd_close_pipe(sc->sc_tx_pipeh);
2104 		sc->sc_tx_pipeh = NULL;
2105 	}
2106 
2107 	ural_free_rx_list(sc);
2108 	ural_free_tx_list(sc);
2109 }
2110 
2111 void
2112 ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2113 {
2114 	/* start with lowest Tx rate */
2115 	ni->ni_txrate = 0;
2116 }
2117 
2118 void
2119 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni)
2120 {
2121 	int i;
2122 
2123 	/* clear statistic registers (STA_CSR0 to STA_CSR10) */
2124 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
2125 
2126 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
2127 
2128 	/* set rate to some reasonable initial value */
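	/* (rates are in 500kb/s units: pick the highest rate <= 72, i.e. 36Mb/s) */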
2129 	for (i = ni->ni_rates.rs_nrates - 1;
2130 	     i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72;
2131 	     i--);
2132 	ni->ni_txrate = i;
2133 
2134 	if (!usbd_is_dying(sc->sc_udev))
2135 		timeout_add_sec(&sc->amrr_to, 1);
2136 }
2137 
2138 void
2139 ural_amrr_timeout(void *arg)
2140 {
2141 	struct ural_softc *sc = arg;
2142 	usb_device_request_t req;
2143 	int s;
2144 
2145 	if (usbd_is_dying(sc->sc_udev))
2146 		return;
2147 
2148 	usbd_ref_incr(sc->sc_udev);
2149 
2150 	s = splusb();
2151 
2152 	/*
2153 	 * Asynchronously read statistic registers (cleared by read).
2154 	 */
2155 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
2156 	req.bRequest = RAL_READ_MULTI_MAC;
2157 	USETW(req.wValue, 0);
2158 	USETW(req.wIndex, RAL_STA_CSR0);
2159 	USETW(req.wLength, sizeof sc->sta);
2160 
2161 	usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
2162 	    USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
2163 	    ural_amrr_update);
2164 	(void)usbd_transfer(sc->amrr_xfer);
2165 
2166 	splx(s);
2167 
2168 	usbd_ref_decr(sc->sc_udev);
2169 }
2170 
2171 void
2172 ural_amrr_update(struct usbd_xfer *xfer, void *priv,
2173     usbd_status status)
2174 {
2175 	struct ural_softc *sc = (struct ural_softc *)priv;
2176 	struct ifnet *ifp = &sc->sc_ic.ic_if;
2177 
2178 	if (status != USBD_NORMAL_COMPLETION) {
2179 		printf("%s: could not retrieve Tx statistics - cancelling "
2180 		    "automatic rate control\n", sc->sc_dev.dv_xname);
2181 		return;
2182 	}
2183 
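	/* (sta[] holds the STA_CSR counters fetched in ural_amrr_timeout()) */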
2184 	/* count TX retry-fail as Tx errors */
2185 	ifp->if_oerrors += letoh16(sc->sta[9]);
2186 
2187 	sc->amn.amn_retrycnt =
2188 	    letoh16(sc->sta[7]) +	/* TX one-retry ok count */
2189 	    letoh16(sc->sta[8]) +	/* TX more-retry ok count */
2190 	    letoh16(sc->sta[9]);	/* TX retry-fail count */
2191 
2192 	sc->amn.amn_txcnt =
2193 	    sc->amn.amn_retrycnt +
2194 	    letoh16(sc->sta[6]);	/* TX no-retry ok count */
2195 
2196 	ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);
2197 
2198 	if (!usbd_is_dying(sc->sc_udev))
2199 		timeout_add_sec(&sc->amrr_to, 1);
2200 }
2201