1 /*	$OpenBSD: if_rtwn.c,v 1.26 2017/02/01 12:46:40 stsp Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2015-2016 Andriy Voskoboinyk <avos@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * PCI front-end for Realtek RTL8188CE driver.
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/task.h>
34 #include <sys/timeout.h>
35 #include <sys/conf.h>
36 #include <sys/device.h>
37 #include <sys/endian.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #if NBPFILTER > 0
43 #include <net/bpf.h>
44 #endif
45 #include <net/if.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51 
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_amrr.h>
54 #include <net80211/ieee80211_radiotap.h>
55 
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58 #include <dev/pci/pcidevs.h>
59 
60 #include <dev/ic/r92creg.h>
61 #include <dev/ic/rtwnvar.h>
62 
63 /*
64  * Driver definitions.
65  */
66 
67 #define R92C_PUBQ_NPAGES	176
68 #define R92C_HPQ_NPAGES		41
69 #define R92C_LPQ_NPAGES		28
70 #define R92C_TXPKTBUF_COUNT	256
71 #define R92C_TX_PAGE_COUNT	\
72 	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
73 #define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)
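/*
 * The TX packet buffer is divided into R92C_TXPKTBUF_COUNT pages.
 * Pages [0, R92C_TX_PAGE_COUNT] are reserved for the public, high and
 * low priority queues; the remaining pages are chained into a ring
 * buffer by rtwn_llt_init().
 */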
74 
75 #define RTWN_NTXQUEUES			9
76 #define RTWN_RX_LIST_COUNT		256
77 #define RTWN_TX_LIST_COUNT		256
78 
79 /* TX queue indices. */
80 #define RTWN_BK_QUEUE			0
81 #define RTWN_BE_QUEUE			1
82 #define RTWN_VI_QUEUE			2
83 #define RTWN_VO_QUEUE			3
84 #define RTWN_BEACON_QUEUE		4
85 #define RTWN_TXCMD_QUEUE		5
86 #define RTWN_MGNT_QUEUE			6
87 #define RTWN_HIGH_QUEUE			7
88 #define RTWN_HCCA_QUEUE			8
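/*
 * Each TX queue index selects both a hardware DMA queue and the
 * corresponding software ring in tx_ring[]; rtwn_tx() maps frames to
 * the BK/BE/VI/VO queues based on their EDCA access category.
 */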
89 
90 struct rtwn_rx_radiotap_header {
91 	struct ieee80211_radiotap_header wr_ihdr;
92 	uint8_t		wr_flags;
93 	uint8_t		wr_rate;
94 	uint16_t	wr_chan_freq;
95 	uint16_t	wr_chan_flags;
96 	uint8_t		wr_dbm_antsignal;
97 } __packed;
98 
99 #define RTWN_RX_RADIOTAP_PRESENT			\
100 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
101 	 1 << IEEE80211_RADIOTAP_RATE |			\
102 	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
103 	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
104 
105 struct rtwn_tx_radiotap_header {
106 	struct ieee80211_radiotap_header wt_ihdr;
107 	uint8_t		wt_flags;
108 	uint16_t	wt_chan_freq;
109 	uint16_t	wt_chan_flags;
110 } __packed;
111 
112 #define RTWN_TX_RADIOTAP_PRESENT			\
113 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
114 	 1 << IEEE80211_RADIOTAP_CHANNEL)
115 
116 struct rtwn_rx_data {
117 	bus_dmamap_t		map;
118 	struct mbuf		*m;
119 };
120 
121 struct rtwn_rx_ring {
122 	struct r92c_rx_desc_pci	*desc;
123 	bus_dmamap_t		map;
124 	bus_dma_segment_t	seg;
125 	int			nsegs;
126 	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];
127 };
128 
129 struct rtwn_tx_data {
130 	bus_dmamap_t			map;
131 	struct mbuf			*m;
132 	struct ieee80211_node		*ni;
133 };
134 
135 struct rtwn_tx_ring {
136 	bus_dmamap_t		map;
137 	bus_dma_segment_t	seg;
138 	int			nsegs;
139 	struct r92c_tx_desc_pci	*desc;
140 	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
141 	int			queued;
142 	int			cur;
143 };
144 
145 struct rtwn_pci_softc {
146 	struct device		sc_dev;
147 	struct rtwn_softc	sc_sc;
148 
149 	struct rtwn_rx_ring	rx_ring;
150 	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
151 	uint32_t		qfullmsk;
152 
153 	struct timeout		calib_to;
154 	struct timeout		scan_to;
155 
156 	/* PCI specific goo. */
157 	bus_dma_tag_t 		sc_dmat;
158 	pci_chipset_tag_t	sc_pc;
159 	pcitag_t		sc_tag;
160 	void			*sc_ih;
161 	bus_space_tag_t		sc_st;
162 	bus_space_handle_t	sc_sh;
163 	bus_size_t		sc_mapsize;
164 	int			sc_cap_off;
165 
166 	struct ieee80211_amrr		amrr;
167 	struct ieee80211_amrr_node	amn;
168 
169 #if NBPFILTER > 0
170 	caddr_t				sc_drvbpf;
171 
172 	union {
173 		struct rtwn_rx_radiotap_header th;
174 		uint8_t	pad[64];
175 	}				sc_rxtapu;
176 #define sc_rxtap	sc_rxtapu.th
177 	int				sc_rxtap_len;
178 
179 	union {
180 		struct rtwn_tx_radiotap_header th;
181 		uint8_t	pad[64];
182 	}				sc_txtapu;
183 #define sc_txtap	sc_txtapu.th
184 	int				sc_txtap_len;
185 #endif
186 };
187 
188 #ifdef RTWN_DEBUG
189 #define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
190 #define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
191 extern int rtwn_debug;
192 #else
193 #define DPRINTF(x)
194 #define DPRINTFN(n, x)
195 #endif
196 
197 /*
198  * PCI configuration space registers.
199  */
200 #define	RTWN_PCI_IOBA		0x10	/* i/o mapped base */
201 #define	RTWN_PCI_MMBA		0x18	/* memory mapped base */
202 
203 static const struct pci_matchid rtwn_pci_devices[] = {
204 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RT8188 }
205 };
206 
207 int		rtwn_pci_match(struct device *, void *, void *);
208 void		rtwn_pci_attach(struct device *, struct device *, void *);
209 int		rtwn_pci_detach(struct device *, int);
210 int		rtwn_pci_activate(struct device *, int);
211 int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
212 void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
213 void		rtwn_free_rx_list(struct rtwn_pci_softc *);
214 void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
215 		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
216 int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
217 void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
218 void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
219 void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
220 void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
221 void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
222 uint8_t		rtwn_pci_read_1(void *, uint16_t);
223 uint16_t	rtwn_pci_read_2(void *, uint16_t);
224 uint32_t	rtwn_pci_read_4(void *, uint16_t);
225 void		rtwn_rx_frame(struct rtwn_pci_softc *,
226 		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int);
227 int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
228 void		rtwn_tx_done(struct rtwn_pci_softc *, int);
229 int		rtwn_alloc_buffers(void *);
230 int		rtwn_pci_init(void *);
231 void		rtwn_pci_stop(void *);
232 int		rtwn_intr(void *);
233 int		rtwn_is_oactive(void *);
234 int		rtwn_power_on(void *);
235 int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
236 int		rtwn_llt_init(struct rtwn_pci_softc *);
237 int		rtwn_dma_init(void *);
238 int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
239 int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
240 void		rtwn_mac_init(void *);
241 void		rtwn_bb_init(void *);
242 void		rtwn_calib_to(void *);
243 void		rtwn_next_calib(void *);
244 void		rtwn_cancel_calib(void *);
245 void		rtwn_scan_to(void *);
246 void		rtwn_pci_next_scan(void *);
247 void		rtwn_cancel_scan(void *);
248 void		rtwn_wait_async(void *);
249 void		rtwn_poll_c2h_events(struct rtwn_pci_softc *);
250 void		rtwn_tx_report(struct rtwn_pci_softc *, uint8_t *, int);
251 
252 /* Aliases. */
253 #define	rtwn_bb_write	rtwn_pci_write_4
254 #define rtwn_bb_read	rtwn_pci_read_4
255 
256 struct cfdriver rtwn_cd = {
257 	NULL, "rtwn", DV_IFNET
258 };
259 
260 const struct cfattach rtwn_pci_ca = {
261 	sizeof(struct rtwn_pci_softc),
262 	rtwn_pci_match,
263 	rtwn_pci_attach,
264 	rtwn_pci_detach,
265 	rtwn_pci_activate
266 };
267 
268 int
269 rtwn_pci_match(struct device *parent, void *match, void *aux)
270 {
271 	return (pci_matchbyid(aux, rtwn_pci_devices,
272 	    nitems(rtwn_pci_devices)));
273 }
274 
275 void
276 rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
277 {
278 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
279 	struct pci_attach_args *pa = aux;
280 	struct ifnet *ifp;
281 	int i, error;
282 	pcireg_t memtype;
283 	pci_intr_handle_t ih;
284 	const char *intrstr;
285 
286 	sc->sc_dmat = pa->pa_dmat;
287 	sc->sc_pc = pa->pa_pc;
288 	sc->sc_tag = pa->pa_tag;
289 
290 	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
291 	timeout_set(&sc->scan_to, rtwn_scan_to, sc);
292 
293 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
294 
295 	/* Map control/status registers. */
296 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
297 	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
298 	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
299 	if (error != 0) {
300 		printf(": can't map mem space\n");
301 		return;
302 	}
303 
304 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
305 		printf(": can't map interrupt\n");
306 		return;
307 	}
308 	intrstr = pci_intr_string(sc->sc_pc, ih);
309 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
310 	    rtwn_intr, sc, sc->sc_dev.dv_xname);
311 	if (sc->sc_ih == NULL) {
312 		printf(": can't establish interrupt");
313 		if (intrstr != NULL)
314 			printf(" at %s", intrstr);
315 		printf("\n");
316 		return;
317 	}
318 	printf(": %s\n", intrstr);
319 
320 	/* Disable PCIe Active State Power Management (ASPM). */
321 	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
322 	    &sc->sc_cap_off, NULL)) {
323 		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
324 		    sc->sc_cap_off + PCI_PCIE_LCSR);
325 		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
326 		pci_conf_write(sc->sc_pc, sc->sc_tag,
327 		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
328 	}
329 
330 	/* Allocate Tx/Rx buffers. */
331 	error = rtwn_alloc_rx_list(sc);
332 	if (error != 0) {
333 		printf("%s: could not allocate Rx buffers\n",
334 		    sc->sc_dev.dv_xname);
335 		return;
336 	}
337 	for (i = 0; i < RTWN_NTXQUEUES; i++) {
338 		error = rtwn_alloc_tx_list(sc, i);
339 		if (error != 0) {
340 			printf("%s: could not allocate Tx buffers\n",
341 			    sc->sc_dev.dv_xname);
342 			rtwn_free_rx_list(sc);
343 			return;
344 		}
345 	}
346 
347 	sc->amrr.amrr_min_success_threshold = 1;
348 	sc->amrr.amrr_max_success_threshold = 15;
349 
350 	/* Attach the bus-agnostic driver. */
351 	sc->sc_sc.sc_ops.cookie = sc;
352 	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
353 	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
354 	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
355 	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
356 	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
357 	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
358 	sc->sc_sc.sc_ops.tx = rtwn_tx;
359 	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
360 	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
361 	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
362 	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
363 	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
364 	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
365 	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
366 	sc->sc_sc.sc_ops.init = rtwn_pci_init;
367 	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
368 	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
369 	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
370 	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
371 	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
372 	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
373 	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
374 	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc,
375 	    RTWN_CHIP_88C | RTWN_CHIP_PCI);
376 	if (error != 0) {
377 		rtwn_free_rx_list(sc);
378 		for (i = 0; i < RTWN_NTXQUEUES; i++)
379 			rtwn_free_tx_list(sc, i);
380 		return;
381 	}
382 
383 	/* ifp is now valid */
384 	ifp = &sc->sc_sc.sc_ic.ic_if;
385 #if NBPFILTER > 0
386 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
387 	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
388 
389 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
390 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
391 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);
392 
393 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
394 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
395 	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
396 #endif
397 }
398 
399 int
400 rtwn_pci_detach(struct device *self, int flags)
401 {
402 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
403 	int s, i;
404 
405 	s = splnet();
406 
407 	if (timeout_initialized(&sc->calib_to))
408 		timeout_del(&sc->calib_to);
409 	if (timeout_initialized(&sc->scan_to))
410 		timeout_del(&sc->scan_to);
411 
412 	rtwn_detach(&sc->sc_sc, flags);
413 
414 	/* Free Tx/Rx buffers. */
415 	for (i = 0; i < RTWN_NTXQUEUES; i++)
416 		rtwn_free_tx_list(sc, i);
417 	rtwn_free_rx_list(sc);
418 	splx(s);
419 
420 	return (0);
421 }
422 
423 int
424 rtwn_pci_activate(struct device *self, int act)
425 {
426 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
427 
428 	return rtwn_activate(&sc->sc_sc, act);
429 }
430 
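/*
 * Initialize an Rx descriptor and hand it over to the hardware.
 * The OWN bit is set last, after a write barrier, so the chip never
 * sees a partially initialized descriptor; EOR marks the last
 * descriptor in the ring.
 */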
431 void
432 rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
433     bus_addr_t addr, size_t len, int idx)
434 {
435 	memset(desc, 0, sizeof(*desc));
436 	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
437 		((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
438 	desc->rxbufaddr = htole32(addr);
439 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
440 	    BUS_SPACE_BARRIER_WRITE);
441 	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
442 }
443 
444 int
445 rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
446 {
447 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
448 	struct rtwn_rx_data *rx_data;
449 	size_t size;
450 	int i, error = 0;
451 
452 	/* Allocate Rx descriptors. */
453 	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
454 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
455 		&rx_ring->map);
456 	if (error != 0) {
457 		printf("%s: could not create rx desc DMA map\n",
458 		    sc->sc_dev.dv_xname);
459 		rx_ring->map = NULL;
460 		goto fail;
461 	}
462 
463 	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
464 	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
465 	if (error != 0) {
466 		printf("%s: could not allocate rx desc\n",
467 		    sc->sc_dev.dv_xname);
468 		goto fail;
469 	}
470 
471 	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
472 	    size, (caddr_t *)&rx_ring->desc,
473 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
474 	if (error != 0) {
475 		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
476 		rx_ring->desc = NULL;
477 		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
478 		goto fail;
479 	}
480 
481 	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
482 	    1, size, BUS_DMA_NOWAIT);
483 	if (error != 0) {
484 		printf("%s: could not load rx desc\n",
485 		    sc->sc_dev.dv_xname);
486 		goto fail;
487 	}
488 
489 	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
490 	    BUS_DMASYNC_PREWRITE);
491 
492 	/* Allocate Rx buffers. */
493 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
494 		rx_data = &rx_ring->rx_data[i];
495 
496 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
497 		    0, BUS_DMA_NOWAIT, &rx_data->map);
498 		if (error != 0) {
499 			printf("%s: could not create rx buf DMA map\n",
500 			    sc->sc_dev.dv_xname);
501 			goto fail;
502 		}
503 
504 		rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
505 		if (rx_data->m == NULL) {
506 			printf("%s: could not allocate rx mbuf\n",
507 			    sc->sc_dev.dv_xname);
508 			error = ENOMEM;
509 			goto fail;
510 		}
511 
512 		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
513 		    mtod(rx_data->m, void *), MCLBYTES, NULL,
514 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
515 		if (error != 0) {
516 			printf("%s: could not load rx buf DMA map\n",
517 			    sc->sc_dev.dv_xname);
518 			goto fail;
519 		}
520 
521 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
522 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
523 	}
524 fail:	if (error != 0)
525 		rtwn_free_rx_list(sc);
526 	return (error);
527 }
528 
529 void
530 rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
531 {
532 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
533 	struct rtwn_rx_data *rx_data;
534 	int i;
535 
536 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
537 		rx_data = &rx_ring->rx_data[i];
538 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
539 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
540 	}
541 }
542 
543 void
544 rtwn_free_rx_list(struct rtwn_pci_softc *sc)
545 {
546 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
547 	struct rtwn_rx_data *rx_data;
548 	int i, s;
549 
550 	s = splnet();
551 
552 	if (rx_ring->map) {
553 		if (rx_ring->desc) {
554 			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
555 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
556 			    sizeof (struct r92c_rx_desc_pci) *
557 			    RTWN_RX_LIST_COUNT);
558 			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
559 			    rx_ring->nsegs);
560 			rx_ring->desc = NULL;
561 		}
562 		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
563 		rx_ring->map = NULL;
564 	}
565 
566 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
567 		rx_data = &rx_ring->rx_data[i];
568 
569 		if (rx_data->m != NULL) {
570 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
571 			m_freem(rx_data->m);
572 			rx_data->m = NULL;
573 		}
574 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
575 		rx_data->map = NULL;
576 	}
577 
578 	splx(s);
579 }
580 
581 int
582 rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
583 {
584 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
585 	struct rtwn_tx_data *tx_data;
586 	int i = 0, error = 0;
587 
588 	error = bus_dmamap_create(sc->sc_dmat,
589 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
590 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
591 	    BUS_DMA_NOWAIT, &tx_ring->map);
592 	if (error != 0) {
593 		printf("%s: could not create tx ring DMA map\n",
594 		    sc->sc_dev.dv_xname);
595 		goto fail;
596 	}
597 
598 	error = bus_dmamem_alloc(sc->sc_dmat,
599 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
600 	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
601 	if (error != 0) {
602 		printf("%s: could not allocate tx ring DMA memory\n",
603 		    sc->sc_dev.dv_xname);
604 		goto fail;
605 	}
606 
607 	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
608 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
609 	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
610 	if (error != 0) {
611 		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
612 		printf("%s: can't map tx ring DMA memory\n",
613 		    sc->sc_dev.dv_xname);
614 		goto fail;
615 	}
616 
617 	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
618 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
619 	    BUS_DMA_NOWAIT);
620 	if (error != 0) {
621 		printf("%s: could not load tx ring DMA map\n",
622 		    sc->sc_dev.dv_xname);
623 		goto fail;
624 	}
625 
626 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
627 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
628 
629 		/* setup tx desc */
630 		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
631 		  + sizeof(struct r92c_tx_desc_pci)
632 		  * ((i + 1) % RTWN_TX_LIST_COUNT));
633 
634 		tx_data = &tx_ring->tx_data[i];
635 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
636 		    0, BUS_DMA_NOWAIT, &tx_data->map);
637 		if (error != 0) {
638 			printf("%s: could not create tx buf DMA map\n",
639 			    sc->sc_dev.dv_xname);
640 			goto fail;
641 		}
642 		tx_data->m = NULL;
643 		tx_data->ni = NULL;
644 	}
645 fail:
646 	if (error != 0)
647 		rtwn_free_tx_list(sc, qid);
648 	return (error);
649 }
650 
651 void
652 rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
653 {
654 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
655 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
656 	int i;
657 
658 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
659 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
660 		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
661 
662 		memset(desc, 0, sizeof(*desc) -
663 		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
664 		    sizeof(desc->nextdescaddr)));
665 
666 		if (tx_data->m != NULL) {
667 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
668 			m_freem(tx_data->m);
669 			tx_data->m = NULL;
670 			ieee80211_release_node(ic, tx_data->ni);
671 			tx_data->ni = NULL;
672 		}
673 	}
674 
675 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
676 	    BUS_DMASYNC_POSTWRITE);
677 
678 	sc->qfullmsk &= ~(1 << qid);
679 	tx_ring->queued = 0;
680 	tx_ring->cur = 0;
681 }
682 
683 void
684 rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
685 {
686 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
687 	struct rtwn_tx_data *tx_data;
688 	int i;
689 
690 	if (tx_ring->map != NULL) {
691 		if (tx_ring->desc != NULL) {
692 			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
693 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
694 			    sizeof (struct r92c_tx_desc_pci) *
695 			    RTWN_TX_LIST_COUNT);
696 			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
697 		}
698 		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
699 	}
700 
701 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
702 		tx_data = &tx_ring->tx_data[i];
703 
704 		if (tx_data->m != NULL) {
705 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
706 			m_freem(tx_data->m);
707 			tx_data->m = NULL;
708 		}
709 		bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
710 	}
711 
712 	sc->qfullmsk &= ~(1 << qid);
713 	tx_ring->queued = 0;
714 	tx_ring->cur = 0;
715 }
716 
717 void
718 rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
719 {
720 	struct rtwn_pci_softc *sc = cookie;
721 
722 	bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
723 }
724 
725 void
726 rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
727 {
728 	struct rtwn_pci_softc *sc = cookie;
729 
730 	val = htole16(val);
731 	bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
732 }
733 
734 void
735 rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
736 {
737 	struct rtwn_pci_softc *sc = cookie;
738 
739 	val = htole32(val);
740 	bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
741 }
742 
743 uint8_t
744 rtwn_pci_read_1(void *cookie, uint16_t addr)
745 {
746 	struct rtwn_pci_softc *sc = cookie;
747 
748 	return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
749 }
750 
751 uint16_t
752 rtwn_pci_read_2(void *cookie, uint16_t addr)
753 {
754 	struct rtwn_pci_softc *sc = cookie;
755 	uint16_t val;
756 
757 	val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
758 	return le16toh(val);
759 }
760 
761 uint32_t
762 rtwn_pci_read_4(void *cookie, uint16_t addr)
763 {
764 	struct rtwn_pci_softc *sc = cookie;
765 	uint32_t val;
766 
767 	val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
768 	return le32toh(val);
769 }
770 
771 void
772 rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
773     struct rtwn_rx_data *rx_data, int desc_idx)
774 {
775 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
776 	struct ifnet *ifp = &ic->ic_if;
777 	struct ieee80211_rxinfo rxi;
778 	struct ieee80211_frame *wh;
779 	struct ieee80211_node *ni;
780 	struct r92c_rx_phystat *phy = NULL;
781 	uint32_t rxdw0, rxdw3;
782 	struct mbuf *m, *m1;
783 	uint8_t rate;
784 	int8_t rssi = 0;
785 	int infosz, pktlen, shift, error;
786 
787 	rxdw0 = letoh32(rx_desc->rxdw0);
788 	rxdw3 = letoh32(rx_desc->rxdw3);
789 
790 	if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
791 		/*
792 		 * This should not happen since we set up our Rx filter
793 		 * to not receive these frames.
794 		 */
795 		ifp->if_ierrors++;
796 		return;
797 	}
798 
799 	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
800 	if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
801 		ifp->if_ierrors++;
802 		return;
803 	}
804 
805 	rate = MS(rxdw3, R92C_RXDW3_RATE);
806 	infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
807 	if (infosz > sizeof(struct r92c_rx_phystat))
808 		infosz = sizeof(struct r92c_rx_phystat);
809 	shift = MS(rxdw0, R92C_RXDW0_SHIFT);
810 
811 	/* Get RSSI from PHY status descriptor if present. */
812 	if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
813 		phy = mtod(rx_data->m, struct r92c_rx_phystat *);
814 		rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
815 		/* Update our average RSSI. */
816 		rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
817 	}
818 
819 	DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
820 	    pktlen, rate, infosz, shift, rssi));
821 
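	/*
	 * Try to allocate a fresh cluster before handing the received
	 * frame up the stack.  If allocation or DMA loading fails, the
	 * old mbuf is recycled and the frame is dropped, so the Rx ring
	 * never loses a buffer.
	 */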
822 	m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
823 	if (m1 == NULL) {
824 		ifp->if_ierrors++;
825 		return;
826 	}
827 	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
828 	error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
829 	    mtod(m1, void *), MCLBYTES, NULL,
830 	    BUS_DMA_NOWAIT | BUS_DMA_READ);
831 	if (error != 0) {
832 		m_freem(m1);
833 
834 		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
835 		    rx_data->m, BUS_DMA_NOWAIT))
836 			panic("%s: could not load old RX mbuf",
837 			    sc->sc_dev.dv_xname);
838 
839 		/* Physical address may have changed. */
840 		rtwn_setup_rx_desc(sc, rx_desc,
841 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx);
842 
843 		ifp->if_ierrors++;
844 		return;
845 	}
846 
847 	/* Finalize mbuf. */
848 	m = rx_data->m;
849 	rx_data->m = m1;
850 	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
851 
852 	/* Update RX descriptor. */
853 	rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr,
854 	    MCLBYTES, desc_idx);
855 
856 	/* Get ieee80211 frame header. */
857 	if (rxdw0 & R92C_RXDW0_PHYST)
858 		m_adj(m, infosz + shift);
859 	else
860 		m_adj(m, shift);
861 	wh = mtod(m, struct ieee80211_frame *);
862 
863 #if NBPFILTER > 0
864 	if (__predict_false(sc->sc_drvbpf != NULL)) {
865 		struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
866 		struct mbuf mb;
867 
868 		tap->wr_flags = 0;
869 		/* Map HW rate index to 802.11 rate. */
871 		if (!(rxdw3 & R92C_RXDW3_HT)) {
872 			switch (rate) {
873 			/* CCK. */
874 			case  0: tap->wr_rate =   2; break;
875 			case  1: tap->wr_rate =   4; break;
876 			case  2: tap->wr_rate =  11; break;
877 			case  3: tap->wr_rate =  22; break;
878 			/* OFDM. */
879 			case  4: tap->wr_rate =  12; break;
880 			case  5: tap->wr_rate =  18; break;
881 			case  6: tap->wr_rate =  24; break;
882 			case  7: tap->wr_rate =  36; break;
883 			case  8: tap->wr_rate =  48; break;
884 			case  9: tap->wr_rate =  72; break;
885 			case 10: tap->wr_rate =  96; break;
886 			case 11: tap->wr_rate = 108; break;
887 			}
888 		} else if (rate >= 12) {	/* MCS0~15. */
889 			/* Bit 7 set means HT MCS instead of rate. */
890 			tap->wr_rate = 0x80 | (rate - 12);
891 		}
892 		tap->wr_dbm_antsignal = rssi;
893 		tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq);
894 		tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags);
895 
896 		mb.m_data = (caddr_t)tap;
897 		mb.m_len = sc->sc_rxtap_len;
898 		mb.m_next = m;
899 		mb.m_nextpkt = NULL;
900 		mb.m_type = 0;
901 		mb.m_flags = 0;
902 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
903 	}
904 #endif
905 
906 	ni = ieee80211_find_rxnode(ic, wh);
907 	rxi.rxi_flags = 0;
908 	rxi.rxi_rssi = rssi;
909 	rxi.rxi_tstamp = 0;	/* Unused. */
910 	ieee80211_input(ifp, m, ni, &rxi);
911 	/* Node is no longer needed. */
912 	ieee80211_release_node(ic, ni);
913 }
914 
915 int
916 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni)
917 {
918 	struct rtwn_pci_softc *sc = cookie;
919 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
920 	struct ieee80211_frame *wh;
921 	struct ieee80211_key *k = NULL;
922 	struct rtwn_tx_ring *tx_ring;
923 	struct rtwn_tx_data *data;
924 	struct r92c_tx_desc_pci *txd;
925 	uint16_t qos;
926 	uint8_t raid, type, tid, qid;
927 	int hasqos, error;
928 
929 	wh = mtod(m, struct ieee80211_frame *);
930 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
931 
932 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
933 		k = ieee80211_get_txkey(ic, wh, ni);
934 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
935 			return (ENOBUFS);
936 		wh = mtod(m, struct ieee80211_frame *);
937 	}
938 
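	/*
	 * Select the Tx ring: QoS data frames are mapped from their TID
	 * to an EDCA access category, non-QoS data frames use the BE
	 * queue and all other frames (e.g. management) use the VO queue.
	 */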
939 	if ((hasqos = ieee80211_has_qos(wh))) {
940 		qos = ieee80211_get_qos(wh);
941 		tid = qos & IEEE80211_QOS_TID;
942 		qid = ieee80211_up_to_ac(ic, tid);
943 	} else if (type != IEEE80211_FC0_TYPE_DATA) {
944 		qid = RTWN_VO_QUEUE;
945 	} else
946 		qid = RTWN_BE_QUEUE;
947 
948 	/* Grab a Tx buffer from the ring. */
949 	tx_ring = &sc->tx_ring[qid];
950 	data = &tx_ring->tx_data[tx_ring->cur];
951 	if (data->m != NULL) {
952 		m_freem(m);
953 		return (ENOBUFS);
954 	}
955 
956 	/* Fill Tx descriptor. */
957 	txd = &tx_ring->desc[tx_ring->cur];
958 	if (letoh32(txd->txdw0) & R92C_TXDW0_OWN) {
959 		m_freem(m);
960 		return (ENOBUFS);
961 	}
962 	txd->txdw0 = htole32(
963 	    SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) |
964 	    SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
965 	    R92C_TXDW0_FSG | R92C_TXDW0_LSG);
966 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
967 		txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
968 
969 	txd->txdw1 = 0;
970 #ifdef notyet
971 	if (k != NULL) {
972 		switch (k->k_cipher) {
973 		case IEEE80211_CIPHER_WEP40:
974 		case IEEE80211_CIPHER_WEP104:
975 		case IEEE80211_CIPHER_TKIP:
976 			cipher = R92C_TXDW1_CIPHER_RC4;
977 			break;
978 		case IEEE80211_CIPHER_CCMP:
979 			cipher = R92C_TXDW1_CIPHER_AES;
980 			break;
981 		default:
982 			cipher = R92C_TXDW1_CIPHER_NONE;
983 		}
984 		txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher));
985 	}
986 #endif
987 	txd->txdw4 = 0;
988 	txd->txdw5 = 0;
989 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
990 	    type == IEEE80211_FC0_TYPE_DATA) {
991 		if (ic->ic_curmode == IEEE80211_MODE_11B ||
992 		    (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B))
993 			raid = R92C_RAID_11B;
994 		else
995 			raid = R92C_RAID_11BG;
996 		txd->txdw1 |= htole32(
997 		    SM(R92C_TXDW1_MACID, R92C_MACID_BSS) |
998 		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
999 		    SM(R92C_TXDW1_RAID, raid) |
1000 		    R92C_TXDW1_AGGBK);
1001 
1002 		/* Request TX status report for AMRR. */
1003 		txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT);
1004 
1005 		if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
1006 			txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1007 			    R92C_TXDW4_HWRTSEN);
1008 		} else if (ic->ic_flags & IEEE80211_F_USEPROT) {
1009 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1010 				txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
1011 				    R92C_TXDW4_HWRTSEN);
1012 			} else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1013 				txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1014 				    R92C_TXDW4_HWRTSEN);
1015 			}
1016 		}
1017 
1018 		if (ic->ic_curmode == IEEE80211_MODE_11B)
1019 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 0));
1020 		else
1021 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 3));
1022 		txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf));
1023 
1024 		/* Use AMRR rate for data. */
1025 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1026 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ni->ni_txrate));
1027 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));
1028 	} else {
1029 		txd->txdw1 |= htole32(
1030 		    SM(R92C_TXDW1_MACID, 0) |
1031 		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
1032 		    SM(R92C_TXDW1_RAID, R92C_RAID_11B));
1033 
1034 		/* Force CCK1. */
1035 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1036 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
1037 	}
1038 	/* Set sequence number (already little endian). */
1039 	txd->txdseq = *(uint16_t *)wh->i_seq;
1040 
1041 	if (!hasqos) {
1042 		/* Use HW sequence numbering for non-QoS frames. */
1043 		txd->txdw4  |= htole32(R92C_TXDW4_HWSEQ);
1044 		txd->txdseq |= htole16(0x8000);		/* WTF? */
1045 	} else
1046 		txd->txdw4 |= htole32(R92C_TXDW4_QOS);
1047 
1048 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1049 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1050 	if (error && error != EFBIG) {
1051 		printf("%s: can't map mbuf (error %d)\n",
1052 		    sc->sc_dev.dv_xname, error);
1053 		m_freem(m);
1054 		return error;
1055 	}
1056 	if (error != 0) {
1057 		/* Too many DMA segments, linearize mbuf. */
1058 		if (m_defrag(m, M_DONTWAIT)) {
1059 			m_freem(m);
1060 			return ENOBUFS;
1061 		}
1062 
1063 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1064 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1065 		if (error != 0) {
1066 			printf("%s: can't map mbuf (error %d)\n",
1067 			    sc->sc_dev.dv_xname, error);
1068 			m_freem(m);
1069 			return error;
1070 		}
1071 	}
1072 
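	/*
	 * Fill in the DMA address and length of the frame, then set the
	 * OWN bit last, behind a write barrier, to pass ownership of the
	 * descriptor to the chip.
	 */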
1073 	txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
1074 	txd->txbufsize = htole16(m->m_pkthdr.len);
1075 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
1076 	    BUS_SPACE_BARRIER_WRITE);
1077 	txd->txdw0 |= htole32(R92C_TXDW0_OWN);
1078 
1079 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1080 	    BUS_DMASYNC_POSTWRITE);
1081 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
1082 	    BUS_DMASYNC_POSTWRITE);
1083 
1084 	data->m = m;
1085 	data->ni = ni;
1086 
1087 #if NBPFILTER > 0
1088 	if (__predict_false(sc->sc_drvbpf != NULL)) {
1089 		struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
1090 		struct mbuf mb;
1091 
1092 		tap->wt_flags = 0;
1093 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1094 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1095 
1096 		mb.m_data = (caddr_t)tap;
1097 		mb.m_len = sc->sc_txtap_len;
1098 		mb.m_next = m;
1099 		mb.m_nextpkt = NULL;
1100 		mb.m_type = 0;
1101 		mb.m_flags = 0;
1102 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1103 	}
1104 #endif
1105 
1106 	tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
1107 	tx_ring->queued++;
1108 
1109 	if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
1110 		sc->qfullmsk |= (1 << qid);
1111 
1112 	/* Kick TX. */
1113 	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));
1114 
1115 	return (0);
1116 }
1117 
1118 void
1119 rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
1120 {
1121 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1122 	struct ifnet *ifp = &ic->ic_if;
1123 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
1124 	struct rtwn_tx_data *tx_data;
1125 	struct r92c_tx_desc_pci *tx_desc;
1126 	int i;
1127 
1128 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1129 	    BUS_DMASYNC_POSTREAD);
1130 
1131 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
1132 		tx_data = &tx_ring->tx_data[i];
1133 		if (tx_data->m == NULL)
1134 			continue;
1135 
1136 		tx_desc = &tx_ring->desc[i];
1137 		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
1138 			continue;
1139 
1140 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
1141 		m_freem(tx_data->m);
1142 		tx_data->m = NULL;
1143 		ieee80211_release_node(ic, tx_data->ni);
1144 		tx_data->ni = NULL;
1145 
1146 		sc->sc_sc.sc_tx_timer = 0;
1147 		tx_ring->queued--;
1148 
1149 		rtwn_poll_c2h_events(sc);
1150 	}
1151 
1152 	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
1153 		sc->qfullmsk &= ~(1 << qid);
1154 
1155 	if (sc->qfullmsk == 0) {
1156 		ifq_clr_oactive(&ifp->if_snd);
1157 		(*ifp->if_start)(ifp);
1158 	}
1159 }
1160 
1161 int
1162 rtwn_alloc_buffers(void *cookie)
1163 {
1164 	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
1165 	return (0);
1166 }
1167 
1168 int
1169 rtwn_pci_init(void *cookie)
1170 {
1171 	struct rtwn_pci_softc *sc = cookie;
1172 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
1173 	return (0);
1174 }
1175 
1176 void
1177 rtwn_pci_stop(void *cookie)
1178 {
1179 	struct rtwn_pci_softc *sc = cookie;
1180 	uint16_t reg;
1181 	int i, s;
1182 
1183 	s = splnet();
1184 
1185 	/* Disable interrupts. */
1186 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1187 
1188 	/* Stop hardware. */
1189 	rtwn_pci_write_1(sc, R92C_TXPAUSE, 0xff);
1190 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1191 	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
1192 	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
1193 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1194 	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
1195 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1196 	reg = rtwn_pci_read_2(sc, R92C_CR);
1197 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1198 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1199 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1200 	    R92C_CR_ENSEC);
1201 	rtwn_pci_write_2(sc, R92C_CR, reg);
1202 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1203 		rtwn_fw_reset(&sc->sc_sc);
1204 	/* TODO: linux does additional btcoex stuff here */
1205 	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80); /* linux magic number */
1206 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23); /* ditto */
1207 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e); /* differs in btcoex */
1208 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x0e);
1209 	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);
1210 
1211 	for (i = 0; i < RTWN_NTXQUEUES; i++)
1212 		rtwn_reset_tx_list(sc, i);
1213 	rtwn_reset_rx_list(sc);
1214 
1215 	splx(s);
1216 }
1217 
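/*
 * Interrupt handler.  Interrupts are masked and acknowledged first, the Rx
 * ring is then scanned for descriptors released by the hardware (OWN bit
 * clear) and Tx completions are processed per queue, and interrupts are
 * finally re-enabled.
 */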
1218 int
1219 rtwn_intr(void *xsc)
1220 {
1221 	struct rtwn_pci_softc *sc = xsc;
1222 	u_int32_t status;
1223 	int i;
1224 
1225 	status = rtwn_pci_read_4(sc, R92C_HISR);
1226 	if (status == 0 || status == 0xffffffff)
1227 		return (0);
1228 
1229 	/* Disable interrupts. */
1230 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1231 
1232 	/* Ack interrupts. */
1233 	rtwn_pci_write_4(sc, R92C_HISR, status);
1234 
1235 	/* Vendor driver treats RX errors like ROK... */
1236 	if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) {
1237 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1238 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1239 		    BUS_DMASYNC_POSTREAD);
1240 
1241 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1242 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1243 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1244 
1245 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1246 				continue;
1247 
1248 			rtwn_rx_frame(sc, rx_desc, rx_data, i);
1249 		}
1250 	}
1251 
1252 	if (status & R92C_IMR_BDOK)
1253 		rtwn_tx_done(sc, RTWN_BEACON_QUEUE);
1254 	if (status & R92C_IMR_HIGHDOK)
1255 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1256 	if (status & R92C_IMR_MGNTDOK)
1257 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1258 	if (status & R92C_IMR_BKDOK)
1259 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1260 	if (status & R92C_IMR_BEDOK)
1261 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1262 	if (status & R92C_IMR_VIDOK)
1263 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1264 	if (status & R92C_IMR_VODOK)
1265 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1266 
1267 	/* Enable interrupts. */
1268 	rtwn_pci_write_4(sc, R92C_HIMR, RTWN_INT_ENABLE);
1269 
1270 	return (1);
1271 }
1272 
1273 int
1274 rtwn_is_oactive(void *cookie)
1275 {
1276 	struct rtwn_pci_softc *sc = cookie;
1277 
1278 	return (sc->qfullmsk != 0);
1279 }
1280 
1281 int
1282 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data)
1283 {
1284 	int ntries;
1285 
1286 	rtwn_pci_write_4(sc, R92C_LLT_INIT,
1287 	    SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
1288 	    SM(R92C_LLT_INIT_ADDR, addr) |
1289 	    SM(R92C_LLT_INIT_DATA, data));
1290 	/* Wait for write operation to complete. */
1291 	for (ntries = 0; ntries < 20; ntries++) {
1292 		if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
1293 		    R92C_LLT_INIT_OP_NO_ACTIVE)
1294 			return (0);
1295 		DELAY(5);
1296 	}
1297 	return (ETIMEDOUT);
1298 }
1299 
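/*
 * Initialize the link list table (LLT) which chains the pages of the TX
 * packet buffer: the reserved pages form a linear list terminated by 0xff
 * while the remaining pages are linked into a circular ring buffer.
 */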
1300 int
1301 rtwn_llt_init(struct rtwn_pci_softc *sc)
1302 {
1303 	int i, error;
1304 
1305 	/* Reserve pages [0; R92C_TX_PAGE_COUNT]. */
1306 	for (i = 0; i < R92C_TX_PAGE_COUNT; i++) {
1307 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1308 			return (error);
1309 	}
1310 	/* NB: 0xff indicates end-of-list. */
1311 	if ((error = rtwn_llt_write(sc, i, 0xff)) != 0)
1312 		return (error);
1313 	/*
1314 	 * Use pages [R92C_TX_PAGE_COUNT + 1; R92C_TXPKTBUF_COUNT - 1]
1315 	 * as ring buffer.
1316 	 */
1317 	for (++i; i < R92C_TXPKTBUF_COUNT - 1; i++) {
1318 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1319 			return (error);
1320 	}
1321 	/* Make the last page point to the beginning of the ring buffer. */
1322 	error = rtwn_llt_write(sc, i, R92C_TX_PAGE_COUNT + 1);
1323 	return (error);
1324 }
1325 
1326 int
1327 rtwn_power_on(void *cookie)
1328 {
1329 	struct rtwn_pci_softc *sc = cookie;
1330 	uint32_t reg;
1331 	int ntries;
1332 
1333 	/* Wait for autoload done bit. */
1334 	for (ntries = 0; ntries < 1000; ntries++) {
1335 		if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) &
1336 		    R92C_APS_FSMCO_PFM_ALDN)
1337 			break;
1338 		DELAY(5);
1339 	}
1340 	if (ntries == 1000) {
1341 		printf("%s: timeout waiting for chip autoload\n",
1342 		    sc->sc_dev.dv_xname);
1343 		return (ETIMEDOUT);
1344 	}
1345 
1346 	/* Unlock ISO/CLK/Power control register. */
1347 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1348 
1349 	/* TODO: check if we need this for 8188CE */
1350 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1351 		/* bt coex */
1352 		reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO);
1353 		reg |= (R92C_APS_FSMCO_SOP_ABG |
1354 			R92C_APS_FSMCO_SOP_AMB |
1355 			R92C_APS_FSMCO_XOP_BTCK);
1356 		rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg);
1357 	}
1358 
1359 	/* Move SPS into PWM mode. */
1360 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b);
1361 	DELAY(100);
1362 
1363 	/* Set low byte to 0x0f, leave others unchanged. */
1364 	rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL,
1365 	    (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f);
1366 
1367 	/* TODO: check if we need this for 8188CE */
1368 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1369 		/* bt coex */
1370 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL);
1371 		reg &= (~0x00024800); /* XXX magic from linux */
1372 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg);
1373 	}
1374 
1375 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1376 	  (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) |
1377 	  R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR);
1378 	DELAY(200);
1379 
1380 	/* TODO: linux does additional btcoex stuff here */
1381 
1382 	/* Auto enable WLAN. */
1383 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1384 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1385 	for (ntries = 0; ntries < 1000; ntries++) {
1386 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1387 		    R92C_APS_FSMCO_APFM_ONMAC))
1388 			break;
1389 		DELAY(5);
1390 	}
1391 	if (ntries == 1000) {
1392 		printf("%s: timeout waiting for MAC auto ON\n",
1393 		    sc->sc_dev.dv_xname);
1394 		return (ETIMEDOUT);
1395 	}
1396 
1397 	/* Enable radio, GPIO and LED functions. */
1398 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1399 	    R92C_APS_FSMCO_AFSM_PCIE |
1400 	    R92C_APS_FSMCO_PDN_EN |
1401 	    R92C_APS_FSMCO_PFM_ALDN);
1402 	/* Release RF digital isolation. */
1403 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1404 	    rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);
1405 
1406 	if (sc->sc_sc.chip & RTWN_CHIP_92C)
1407 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77);
1408 	else
1409 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22);
1410 
1411 	rtwn_pci_write_4(sc, R92C_INT_MIG, 0);
1412 
1413 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1414 		/* bt coex */
1415 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2);
1416 		reg &= 0xfd; /* XXX magic from linux */
1417 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg);
1418 	}
1419 
1420 	rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG,
1421 	    rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL);
1422 
1423 	reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL);
1424 	if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) {
1425 		printf("%s: radio is disabled by hardware switch\n",
1426 		    sc->sc_dev.dv_xname);
1427 		return (EPERM);	/* :-) */
1428 	}
1429 
1430 	/* Initialize MAC. */
1431 	reg = rtwn_pci_read_1(sc, R92C_APSD_CTRL);
1432 	rtwn_pci_write_1(sc, R92C_APSD_CTRL,
1433 	    rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
1434 	for (ntries = 0; ntries < 200; ntries++) {
1435 		if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) &
1436 		    R92C_APSD_CTRL_OFF_STATUS))
1437 			break;
1438 		DELAY(500);
1439 	}
1440 	if (ntries == 200) {
1441 		printf("%s: timeout waiting for MAC initialization\n",
1442 		    sc->sc_dev.dv_xname);
1443 		return (ETIMEDOUT);
1444 	}
1445 
1446 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1447 	reg = rtwn_pci_read_2(sc, R92C_CR);
1448 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1449 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1450 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1451 	    R92C_CR_ENSEC;
1452 	rtwn_pci_write_2(sc, R92C_CR, reg);
1453 
1454 	rtwn_pci_write_1(sc, 0xfe10, 0x19);
1455 
1456 	return (0);
1457 }
1458 
1459 int
1460 rtwn_dma_init(void *cookie)
1461 {
1462 	struct rtwn_pci_softc *sc = cookie;
1463 	uint32_t reg;
1464 	int error;
1465 
1466 	/* Initialize LLT table. */
1467 	error = rtwn_llt_init(sc);
1468 	if (error != 0)
1469 		return error;
1470 
1471 	/* Set number of pages for normal priority queue. */
1472 	rtwn_pci_write_2(sc, R92C_RQPN_NPQ, 0);
1473 	rtwn_pci_write_4(sc, R92C_RQPN,
1474 	    /* Set number of pages for public queue. */
1475 	    SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) |
1476 	    /* Set number of pages for high priority queue. */
1477 	    SM(R92C_RQPN_HPQ, R92C_HPQ_NPAGES) |
1478 	    /* Set number of pages for low priority queue. */
1479 	    SM(R92C_RQPN_LPQ, R92C_LPQ_NPAGES) |
1480 	    /* Load values. */
1481 	    R92C_RQPN_LD);
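	/*
	 * Note that the public, high and low priority page counts add up
	 * to R92C_TX_PAGE_COUNT, matching the pages reserved in the LLT.
	 */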
1482 
1483 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY);
1484 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY);
1485 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD,
1486 	    R92C_TX_PAGE_BOUNDARY);
1487 	rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY);
1488 	rtwn_pci_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY);
1489 
1490 	reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL);
1491 	reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
1492 	reg |= 0xF771;
1493 	rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg);
1494 
1495 	rtwn_pci_write_4(sc, R92C_TCR,
1496 	    R92C_TCR_CFENDFORM | (1 << 12) | (1 << 13));
1497 
1498 	/* Configure Tx DMA. */
1499 	rtwn_pci_write_4(sc, R92C_BKQ_DESA,
1500 		sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
1501 	rtwn_pci_write_4(sc, R92C_BEQ_DESA,
1502 		sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
1503 	rtwn_pci_write_4(sc, R92C_VIQ_DESA,
1504 		sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
1505 	rtwn_pci_write_4(sc, R92C_VOQ_DESA,
1506 		sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
1507 	rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
1508 		sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
1509 	rtwn_pci_write_4(sc, R92C_MGQ_DESA,
1510 		sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
1511 	rtwn_pci_write_4(sc, R92C_HQ_DESA,
1512 		sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);
1513 
1514 	/* Configure Rx DMA. */
1515 	rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr);
1516 
1517 	/* Set Tx/Rx transfer page boundary. */
1518 	rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff);
1519 
1520 	/* Set Tx/Rx transfer page size. */
1521 	rtwn_pci_write_1(sc, R92C_PBP,
1522 	    SM(R92C_PBP_PSRX, R92C_PBP_128) |
1523 	    SM(R92C_PBP_PSTX, R92C_PBP_128));
1524 
1525 	return (0);
1526 }
1527 
1528 int
1529 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len)
1530 {
1531 	struct rtwn_pci_softc *sc = cookie;
1532 	uint32_t reg;
1533 	int off, mlen, error = 0, i;
1534 
1535 	reg = rtwn_pci_read_4(sc, R92C_MCUFWDL);
1536 	reg = RW(reg, R92C_MCUFWDL_PAGE, page);
1537 	rtwn_pci_write_4(sc, R92C_MCUFWDL, reg);
1538 
1539 	DELAY(5);
1540 
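	/*
	 * Copy the page into the firmware download window at
	 * R92C_FW_START_ADDR, one byte at a time.
	 */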
1541 	off = R92C_FW_START_ADDR;
1542 	while (len > 0) {
1543 		if (len > 196)
1544 			mlen = 196;
1545 		else if (len > 4)
1546 			mlen = 4;
1547 		else
1548 			mlen = 1;
1549 		for (i = 0; i < mlen; i++)
1550 			rtwn_pci_write_1(sc, off++, buf[i]);
1551 		buf += mlen;
1552 		len -= mlen;
1553 	}
1554 
1555 	return (error);
1556 }
1557 
1558 int
1559 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len)
1560 {
1561 	struct rtwn_pci_softc *sc = cookie;
1562 	const char *name;
1563 	int error;
1564 
1565 	if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | RTWN_CHIP_92C)) ==
1566 	    RTWN_CHIP_UMC_A_CUT)
1567 		name = "rtwn-rtl8192cfwU";
1568 	else
1569 		name = "rtwn-rtl8192cfwU_B";
1570 
1571 	error = loadfirmware(name, fw, len);
1572 	if (error)
1573 		printf("%s: could not read firmware %s (error %d)\n",
1574 		    sc->sc_dev.dv_xname, name, error);
1575 	return (error);
1576 }
1577 
1578 void
1579 rtwn_mac_init(void *cookie)
1580 {
1581 	struct rtwn_pci_softc *sc = cookie;
1582 	int i;
1583 
1584 	/* Write MAC initialization values. */
1585 	for (i = 0; i < nitems(rtl8192ce_mac); i++)
1586 		rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg,
1587 		    rtl8192ce_mac[i].val);
1588 }
1589 
1590 void
1591 rtwn_bb_init(void *cookie)
1592 {
1593 	struct rtwn_pci_softc *sc = cookie;
1594 	const struct r92c_bb_prog *prog;
1595 	uint32_t reg;
1596 	int i;
1597 
1598 	/* Enable BB and RF. */
1599 	rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN,
1600 	    rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) |
1601 	    R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
1602 	    R92C_SYS_FUNC_EN_DIO_RF);
1603 
1604 	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);
1605 
1606 	rtwn_pci_write_1(sc, R92C_RF_CTRL,
1607 	    R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);
1608 
1609 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1610 	    R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA |
1611 	    R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST |
1612 	    R92C_SYS_FUNC_EN_BBRSTB);
1613 
1614 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);
1615 
1616 	rtwn_pci_write_4(sc, R92C_LEDCFG0,
1617 	    rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000);
1618 
1619 	/* Select BB programming. */
1620 	prog = (sc->sc_sc.chip & RTWN_CHIP_92C) ?
1621 	    &rtl8192ce_bb_prog_2t : &rtl8192ce_bb_prog_1t;
1622 
1623 	/* Write BB initialization values. */
1624 	for (i = 0; i < prog->count; i++) {
1625 		rtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
1626 		DELAY(1);
1627 	}
1628 
1629 	if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) {
1630 		/* 8192C 1T only configuration. */
1631 		reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO);
1632 		reg = (reg & ~0x00000003) | 0x2;
1633 		rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);
1634 
1635 		reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO);
1636 		reg = (reg & ~0x00300033) | 0x00200022;
1637 		rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);
1638 
1639 		reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING);
1640 		reg = (reg & ~0xff000000) | 0x45 << 24;
1641 		rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);
1642 
1643 		reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
1644 		reg = (reg & ~0x000000ff) | 0x23;
1645 		rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);
1646 
1647 		reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
1648 		reg = (reg & ~0x00000030) | 1 << 4;
1649 		rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);
1650 
1651 		reg = rtwn_bb_read(sc, 0xe74);
1652 		reg = (reg & ~0x0c000000) | 2 << 26;
1653 		rtwn_bb_write(sc, 0xe74, reg);
1654 		reg = rtwn_bb_read(sc, 0xe78);
1655 		reg = (reg & ~0x0c000000) | 2 << 26;
1656 		rtwn_bb_write(sc, 0xe78, reg);
1657 		reg = rtwn_bb_read(sc, 0xe7c);
1658 		reg = (reg & ~0x0c000000) | 2 << 26;
1659 		rtwn_bb_write(sc, 0xe7c, reg);
1660 		reg = rtwn_bb_read(sc, 0xe80);
1661 		reg = (reg & ~0x0c000000) | 2 << 26;
1662 		rtwn_bb_write(sc, 0xe80, reg);
1663 		reg = rtwn_bb_read(sc, 0xe88);
1664 		reg = (reg & ~0x0c000000) | 2 << 26;
1665 		rtwn_bb_write(sc, 0xe88, reg);
1666 	}
1667 
1668 	/* Write AGC values. */
1669 	for (i = 0; i < prog->agccount; i++) {
1670 		rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
1671 		    prog->agcvals[i]);
1672 		DELAY(1);
1673 	}
1674 
1675 	if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) &
1676 	    R92C_HSSI_PARAM2_CCK_HIPWR)
1677 		sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR;
1678 }
1679 
1680 void
1681 rtwn_calib_to(void *arg)
1682 {
1683 	struct rtwn_pci_softc *sc = arg;
1684 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1685 	int s;
1686 
1687 	s = splnet();
1688 	ieee80211_amrr_choose(&sc->amrr, ic->ic_bss, &sc->amn);
1689 	splx(s);
1690 
1691 	rtwn_calib(&sc->sc_sc);
1692 }
1693 
1694 void
1695 rtwn_next_calib(void *cookie)
1696 {
1697 	struct rtwn_pci_softc *sc = cookie;
1698 
1699 	timeout_add_sec(&sc->calib_to, 2);
1700 }
1701 
1702 void
1703 rtwn_cancel_calib(void *cookie)
1704 {
1705 	struct rtwn_pci_softc *sc = cookie;
1706 
1707 	if (timeout_initialized(&sc->calib_to))
1708 		timeout_del(&sc->calib_to);
1709 }
1710 
1711 void
1712 rtwn_scan_to(void *arg)
1713 {
1714 	struct rtwn_pci_softc *sc = arg;
1715 
1716 	rtwn_next_scan(&sc->sc_sc);
1717 }
1718 
1719 void
1720 rtwn_pci_next_scan(void *cookie)
1721 {
1722 	struct rtwn_pci_softc *sc = cookie;
1723 
1724 	timeout_add_msec(&sc->scan_to, 200);
1725 }
1726 
1727 void
1728 rtwn_cancel_scan(void *cookie)
1729 {
1730 	struct rtwn_pci_softc *sc = cookie;
1731 
1732 	if (timeout_initialized(&sc->scan_to))
1733 		timeout_del(&sc->scan_to);
1734 }
1735 
1736 void
1737 rtwn_wait_async(void *cookie)
1738 {
1739 	/* nothing to do */
1740 }
1741 
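/*
 * Parse a firmware TX report and update the AMRR statistics: successfully
 * transmitted packets increase the TX count, while retries, drops, lifetime
 * expirations and retry-limit hits are counted as retries.
 */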
1742 void
1743 rtwn_tx_report(struct rtwn_pci_softc *sc, uint8_t *buf, int len)
1744 {
1745 	struct r92c_c2h_tx_rpt *rpt = (struct r92c_c2h_tx_rpt *)buf;
1746 	int packets, tries, tx_ok, drop, expire, over;
1747 
1748 	if (len != sizeof(*rpt))
1749 		return;
1750 
1751 	packets = MS(rpt->rptb6, R92C_RPTB6_RPT_PKT_NUM);
1752 	tries = MS(rpt->rptb0, R92C_RPTB0_RETRY_CNT);
1753 	tx_ok = (rpt->rptb7 & R92C_RPTB7_PKT_OK);
1754 	drop = (rpt->rptb6 & R92C_RPTB6_PKT_DROP);
1755 	expire = (rpt->rptb6 & R92C_RPTB6_LIFE_EXPIRE);
1756 	over = (rpt->rptb6 & R92C_RPTB6_RETRY_OVER);
1757 
1758 	if (packets > 0) {
1759 		if (tx_ok)
1760 			sc->amn.amn_txcnt += packets;
1761 		if (tries > 1 || drop || expire || over)
1762 			sc->amn.amn_retrycnt++;
1763 	}
1764 }
1765 
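/*
 * Poll the firmware-to-host (C2H) event mailbox.  The firmware marks the
 * mailbox as R92C_C2HEVT_FW_CLOSE when an event is pending; the driver
 * reads it out and writes R92C_C2HEVT_HOST_CLOSE to acknowledge.  Only TX
 * report events, which feed the AMRR statistics, are handled here.
 */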
1766 void
1767 rtwn_poll_c2h_events(struct rtwn_pci_softc *sc)
1768 {
1769 	const uint16_t off = R92C_C2HEVT_MSG + sizeof(struct r92c_c2h_evt);
1770 	uint8_t buf[R92C_C2H_MSG_MAX_LEN];
1771 	uint8_t id, len, status;
1772 	int i;
1773 
1774 	/* Read current status. */
1775 	status = rtwn_pci_read_1(sc, R92C_C2HEVT_CLEAR);
1776 	if (status == R92C_C2HEVT_HOST_CLOSE)
1777 		return;	/* nothing to do */
1778 
1779 	if (status == R92C_C2HEVT_FW_CLOSE) {
1780 		len = rtwn_pci_read_1(sc, R92C_C2HEVT_MSG);
1781 		id = MS(len, R92C_C2H_EVTB0_ID);
1782 		len = MS(len, R92C_C2H_EVTB0_LEN);
1783 
1784 		if (id == R92C_C2HEVT_TX_REPORT && len <= sizeof(buf)) {
1785 			memset(buf, 0, sizeof(buf));
1786 			for (i = 0; i < len; i++)
1787 				buf[i] = rtwn_pci_read_1(sc, off + i);
1788 			rtwn_tx_report(sc, buf, len);
1789 		} else
1790 			DPRINTF(("unhandled C2H event %d (%d bytes)\n",
1791 			    id, len));
1792 	}
1793 
1794 	/* Prepare for next event. */
1795 	rtwn_pci_write_1(sc, R92C_C2HEVT_CLEAR, R92C_C2HEVT_HOST_CLOSE);
1796 }
1797