1 /*	$OpenBSD: if_rtwn.c,v 1.30 2017/09/03 16:20:46 stsp Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2015-2016 Andriy Voskoboinyk <avos@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * PCI front-end for Realtek RTL8188CE/RTL8192CE driver.
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/task.h>
34 #include <sys/timeout.h>
35 #include <sys/conf.h>
36 #include <sys/device.h>
37 #include <sys/endian.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #if NBPFILTER > 0
43 #include <net/bpf.h>
44 #endif
45 #include <net/if.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51 
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_amrr.h>
54 #include <net80211/ieee80211_radiotap.h>
55 
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58 #include <dev/pci/pcidevs.h>
59 
60 #include <dev/ic/r92creg.h>
61 #include <dev/ic/rtwnvar.h>
62 
63 /*
64  * Driver definitions.
65  */
66 
67 #define R92C_PUBQ_NPAGES	176
68 #define R92C_HPQ_NPAGES		41
69 #define R92C_LPQ_NPAGES		28
70 #define R92C_TXPKTBUF_COUNT	256
71 #define R92C_TX_PAGE_COUNT	\
72 	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
73 #define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)
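
/*
 * Page accounting implied by the constants above: the public, high and
 * low priority queues take 176 + 41 + 28 = 245 pages of the 256-page
 * Tx packet buffer, so R92C_TX_PAGE_BOUNDARY lands on page 246.
 * rtwn_llt_init() reserves pages 0-245 for Tx (page 245 terminates the
 * list) and links the remaining pages 246-255 into a ring buffer.
 */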
74 
75 #define RTWN_NTXQUEUES			9
76 #define RTWN_RX_LIST_COUNT		256
77 #define RTWN_TX_LIST_COUNT		256
78 
79 /* TX queue indices. */
80 #define RTWN_BK_QUEUE			0
81 #define RTWN_BE_QUEUE			1
82 #define RTWN_VI_QUEUE			2
83 #define RTWN_VO_QUEUE			3
84 #define RTWN_BEACON_QUEUE		4
85 #define RTWN_TXCMD_QUEUE		5
86 #define RTWN_MGNT_QUEUE			6
87 #define RTWN_HIGH_QUEUE			7
88 #define RTWN_HCCA_QUEUE			8
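
/*
 * rtwn_tx() only ever queues frames onto the first four (AC) queues:
 * the index for QoS data comes from the frame's TID via
 * ieee80211_up_to_ac(), non-QoS data goes to RTWN_BE_QUEUE and all
 * other frame types go to RTWN_VO_QUEUE.
 */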
89 
90 struct rtwn_rx_radiotap_header {
91 	struct ieee80211_radiotap_header wr_ihdr;
92 	uint8_t		wr_flags;
93 	uint8_t		wr_rate;
94 	uint16_t	wr_chan_freq;
95 	uint16_t	wr_chan_flags;
96 	uint8_t		wr_dbm_antsignal;
97 } __packed;
98 
99 #define RTWN_RX_RADIOTAP_PRESENT			\
100 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
101 	 1 << IEEE80211_RADIOTAP_RATE |			\
102 	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
103 	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
104 
105 struct rtwn_tx_radiotap_header {
106 	struct ieee80211_radiotap_header wt_ihdr;
107 	uint8_t		wt_flags;
108 	uint16_t	wt_chan_freq;
109 	uint16_t	wt_chan_flags;
110 } __packed;
111 
112 #define RTWN_TX_RADIOTAP_PRESENT			\
113 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
114 	 1 << IEEE80211_RADIOTAP_CHANNEL)
115 
116 struct rtwn_rx_data {
117 	bus_dmamap_t		map;
118 	struct mbuf		*m;
119 };
120 
121 struct rtwn_rx_ring {
122 	struct r92c_rx_desc_pci	*desc;
123 	bus_dmamap_t		map;
124 	bus_dma_segment_t	seg;
125 	int			nsegs;
126 	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];
127 };
128 
129 struct rtwn_tx_data {
130 	bus_dmamap_t			map;
131 	struct mbuf			*m;
132 	struct ieee80211_node		*ni;
133 };
134 
135 struct rtwn_tx_ring {
136 	bus_dmamap_t		map;
137 	bus_dma_segment_t	seg;
138 	int			nsegs;
139 	struct r92c_tx_desc_pci	*desc;
140 	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
141 	int			queued;
142 	int			cur;
143 };
144 
145 struct rtwn_pci_softc {
146 	struct device		sc_dev;
147 	struct rtwn_softc	sc_sc;
148 
149 	struct rtwn_rx_ring	rx_ring;
150 	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
151 	uint32_t		qfullmsk;
152 
153 	struct timeout		calib_to;
154 	struct timeout		scan_to;
155 
156 	/* PCI specific goo. */
157 	bus_dma_tag_t 		sc_dmat;
158 	pci_chipset_tag_t	sc_pc;
159 	pcitag_t		sc_tag;
160 	void			*sc_ih;
161 	bus_space_tag_t		sc_st;
162 	bus_space_handle_t	sc_sh;
163 	bus_size_t		sc_mapsize;
164 	int			sc_cap_off;
165 
166 	struct ieee80211_amrr		amrr;
167 	struct ieee80211_amrr_node	amn;
168 
169 #if NBPFILTER > 0
170 	caddr_t				sc_drvbpf;
171 
172 	union {
173 		struct rtwn_rx_radiotap_header th;
174 		uint8_t	pad[64];
175 	}				sc_rxtapu;
176 #define sc_rxtap	sc_rxtapu.th
177 	int				sc_rxtap_len;
178 
179 	union {
180 		struct rtwn_tx_radiotap_header th;
181 		uint8_t	pad[64];
182 	}				sc_txtapu;
183 #define sc_txtap	sc_txtapu.th
184 	int				sc_txtap_len;
185 #endif
186 };
187 
188 #ifdef RTWN_DEBUG
189 #define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
190 #define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
191 extern int rtwn_debug;
192 #else
193 #define DPRINTF(x)
194 #define DPRINTFN(n, x)
195 #endif
196 
197 /*
198  * PCI configuration space registers.
199  */
200 #define	RTWN_PCI_IOBA		0x10	/* i/o mapped base */
201 #define	RTWN_PCI_MMBA		0x18	/* memory mapped base */
202 
203 static const struct pci_matchid rtwn_pci_devices[] = {
204 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RT8188 },
205 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8192CE }
206 };
207 
208 int		rtwn_pci_match(struct device *, void *, void *);
209 void		rtwn_pci_attach(struct device *, struct device *, void *);
210 int		rtwn_pci_detach(struct device *, int);
211 int		rtwn_pci_activate(struct device *, int);
212 int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
213 void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
214 void		rtwn_free_rx_list(struct rtwn_pci_softc *);
215 void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
216 		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
217 int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
218 void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
219 void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
220 void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
221 void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
222 void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
223 uint8_t		rtwn_pci_read_1(void *, uint16_t);
224 uint16_t	rtwn_pci_read_2(void *, uint16_t);
225 uint32_t	rtwn_pci_read_4(void *, uint16_t);
226 void		rtwn_rx_frame(struct rtwn_pci_softc *,
227 		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int);
228 int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
229 void		rtwn_tx_done(struct rtwn_pci_softc *, int);
230 int		rtwn_alloc_buffers(void *);
231 int		rtwn_pci_init(void *);
232 void		rtwn_pci_stop(void *);
233 int		rtwn_intr(void *);
234 int		rtwn_is_oactive(void *);
235 int		rtwn_power_on(void *);
236 int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
237 int		rtwn_llt_init(struct rtwn_pci_softc *);
238 int		rtwn_dma_init(void *);
239 int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
240 int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
241 void		rtwn_mac_init(void *);
242 void		rtwn_bb_init(void *);
243 void		rtwn_calib_to(void *);
244 void		rtwn_next_calib(void *);
245 void		rtwn_cancel_calib(void *);
246 void		rtwn_scan_to(void *);
247 void		rtwn_pci_next_scan(void *);
248 void		rtwn_cancel_scan(void *);
249 void		rtwn_wait_async(void *);
250 void		rtwn_poll_c2h_events(struct rtwn_pci_softc *);
251 void		rtwn_tx_report(struct rtwn_pci_softc *, uint8_t *, int);
252 
253 /* Aliases. */
254 #define	rtwn_bb_write	rtwn_pci_write_4
255 #define rtwn_bb_read	rtwn_pci_read_4
256 
257 struct cfdriver rtwn_cd = {
258 	NULL, "rtwn", DV_IFNET
259 };
260 
261 const struct cfattach rtwn_pci_ca = {
262 	sizeof(struct rtwn_pci_softc),
263 	rtwn_pci_match,
264 	rtwn_pci_attach,
265 	rtwn_pci_detach,
266 	rtwn_pci_activate
267 };
268 
269 int
270 rtwn_pci_match(struct device *parent, void *match, void *aux)
271 {
272 	return (pci_matchbyid(aux, rtwn_pci_devices,
273 	    nitems(rtwn_pci_devices)));
274 }
275 
276 void
277 rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
278 {
279 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
280 	struct pci_attach_args *pa = aux;
281 	struct ifnet *ifp;
282 	int i, error;
283 	pcireg_t memtype;
284 	pci_intr_handle_t ih;
285 	const char *intrstr;
286 
287 	sc->sc_dmat = pa->pa_dmat;
288 	sc->sc_pc = pa->pa_pc;
289 	sc->sc_tag = pa->pa_tag;
290 
291 	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
292 	timeout_set(&sc->scan_to, rtwn_scan_to, sc);
293 
294 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
295 
296 	/* Map control/status registers. */
297 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
298 	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
299 	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
300 	if (error != 0) {
301 		printf(": can't map mem space\n");
302 		return;
303 	}
304 
305 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
306 		printf(": can't map interrupt\n");
307 		return;
308 	}
309 	intrstr = pci_intr_string(sc->sc_pc, ih);
310 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
311 	    rtwn_intr, sc, sc->sc_dev.dv_xname);
312 	if (sc->sc_ih == NULL) {
313 		printf(": can't establish interrupt");
314 		if (intrstr != NULL)
315 			printf(" at %s", intrstr);
316 		printf("\n");
317 		return;
318 	}
319 	printf(": %s\n", intrstr);
320 
321 	/* Disable PCIe Active State Power Management (ASPM). */
322 	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
323 	    &sc->sc_cap_off, NULL)) {
324 		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
325 		    sc->sc_cap_off + PCI_PCIE_LCSR);
326 		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
327 		pci_conf_write(sc->sc_pc, sc->sc_tag,
328 		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
329 	}
330 
331 	/* Allocate Tx/Rx buffers. */
332 	error = rtwn_alloc_rx_list(sc);
333 	if (error != 0) {
334 		printf("%s: could not allocate Rx buffers\n",
335 		    sc->sc_dev.dv_xname);
336 		return;
337 	}
338 	for (i = 0; i < RTWN_NTXQUEUES; i++) {
339 		error = rtwn_alloc_tx_list(sc, i);
340 		if (error != 0) {
341 			printf("%s: could not allocate Tx buffers\n",
342 			    sc->sc_dev.dv_xname);
343 			rtwn_free_rx_list(sc);
344 			return;
345 		}
346 	}
347 
348 	sc->amrr.amrr_min_success_threshold = 1;
349 	sc->amrr.amrr_max_success_threshold = 15;
350 
351 	/* Attach the bus-agnostic driver. */
352 	sc->sc_sc.sc_ops.cookie = sc;
353 	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
354 	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
355 	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
356 	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
357 	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
358 	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
359 	sc->sc_sc.sc_ops.tx = rtwn_tx;
360 	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
361 	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
362 	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
363 	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
364 	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
365 	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
366 	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
367 	sc->sc_sc.sc_ops.init = rtwn_pci_init;
368 	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
369 	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
370 	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
371 	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
372 	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
373 	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
374 	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
375 
376 	sc->sc_sc.chip = RTWN_CHIP_88C | RTWN_CHIP_92C | RTWN_CHIP_PCI;
377 
378 	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc);
379 	if (error != 0) {
380 		rtwn_free_rx_list(sc);
381 		for (i = 0; i < RTWN_NTXQUEUES; i++)
382 			rtwn_free_tx_list(sc, i);
383 		return;
384 	}
385 
386 	/* ifp is now valid */
387 	ifp = &sc->sc_sc.sc_ic.ic_if;
388 #if NBPFILTER > 0
389 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
390 	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
391 
392 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
393 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
394 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);
395 
396 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
397 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
398 	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
399 #endif
400 }
401 
402 int
403 rtwn_pci_detach(struct device *self, int flags)
404 {
405 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
406 	int s, i;
407 
408 	s = splnet();
409 
410 	if (timeout_initialized(&sc->calib_to))
411 		timeout_del(&sc->calib_to);
412 	if (timeout_initialized(&sc->scan_to))
413 		timeout_del(&sc->scan_to);
414 
415 	rtwn_detach(&sc->sc_sc, flags);
416 
417 	/* Free Tx/Rx buffers. */
418 	for (i = 0; i < RTWN_NTXQUEUES; i++)
419 		rtwn_free_tx_list(sc, i);
420 	rtwn_free_rx_list(sc);
421 	splx(s);
422 
423 	return (0);
424 }
425 
426 int
427 rtwn_pci_activate(struct device *self, int act)
428 {
429 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
430 
431 	return rtwn_activate(&sc->sc_sc, act);
432 }
433 
434 void
435 rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
436     bus_addr_t addr, size_t len, int idx)
437 {
438 	memset(desc, 0, sizeof(*desc));
439 	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
440 		((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
441 	desc->rxbufaddr = htole32(addr);
442 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
443 	    BUS_SPACE_BARRIER_WRITE);
444 	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
445 }
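
/*
 * Ownership handshake: the length and DMA address are written first,
 * the write barrier makes those stores visible, and only then is
 * R92C_RXDW0_OWN set to hand the descriptor to the chip.  rtwn_intr()
 * skips any descriptor that still has OWN set, since the chip has not
 * finished filling it yet.
 */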
446 
447 int
448 rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
449 {
450 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
451 	struct rtwn_rx_data *rx_data;
452 	size_t size;
453 	int i, error = 0;
454 
455 	/* Allocate Rx descriptors. */
456 	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
457 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
458 		&rx_ring->map);
459 	if (error != 0) {
460 		printf("%s: could not create rx desc DMA map\n",
461 		    sc->sc_dev.dv_xname);
462 		rx_ring->map = NULL;
463 		goto fail;
464 	}
465 
466 	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
467 	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
468 	if (error != 0) {
469 		printf("%s: could not allocate rx desc\n",
470 		    sc->sc_dev.dv_xname);
471 		goto fail;
472 	}
473 
474 	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
475 	    size, (caddr_t *)&rx_ring->desc,
476 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
477 	if (error != 0) {
478 		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
479 		rx_ring->desc = NULL;
480 		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
481 		goto fail;
482 	}
483 
484 	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
485 	    1, size, BUS_DMA_NOWAIT);
486 	if (error != 0) {
487 		printf("%s: could not load rx desc\n",
488 		    sc->sc_dev.dv_xname);
489 		goto fail;
490 	}
491 
492 	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
493 	    BUS_DMASYNC_PREWRITE);
494 
495 	/* Allocate Rx buffers. */
496 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
497 		rx_data = &rx_ring->rx_data[i];
498 
499 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
500 		    0, BUS_DMA_NOWAIT, &rx_data->map);
501 		if (error != 0) {
502 			printf("%s: could not create rx buf DMA map\n",
503 			    sc->sc_dev.dv_xname);
504 			goto fail;
505 		}
506 
507 		rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
508 		if (rx_data->m == NULL) {
509 			printf("%s: could not allocate rx mbuf\n",
510 			    sc->sc_dev.dv_xname);
511 			error = ENOMEM;
512 			goto fail;
513 		}
514 
515 		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
516 		    mtod(rx_data->m, void *), MCLBYTES, NULL,
517 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
518 		if (error != 0) {
519 			printf("%s: could not load rx buf DMA map\n",
520 			    sc->sc_dev.dv_xname);
521 			goto fail;
522 		}
523 
524 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
525 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
526 	}
527 fail:	if (error != 0)
528 		rtwn_free_rx_list(sc);
529 	return (error);
530 }
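
/*
 * The allocation above is the usual bus_dma(9) sequence for a
 * descriptor ring: bus_dmamap_create() for the ring map, then
 * bus_dmamem_alloc()/bus_dmamem_map() for the descriptor memory,
 * bus_dmamap_load_raw() to learn its physical address, and finally
 * one MCLBYTES map plus cluster per Rx slot.  Any failure falls
 * through to the "fail" label, which tears everything down via
 * rtwn_free_rx_list().
 */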
531 
532 void
533 rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
534 {
535 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
536 	struct rtwn_rx_data *rx_data;
537 	int i;
538 
539 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
540 		rx_data = &rx_ring->rx_data[i];
541 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
542 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
543 	}
544 }
545 
546 void
547 rtwn_free_rx_list(struct rtwn_pci_softc *sc)
548 {
549 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
550 	struct rtwn_rx_data *rx_data;
551 	int i, s;
552 
553 	s = splnet();
554 
555 	if (rx_ring->map) {
556 		if (rx_ring->desc) {
557 			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
558 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
559 			    sizeof (struct r92c_rx_desc_pci) *
560 			    RTWN_RX_LIST_COUNT);
561 			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
562 			    rx_ring->nsegs);
563 			rx_ring->desc = NULL;
564 		}
565 		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
566 		rx_ring->map = NULL;
567 	}
568 
569 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
570 		rx_data = &rx_ring->rx_data[i];
571 
572 		if (rx_data->m != NULL) {
573 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
574 			m_freem(rx_data->m);
575 			rx_data->m = NULL;
576 		}
577 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
578 		rx_data->map = NULL;
579 	}
580 
581 	splx(s);
582 }
583 
584 int
585 rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
586 {
587 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
588 	struct rtwn_tx_data *tx_data;
589 	int i = 0, error = 0;
590 
591 	error = bus_dmamap_create(sc->sc_dmat,
592 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
593 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
594 	    BUS_DMA_NOWAIT, &tx_ring->map);
595 	if (error != 0) {
596 		printf("%s: could not create tx ring DMA map\n",
597 		    sc->sc_dev.dv_xname);
598 		goto fail;
599 	}
600 
601 	error = bus_dmamem_alloc(sc->sc_dmat,
602 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
603 	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
604 	if (error != 0) {
605 		printf("%s: could not allocate tx ring DMA memory\n",
606 		    sc->sc_dev.dv_xname);
607 		goto fail;
608 	}
609 
610 	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
611 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
612 	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
613 	if (error != 0) {
614 		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
615 		printf("%s: can't map tx ring DMA memory\n",
616 		    sc->sc_dev.dv_xname);
617 		goto fail;
618 	}
619 
620 	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
621 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
622 	    BUS_DMA_NOWAIT);
623 	if (error != 0) {
624 		printf("%s: could not load tx ring DMA map\n",
625 		    sc->sc_dev.dv_xname);
626 		goto fail;
627 	}
628 
629 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
630 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
631 
632 		/* setup tx desc */
633 		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
634 		  + sizeof(struct r92c_tx_desc_pci)
635 		  * ((i + 1) % RTWN_TX_LIST_COUNT));
636 
637 		tx_data = &tx_ring->tx_data[i];
638 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
639 		    0, BUS_DMA_NOWAIT, &tx_data->map);
640 		if (error != 0) {
641 			printf("%s: could not create tx buf DMA map\n",
642 			    sc->sc_dev.dv_xname);
643 			goto fail;
644 		}
645 		tx_data->m = NULL;
646 		tx_data->ni = NULL;
647 	}
648 fail:
649 	if (error != 0)
650 		rtwn_free_tx_list(sc, qid);
651 	return (error);
652 }
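
/*
 * Each Tx descriptor's nextdescaddr points at entry (i + 1) %
 * RTWN_TX_LIST_COUNT, so the 256 descriptors form a circular list
 * (entry 255 links back to entry 0).  The driver and the chip then
 * chase each other around this ring using the OWN bit together with
 * the cur/queued counters.
 */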
653 
654 void
655 rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
656 {
657 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
658 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
659 	int i;
660 
661 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
662 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
663 		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
664 
665 		memset(desc, 0, sizeof(*desc) -
666 		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
667 		    sizeof(desc->nextdescaddr)));
668 
669 		if (tx_data->m != NULL) {
670 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
671 			m_freem(tx_data->m);
672 			tx_data->m = NULL;
673 			ieee80211_release_node(ic, tx_data->ni);
674 			tx_data->ni = NULL;
675 		}
676 	}
677 
678 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
679 	    BUS_DMASYNC_POSTWRITE);
680 
681 	sc->qfullmsk &= ~(1 << qid);
682 	tx_ring->queued = 0;
683 	tx_ring->cur = 0;
684 }
685 
686 void
687 rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
688 {
689 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
690 	struct rtwn_tx_data *tx_data;
691 	int i;
692 
693 	if (tx_ring->map != NULL) {
694 		if (tx_ring->desc != NULL) {
695 			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
696 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
697 			    sizeof (struct r92c_tx_desc_pci) *
698 			    RTWN_TX_LIST_COUNT);
699 			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
700 		}
701 		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
702 	}
703 
704 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
705 		tx_data = &tx_ring->tx_data[i];
706 
707 		if (tx_data->m != NULL) {
708 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
709 			m_freem(tx_data->m);
710 			tx_data->m = NULL;
711 		}
712 		bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
713 	}
714 
715 	sc->qfullmsk &= ~(1 << qid);
716 	tx_ring->queued = 0;
717 	tx_ring->cur = 0;
718 }
719 
720 void
721 rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
722 {
723 	struct rtwn_pci_softc *sc = cookie;
724 
725 	bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
726 }
727 
728 void
729 rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
730 {
731 	struct rtwn_pci_softc *sc = cookie;
732 
733 	val = htole16(val);
734 	bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
735 }
736 
737 void
738 rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
739 {
740 	struct rtwn_pci_softc *sc = cookie;
741 
742 	val = htole32(val);
743 	bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
744 }
745 
746 uint8_t
747 rtwn_pci_read_1(void *cookie, uint16_t addr)
748 {
749 	struct rtwn_pci_softc *sc = cookie;
750 
751 	return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
752 }
753 
754 uint16_t
755 rtwn_pci_read_2(void *cookie, uint16_t addr)
756 {
757 	struct rtwn_pci_softc *sc = cookie;
758 	uint16_t val;
759 
760 	val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
761 	return le16toh(val);
762 }
763 
764 uint32_t
765 rtwn_pci_read_4(void *cookie, uint16_t addr)
766 {
767 	struct rtwn_pci_softc *sc = cookie;
768 	uint32_t val;
769 
770 	val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
771 	return le32toh(val);
772 }
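
/*
 * The accessors above are the MMIO backend for the bus-agnostic
 * rtwn(4) code: 16- and 32-bit values are converted to little endian
 * before bus_space_write_N() and converted back after
 * bus_space_read_N(), so callers always pass and receive register
 * values in host byte order.
 */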
773 
774 void
775 rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
776     struct rtwn_rx_data *rx_data, int desc_idx)
777 {
778 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
779 	struct ifnet *ifp = &ic->ic_if;
780 	struct ieee80211_rxinfo rxi;
781 	struct ieee80211_frame *wh;
782 	struct ieee80211_node *ni;
783 	struct r92c_rx_phystat *phy = NULL;
784 	uint32_t rxdw0, rxdw3;
785 	struct mbuf *m, *m1;
786 	uint8_t rate;
787 	int8_t rssi = 0;
788 	int infosz, pktlen, shift, error;
789 
790 	rxdw0 = letoh32(rx_desc->rxdw0);
791 	rxdw3 = letoh32(rx_desc->rxdw3);
792 
793 	if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
794 		/*
795 		 * This should not happen since we setup our Rx filter
796 		 * to not receive these frames.
797 		 */
798 		ifp->if_ierrors++;
799 		return;
800 	}
801 
802 	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
803 	if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
804 		ifp->if_ierrors++;
805 		return;
806 	}
807 
808 	rate = MS(rxdw3, R92C_RXDW3_RATE);
809 	infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
810 	if (infosz > sizeof(struct r92c_rx_phystat))
811 		infosz = sizeof(struct r92c_rx_phystat);
812 	shift = MS(rxdw0, R92C_RXDW0_SHIFT);
813 
814 	/* Get RSSI from PHY status descriptor if present. */
815 	if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
816 		phy = mtod(rx_data->m, struct r92c_rx_phystat *);
817 		rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
818 		/* Update our average RSSI. */
819 		rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
820 	}
821 
822 	DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
823 	    pktlen, rate, infosz, shift, rssi));
824 
825 	m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
826 	if (m1 == NULL) {
827 		ifp->if_ierrors++;
828 		return;
829 	}
830 	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
831 	error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
832 	    mtod(m1, void *), MCLBYTES, NULL,
833 	    BUS_DMA_NOWAIT | BUS_DMA_READ);
834 	if (error != 0) {
835 		m_freem(m1);
836 
837 		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
838 		    rx_data->m, BUS_DMA_NOWAIT))
839 			panic("%s: could not load old RX mbuf",
840 			    sc->sc_dev.dv_xname);
841 
842 		/* Physical address may have changed. */
843 		rtwn_setup_rx_desc(sc, rx_desc,
844 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx);
845 
846 		ifp->if_ierrors++;
847 		return;
848 	}
849 
850 	/* Finalize mbuf. */
851 	m = rx_data->m;
852 	rx_data->m = m1;
853 	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
854 
855 	/* Update RX descriptor. */
856 	rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr,
857 	    MCLBYTES, desc_idx);
858 
859 	/* Get ieee80211 frame header. */
860 	if (rxdw0 & R92C_RXDW0_PHYST)
861 		m_adj(m, infosz + shift);
862 	else
863 		m_adj(m, shift);
864 	wh = mtod(m, struct ieee80211_frame *);
865 
866 #if NBPFILTER > 0
867 	if (__predict_false(sc->sc_drvbpf != NULL)) {
868 		struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
869 		struct mbuf mb;
870 
871 		tap->wr_flags = 0;
872 
873 		/* Map HW rate index to 802.11 rate. */
874 		if (!(rxdw3 & R92C_RXDW3_HT)) {
875 			switch (rate) {
876 			/* CCK. */
877 			case  0: tap->wr_rate =   2; break;
878 			case  1: tap->wr_rate =   4; break;
879 			case  2: tap->wr_rate =  11; break;
880 			case  3: tap->wr_rate =  22; break;
881 			/* OFDM. */
882 			case  4: tap->wr_rate =  12; break;
883 			case  5: tap->wr_rate =  18; break;
884 			case  6: tap->wr_rate =  24; break;
885 			case  7: tap->wr_rate =  36; break;
886 			case  8: tap->wr_rate =  48; break;
887 			case  9: tap->wr_rate =  72; break;
888 			case 10: tap->wr_rate =  96; break;
889 			case 11: tap->wr_rate = 108; break;
890 			}
891 		} else if (rate >= 12) {	/* MCS0~15. */
892 			/* Bit 7 set means HT MCS instead of rate. */
893 			tap->wr_rate = 0x80 | (rate - 12);
894 		}
895 		tap->wr_dbm_antsignal = rssi;
896 		tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq);
897 		tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags);
898 
899 		mb.m_data = (caddr_t)tap;
900 		mb.m_len = sc->sc_rxtap_len;
901 		mb.m_next = m;
902 		mb.m_nextpkt = NULL;
903 		mb.m_type = 0;
904 		mb.m_flags = 0;
905 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
906 	}
907 #endif
908 
909 	ni = ieee80211_find_rxnode(ic, wh);
910 	rxi.rxi_flags = 0;
911 	rxi.rxi_rssi = rssi;
912 	rxi.rxi_tstamp = 0;	/* Unused. */
913 	ieee80211_input(ifp, m, ni, &rxi);
914 	/* Node is no longer needed. */
915 	ieee80211_release_node(ic, ni);
916 }
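
/*
 * Buffer replacement strategy used above: a fresh cluster is
 * allocated and DMA-loaded before the received mbuf is passed up the
 * stack.  If either step fails, the old mbuf is reloaded into the
 * descriptor and the frame is dropped as an input error, so an Rx
 * slot never ends up without a buffer.
 */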
917 
918 int
919 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni)
920 {
921 	struct rtwn_pci_softc *sc = cookie;
922 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
923 	struct ieee80211_frame *wh;
924 	struct ieee80211_key *k = NULL;
925 	struct rtwn_tx_ring *tx_ring;
926 	struct rtwn_tx_data *data;
927 	struct r92c_tx_desc_pci *txd;
928 	uint16_t qos;
929 	uint8_t raid, type, tid, qid;
930 	int hasqos, error;
931 
932 	wh = mtod(m, struct ieee80211_frame *);
933 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
934 
935 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
936 		k = ieee80211_get_txkey(ic, wh, ni);
937 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
938 			return (ENOBUFS);
939 		wh = mtod(m, struct ieee80211_frame *);
940 	}
941 
942 	if ((hasqos = ieee80211_has_qos(wh))) {
943 		qos = ieee80211_get_qos(wh);
944 		tid = qos & IEEE80211_QOS_TID;
945 		qid = ieee80211_up_to_ac(ic, tid);
946 	} else if (type != IEEE80211_FC0_TYPE_DATA) {
947 		qid = RTWN_VO_QUEUE;
948 	} else
949 		qid = RTWN_BE_QUEUE;
950 
951 	/* Grab a Tx buffer from the ring. */
952 	tx_ring = &sc->tx_ring[qid];
953 	data = &tx_ring->tx_data[tx_ring->cur];
954 	if (data->m != NULL) {
955 		m_freem(m);
956 		return (ENOBUFS);
957 	}
958 
959 	/* Fill Tx descriptor. */
960 	txd = &tx_ring->desc[tx_ring->cur];
961 	if (letoh32(txd->txdw0) & R92C_TXDW0_OWN) {
962 		m_freem(m);
963 		return (ENOBUFS);
964 	}
965 	txd->txdw0 = htole32(
966 	    SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) |
967 	    SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
968 	    R92C_TXDW0_FSG | R92C_TXDW0_LSG);
969 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
970 		txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
971 
972 	txd->txdw1 = 0;
973 #ifdef notyet
974 	if (k != NULL) {
975 		switch (k->k_cipher) {
976 		case IEEE80211_CIPHER_WEP40:
977 		case IEEE80211_CIPHER_WEP104:
978 		case IEEE80211_CIPHER_TKIP:
979 			cipher = R92C_TXDW1_CIPHER_RC4;
980 			break;
981 		case IEEE80211_CIPHER_CCMP:
982 			cipher = R92C_TXDW1_CIPHER_AES;
983 			break;
984 		default:
985 			cipher = R92C_TXDW1_CIPHER_NONE;
986 		}
987 		txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher));
988 	}
989 #endif
990 	txd->txdw4 = 0;
991 	txd->txdw5 = 0;
992 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
993 	    type == IEEE80211_FC0_TYPE_DATA) {
994 		if (ic->ic_curmode == IEEE80211_MODE_11B ||
995 		    (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B))
996 			raid = R92C_RAID_11B;
997 		else
998 			raid = R92C_RAID_11BG;
999 		txd->txdw1 |= htole32(
1000 		    SM(R92C_TXDW1_MACID, R92C_MACID_BSS) |
1001 		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1002 		    SM(R92C_TXDW1_RAID, raid) |
1003 		    R92C_TXDW1_AGGBK);
1004 
1005 		/* Request TX status report for AMRR. */
1006 		txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT);
1007 
1008 		if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
1009 			txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1010 			    R92C_TXDW4_HWRTSEN);
1011 		} else if (ic->ic_flags & IEEE80211_F_USEPROT) {
1012 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1013 				txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
1014 				    R92C_TXDW4_HWRTSEN);
1015 			} else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1016 				txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1017 				    R92C_TXDW4_HWRTSEN);
1018 			}
1019 		}
1020 
1021 		if (ic->ic_curmode == IEEE80211_MODE_11B)
1022 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 0));
1023 		else
1024 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 3));
1025 		txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf));
1026 
1027 		/* Use AMRR rate for data. */
1028 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1029 		if (ic->ic_fixed_rate != -1)
1030 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1031 			    ic->ic_fixed_rate));
1032 		else
1033 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1034 			    ni->ni_txrate));
1035 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));
1036 	} else {
1037 		txd->txdw1 |= htole32(
1038 		    SM(R92C_TXDW1_MACID, 0) |
1039 		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
1040 		    SM(R92C_TXDW1_RAID, R92C_RAID_11B));
1041 
1042 		/* Force CCK1. */
1043 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1044 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
1045 	}
1046 	/* Set sequence number (already little endian). */
1047 	txd->txdseq = *(uint16_t *)wh->i_seq;
1048 
1049 	if (!hasqos) {
1050 		/* Use HW sequence numbering for non-QoS frames. */
1051 		txd->txdw4  |= htole32(R92C_TXDW4_HWSEQ);
1052 		txd->txdseq |= htole16(0x8000);		/* WTF? */
1053 	} else
1054 		txd->txdw4 |= htole32(R92C_TXDW4_QOS);
1055 
1056 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1057 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1058 	if (error && error != EFBIG) {
1059 		printf("%s: can't map mbuf (error %d)\n",
1060 		    sc->sc_dev.dv_xname, error);
1061 		m_freem(m);
1062 		return error;
1063 	}
1064 	if (error != 0) {
1065 		/* Too many DMA segments, linearize mbuf. */
1066 		if (m_defrag(m, M_DONTWAIT)) {
1067 			m_freem(m);
1068 			return ENOBUFS;
1069 		}
1070 
1071 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1072 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1073 		if (error != 0) {
1074 			printf("%s: can't map mbuf (error %d)\n",
1075 			    sc->sc_dev.dv_xname, error);
1076 			m_freem(m);
1077 			return error;
1078 		}
1079 	}
1080 
1081 	txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
1082 	txd->txbufsize = htole16(m->m_pkthdr.len);
1083 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
1084 	    BUS_SPACE_BARRIER_WRITE);
1085 	txd->txdw0 |= htole32(R92C_TXDW0_OWN);
1086 
1087 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1088 	    BUS_DMASYNC_POSTWRITE);
1089 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
1090 	    BUS_DMASYNC_POSTWRITE);
1091 
1092 	data->m = m;
1093 	data->ni = ni;
1094 
1095 #if NBPFILTER > 0
1096 	if (__predict_false(sc->sc_drvbpf != NULL)) {
1097 		struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
1098 		struct mbuf mb;
1099 
1100 		tap->wt_flags = 0;
1101 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1102 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1103 
1104 		mb.m_data = (caddr_t)tap;
1105 		mb.m_len = sc->sc_txtap_len;
1106 		mb.m_next = m;
1107 		mb.m_nextpkt = NULL;
1108 		mb.m_type = 0;
1109 		mb.m_flags = 0;
1110 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1111 	}
1112 #endif
1113 
1114 	tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
1115 	tx_ring->queued++;
1116 
1117 	if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
1118 		sc->qfullmsk |= (1 << qid);
1119 
1120 	/* Kick TX. */
1121 	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));
1122 
1123 	return (0);
1124 }
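
/*
 * Tx hand-off, roughly: once the descriptor is complete a write
 * barrier is issued, R92C_TXDW0_OWN transfers the descriptor to the
 * hardware, and writing the queue's bit to R92C_PCIE_CTRL_REG ("kick
 * TX") prods the chip into fetching it.  The matching completion path
 * is rtwn_tx_done(), driven from rtwn_intr().
 */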
1125 
1126 void
1127 rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
1128 {
1129 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1130 	struct ifnet *ifp = &ic->ic_if;
1131 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
1132 	struct rtwn_tx_data *tx_data;
1133 	struct r92c_tx_desc_pci *tx_desc;
1134 	int i;
1135 
1136 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1137 	    BUS_DMASYNC_POSTREAD);
1138 
1139 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
1140 		tx_data = &tx_ring->tx_data[i];
1141 		if (tx_data->m == NULL)
1142 			continue;
1143 
1144 		tx_desc = &tx_ring->desc[i];
1145 		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
1146 			continue;
1147 
1148 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
1149 		m_freem(tx_data->m);
1150 		tx_data->m = NULL;
1151 		ieee80211_release_node(ic, tx_data->ni);
1152 		tx_data->ni = NULL;
1153 
1154 		sc->sc_sc.sc_tx_timer = 0;
1155 		tx_ring->queued--;
1156 
1157 		rtwn_poll_c2h_events(sc);
1158 	}
1159 
1160 	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
1161 		sc->qfullmsk &= ~(1 << qid);
1162 
1163 	if (sc->qfullmsk == 0) {
1164 		ifq_clr_oactive(&ifp->if_snd);
1165 		(*ifp->if_start)(ifp);
1166 	}
1167 }
1168 
1169 int
1170 rtwn_alloc_buffers(void *cookie)
1171 {
1172 	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
1173 	return (0);
1174 }
1175 
1176 int
1177 rtwn_pci_init(void *cookie)
1178 {
1179 	struct rtwn_pci_softc *sc = cookie;
1180 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
1181 	return (0);
1182 }
1183 
1184 void
1185 rtwn_pci_stop(void *cookie)
1186 {
1187 	struct rtwn_pci_softc *sc = cookie;
1188 	uint16_t reg;
1189 	int i, s;
1190 
1191 	s = splnet();
1192 
1193 	/* Disable interrupts. */
1194 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1195 
1196 	/* Stop hardware. */
1197 	rtwn_pci_write_1(sc, R92C_TXPAUSE, 0xff);
1198 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1199 	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
1200 	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
1201 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1202 	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
1203 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1204 	reg = rtwn_pci_read_2(sc, R92C_CR);
1205 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1206 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1207 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1208 	    R92C_CR_ENSEC);
1209 	rtwn_pci_write_2(sc, R92C_CR, reg);
1210 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1211 		rtwn_fw_reset(&sc->sc_sc);
1212 	/* TODO: linux does additional btcoex stuff here */
1213 	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80); /* linux magic number */
1214 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23); /* ditto */
1215 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e); /* differs in btcoex */
1216 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x0e);
1217 	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);
1218 
1219 	for (i = 0; i < RTWN_NTXQUEUES; i++)
1220 		rtwn_reset_tx_list(sc, i);
1221 	rtwn_reset_rx_list(sc);
1222 
1223 	splx(s);
1224 }
1225 
1226 int
1227 rtwn_intr(void *xsc)
1228 {
1229 	struct rtwn_pci_softc *sc = xsc;
1230 	u_int32_t status;
1231 	int i;
1232 
1233 	status = rtwn_pci_read_4(sc, R92C_HISR);
1234 	if (status == 0 || status == 0xffffffff)
1235 		return (0);
1236 
1237 	/* Disable interrupts. */
1238 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1239 
1240 	/* Ack interrupts. */
1241 	rtwn_pci_write_4(sc, R92C_HISR, status);
1242 
1243 	/* Vendor driver treats RX errors like ROK... */
1244 	if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) {
1245 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1246 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1247 		    BUS_DMASYNC_POSTREAD);
1248 
1249 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1250 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1251 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1252 
1253 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1254 				continue;
1255 
1256 			rtwn_rx_frame(sc, rx_desc, rx_data, i);
1257 		}
1258 	}
1259 
1260 	if (status & R92C_IMR_BDOK)
1261 		rtwn_tx_done(sc, RTWN_BEACON_QUEUE);
1262 	if (status & R92C_IMR_HIGHDOK)
1263 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1264 	if (status & R92C_IMR_MGNTDOK)
1265 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1266 	if (status & R92C_IMR_BKDOK)
1267 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1268 	if (status & R92C_IMR_BEDOK)
1269 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1270 	if (status & R92C_IMR_VIDOK)
1271 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1272 	if (status & R92C_IMR_VODOK)
1273 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1274 
1275 	/* Enable interrupts. */
1276 	rtwn_pci_write_4(sc, R92C_HIMR, RTWN_INT_ENABLE);
1277 
1278 	return (1);
1279 }
1280 
1281 int
1282 rtwn_is_oactive(void *cookie)
1283 {
1284 	struct rtwn_pci_softc *sc = cookie;
1285 
1286 	return (sc->qfullmsk != 0);
1287 }
1288 
1289 int
1290 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data)
1291 {
1292 	int ntries;
1293 
1294 	rtwn_pci_write_4(sc, R92C_LLT_INIT,
1295 	    SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
1296 	    SM(R92C_LLT_INIT_ADDR, addr) |
1297 	    SM(R92C_LLT_INIT_DATA, data));
1298 	/* Wait for write operation to complete. */
1299 	for (ntries = 0; ntries < 20; ntries++) {
1300 		if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
1301 		    R92C_LLT_INIT_OP_NO_ACTIVE)
1302 			return (0);
1303 		DELAY(5);
1304 	}
1305 	return (ETIMEDOUT);
1306 }
1307 
1308 int
1309 rtwn_llt_init(struct rtwn_pci_softc *sc)
1310 {
1311 	int i, error;
1312 
1313 	/* Reserve pages [0; R92C_TX_PAGE_COUNT]. */
1314 	for (i = 0; i < R92C_TX_PAGE_COUNT; i++) {
1315 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1316 			return (error);
1317 	}
1318 	/* NB: 0xff indicates end-of-list. */
1319 	if ((error = rtwn_llt_write(sc, i, 0xff)) != 0)
1320 		return (error);
1321 	/*
1322 	 * Use pages [R92C_TX_PAGE_COUNT + 1; R92C_TXPKTBUF_COUNT - 1]
1323 	 * as ring buffer.
1324 	 */
1325 	for (++i; i < R92C_TXPKTBUF_COUNT - 1; i++) {
1326 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1327 			return (error);
1328 	}
1329 	/* Make the last page point to the beginning of the ring buffer. */
1330 	error = rtwn_llt_write(sc, i, R92C_TX_PAGE_COUNT + 1);
1331 	return (error);
1332 }
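
/*
 * Resulting link table, with the constants defined at the top of this
 * file (R92C_TX_PAGE_COUNT = 245, R92C_TXPKTBUF_COUNT = 256):
 *
 *	entries   0-244: point to entry + 1	(reserved Tx pages)
 *	entry       245: 0xff			(end of list)
 *	entries 246-254: point to entry + 1	(ring buffer)
 *	entry       255: points back to 246	(closes the ring)
 */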
1333 
1334 int
1335 rtwn_power_on(void *cookie)
1336 {
1337 	struct rtwn_pci_softc *sc = cookie;
1338 	uint32_t reg;
1339 	int ntries;
1340 
1341 	/* Wait for autoload done bit. */
1342 	for (ntries = 0; ntries < 1000; ntries++) {
1343 		if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) &
1344 		    R92C_APS_FSMCO_PFM_ALDN)
1345 			break;
1346 		DELAY(5);
1347 	}
1348 	if (ntries == 1000) {
1349 		printf("%s: timeout waiting for chip autoload\n",
1350 		    sc->sc_dev.dv_xname);
1351 		return (ETIMEDOUT);
1352 	}
1353 
1354 	/* Unlock ISO/CLK/Power control register. */
1355 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1356 
1357 	/* TODO: check if we need this for 8188CE */
1358 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1359 		/* bt coex */
1360 		reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO);
1361 		reg |= (R92C_APS_FSMCO_SOP_ABG |
1362 			R92C_APS_FSMCO_SOP_AMB |
1363 			R92C_APS_FSMCO_XOP_BTCK);
1364 		rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg);
1365 	}
1366 
1367 	/* Move SPS into PWM mode. */
1368 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b);
1369 	DELAY(100);
1370 
1371 	/* Set low byte to 0x0f, leave others unchanged. */
1372 	rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL,
1373 	    (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f);
1374 
1375 	/* TODO: check if we need this for 8188CE */
1376 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1377 		/* bt coex */
1378 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL);
1379 		reg &= (~0x00024800); /* XXX magic from linux */
1380 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg);
1381 	}
1382 
1383 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1384 	  (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) |
1385 	  R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR);
1386 	DELAY(200);
1387 
1388 	/* TODO: linux does additional btcoex stuff here */
1389 
1390 	/* Auto enable WLAN. */
1391 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1392 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1393 	for (ntries = 0; ntries < 1000; ntries++) {
1394 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1395 		    R92C_APS_FSMCO_APFM_ONMAC))
1396 			break;
1397 		DELAY(5);
1398 	}
1399 	if (ntries == 1000) {
1400 		printf("%s: timeout waiting for MAC auto ON\n",
1401 		    sc->sc_dev.dv_xname);
1402 		return (ETIMEDOUT);
1403 	}
1404 
1405 	/* Enable radio, GPIO and LED functions. */
1406 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1407 	    R92C_APS_FSMCO_AFSM_PCIE |
1408 	    R92C_APS_FSMCO_PDN_EN |
1409 	    R92C_APS_FSMCO_PFM_ALDN);
1410 	/* Release RF digital isolation. */
1411 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1412 	    rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);
1413 
1414 	if (sc->sc_sc.chip & RTWN_CHIP_92C)
1415 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77);
1416 	else
1417 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22);
1418 
1419 	rtwn_pci_write_4(sc, R92C_INT_MIG, 0);
1420 
1421 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1422 		/* bt coex */
1423 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2);
1424 		reg &= 0xfd; /* XXX magic from linux */
1425 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg);
1426 	}
1427 
1428 	rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG,
1429 	    rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL);
1430 
1431 	reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL);
1432 	if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) {
1433 		printf("%s: radio is disabled by hardware switch\n",
1434 		    sc->sc_dev.dv_xname);
1435 		return (EPERM);	/* :-) */
1436 	}
1437 
1438 	/* Initialize MAC. */
1439 	reg = rtwn_pci_read_1(sc, R92C_APSD_CTRL);
1440 	rtwn_pci_write_1(sc, R92C_APSD_CTRL,
1441 	    rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
1442 	for (ntries = 0; ntries < 200; ntries++) {
1443 		if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) &
1444 		    R92C_APSD_CTRL_OFF_STATUS))
1445 			break;
1446 		DELAY(500);
1447 	}
1448 	if (ntries == 200) {
1449 		printf("%s: timeout waiting for MAC initialization\n",
1450 		    sc->sc_dev.dv_xname);
1451 		return (ETIMEDOUT);
1452 	}
1453 
1454 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1455 	reg = rtwn_pci_read_2(sc, R92C_CR);
1456 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1457 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1458 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1459 	    R92C_CR_ENSEC;
1460 	rtwn_pci_write_2(sc, R92C_CR, reg);
1461 
1462 	rtwn_pci_write_1(sc, 0xfe10, 0x19);
1463 
1464 	return (0);
1465 }
1466 
1467 int
1468 rtwn_dma_init(void *cookie)
1469 {
1470 	struct rtwn_pci_softc *sc = cookie;
1471 	uint32_t reg;
1472 	int error;
1473 
1474 	/* Initialize LLT table. */
1475 	error = rtwn_llt_init(sc);
1476 	if (error != 0)
1477 		return error;
1478 
1479 	/* Set number of pages for normal priority queue. */
1480 	rtwn_pci_write_2(sc, R92C_RQPN_NPQ, 0);
1481 	rtwn_pci_write_4(sc, R92C_RQPN,
1482 	    /* Set number of pages for public queue. */
1483 	    SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) |
1484 	    /* Set number of pages for high priority queue. */
1485 	    SM(R92C_RQPN_HPQ, R92C_HPQ_NPAGES) |
1486 	    /* Set number of pages for low priority queue. */
1487 	    SM(R92C_RQPN_LPQ, R92C_LPQ_NPAGES) |
1488 	    /* Load values. */
1489 	    R92C_RQPN_LD);
1490 
1491 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY);
1492 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY);
1493 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD,
1494 	    R92C_TX_PAGE_BOUNDARY);
1495 	rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY);
1496 	rtwn_pci_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY);
1497 
1498 	reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL);
1499 	reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
1500 	reg |= 0xF771;
1501 	rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg);
1502 
1503 	rtwn_pci_write_4(sc, R92C_TCR,
1504 	    R92C_TCR_CFENDFORM | (1 << 12) | (1 << 13));
1505 
1506 	/* Configure Tx DMA. */
1507 	rtwn_pci_write_4(sc, R92C_BKQ_DESA,
1508 		sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
1509 	rtwn_pci_write_4(sc, R92C_BEQ_DESA,
1510 		sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
1511 	rtwn_pci_write_4(sc, R92C_VIQ_DESA,
1512 		sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
1513 	rtwn_pci_write_4(sc, R92C_VOQ_DESA,
1514 		sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
1515 	rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
1516 		sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
1517 	rtwn_pci_write_4(sc, R92C_MGQ_DESA,
1518 		sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
1519 	rtwn_pci_write_4(sc, R92C_HQ_DESA,
1520 		sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);
1521 
1522 	/* Configure Rx DMA. */
1523 	rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr);
1524 
1525 	/* Set Tx/Rx transfer page boundary. */
1526 	rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff);
1527 
1528 	/* Set Tx/Rx transfer page size. */
1529 	rtwn_pci_write_1(sc, R92C_PBP,
1530 	    SM(R92C_PBP_PSRX, R92C_PBP_128) |
1531 	    SM(R92C_PBP_PSTX, R92C_PBP_128));
1532 
1533 	return (0);
1534 }
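
/*
 * Summary of the programming above: the RQPN and boundary registers
 * split the on-chip packet buffer according to the page budget
 * described near the top of this file, each Tx queue's descriptor
 * ring base address goes into its *_DESA register, and the Rx ring
 * base goes into R92C_RX_DESA.  The 0xF771 queue-mapping value is an
 * undocumented magic constant.
 */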
1535 
1536 int
1537 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len)
1538 {
1539 	struct rtwn_pci_softc *sc = cookie;
1540 	uint32_t reg;
1541 	int off, mlen, error = 0, i;
1542 
1543 	reg = rtwn_pci_read_4(sc, R92C_MCUFWDL);
1544 	reg = RW(reg, R92C_MCUFWDL_PAGE, page);
1545 	rtwn_pci_write_4(sc, R92C_MCUFWDL, reg);
1546 
1547 	DELAY(5);
1548 
1549 	off = R92C_FW_START_ADDR;
1550 	while (len > 0) {
1551 		if (len > 196)
1552 			mlen = 196;
1553 		else if (len > 4)
1554 			mlen = 4;
1555 		else
1556 			mlen = 1;
1557 		for (i = 0; i < mlen; i++)
1558 			rtwn_pci_write_1(sc, off++, buf[i]);
1559 		buf += mlen;
1560 		len -= mlen;
1561 	}
1562 
1563 	return (error);
1564 }
1565 
1566 int
1567 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len)
1568 {
1569 	struct rtwn_pci_softc *sc = cookie;
1570 	const char *name;
1571 	int error;
1572 
1573 	if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | RTWN_CHIP_92C)) ==
1574 	    RTWN_CHIP_UMC_A_CUT)
1575 		name = "rtwn-rtl8192cfwU";
1576 	else
1577 		name = "rtwn-rtl8192cfwU_B";
1578 
1579 	error = loadfirmware(name, fw, len);
1580 	if (error)
1581 		printf("%s: could not read firmware %s (error %d)\n",
1582 		    sc->sc_dev.dv_xname, name, error);
1583 	return (error);
1584 }
1585 
1586 void
1587 rtwn_mac_init(void *cookie)
1588 {
1589 	struct rtwn_pci_softc *sc = cookie;
1590 	int i;
1591 
1592 	/* Write MAC initialization values. */
1593 	for (i = 0; i < nitems(rtl8192ce_mac); i++)
1594 		rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg,
1595 		    rtl8192ce_mac[i].val);
1596 }
1597 
1598 void
1599 rtwn_bb_init(void *cookie)
1600 {
1601 	struct rtwn_pci_softc *sc = cookie;
1602 	const struct r92c_bb_prog *prog;
1603 	uint32_t reg;
1604 	int i;
1605 
1606 	/* Enable BB and RF. */
1607 	rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN,
1608 	    rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) |
1609 	    R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
1610 	    R92C_SYS_FUNC_EN_DIO_RF);
1611 
1612 	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);
1613 
1614 	rtwn_pci_write_1(sc, R92C_RF_CTRL,
1615 	    R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);
1616 
1617 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1618 	    R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA |
1619 	    R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST |
1620 	    R92C_SYS_FUNC_EN_BBRSTB);
1621 
1622 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);
1623 
1624 	rtwn_pci_write_4(sc, R92C_LEDCFG0,
1625 	    rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000);
1626 
1627 	/* Select BB programming. */
1628 	prog = (sc->sc_sc.chip & RTWN_CHIP_92C) ?
1629 	    &rtl8192ce_bb_prog_2t : &rtl8192ce_bb_prog_1t;
1630 
1631 	/* Write BB initialization values. */
1632 	for (i = 0; i < prog->count; i++) {
1633 		rtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
1634 		DELAY(1);
1635 	}
1636 
1637 	if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) {
1638 		/* 8192C 1T only configuration. */
1639 		reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO);
1640 		reg = (reg & ~0x00000003) | 0x2;
1641 		rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);
1642 
1643 		reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO);
1644 		reg = (reg & ~0x00300033) | 0x00200022;
1645 		rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);
1646 
1647 		reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING);
1648 		reg = (reg & ~0xff000000) | 0x45 << 24;
1649 		rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);
1650 
1651 		reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
1652 		reg = (reg & ~0x000000ff) | 0x23;
1653 		rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);
1654 
1655 		reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
1656 		reg = (reg & ~0x00000030) | 1 << 4;
1657 		rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);
1658 
1659 		reg = rtwn_bb_read(sc, 0xe74);
1660 		reg = (reg & ~0x0c000000) | 2 << 26;
1661 		rtwn_bb_write(sc, 0xe74, reg);
1662 		reg = rtwn_bb_read(sc, 0xe78);
1663 		reg = (reg & ~0x0c000000) | 2 << 26;
1664 		rtwn_bb_write(sc, 0xe78, reg);
1665 		reg = rtwn_bb_read(sc, 0xe7c);
1666 		reg = (reg & ~0x0c000000) | 2 << 26;
1667 		rtwn_bb_write(sc, 0xe7c, reg);
1668 		reg = rtwn_bb_read(sc, 0xe80);
1669 		reg = (reg & ~0x0c000000) | 2 << 26;
1670 		rtwn_bb_write(sc, 0xe80, reg);
1671 		reg = rtwn_bb_read(sc, 0xe88);
1672 		reg = (reg & ~0x0c000000) | 2 << 26;
1673 		rtwn_bb_write(sc, 0xe88, reg);
1674 	}
1675 
1676 	/* Write AGC values. */
1677 	for (i = 0; i < prog->agccount; i++) {
1678 		rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
1679 		    prog->agcvals[i]);
1680 		DELAY(1);
1681 	}
1682 
1683 	if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) &
1684 	    R92C_HSSI_PARAM2_CCK_HIPWR)
1685 		sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR;
1686 }
1687 
1688 void
1689 rtwn_calib_to(void *arg)
1690 {
1691 	struct rtwn_pci_softc *sc = arg;
1692 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1693 	int s;
1694 
1695 	s = splnet();
1696 	ieee80211_amrr_choose(&sc->amrr, ic->ic_bss, &sc->amn);
1697 	splx(s);
1698 
1699 	rtwn_calib(&sc->sc_sc);
1700 }
1701 
1702 void
1703 rtwn_next_calib(void *cookie)
1704 {
1705 	struct rtwn_pci_softc *sc = cookie;
1706 
1707 	timeout_add_sec(&sc->calib_to, 2);
1708 }
1709 
1710 void
1711 rtwn_cancel_calib(void *cookie)
1712 {
1713 	struct rtwn_pci_softc *sc = cookie;
1714 
1715 	if (timeout_initialized(&sc->calib_to))
1716 		timeout_del(&sc->calib_to);
1717 }
1718 
1719 void
1720 rtwn_scan_to(void *arg)
1721 {
1722 	struct rtwn_pci_softc *sc = arg;
1723 
1724 	rtwn_next_scan(&sc->sc_sc);
1725 }
1726 
1727 void
1728 rtwn_pci_next_scan(void *cookie)
1729 {
1730 	struct rtwn_pci_softc *sc = cookie;
1731 
1732 	timeout_add_msec(&sc->scan_to, 200);
1733 }
1734 
1735 void
1736 rtwn_cancel_scan(void *cookie)
1737 {
1738 	struct rtwn_pci_softc *sc = cookie;
1739 
1740 	if (timeout_initialized(&sc->scan_to))
1741 		timeout_del(&sc->scan_to);
1742 }
1743 
1744 void
1745 rtwn_wait_async(void *cookie)
1746 {
1747 	/* nothing to do */
1748 }
1749 
1750 void
1751 rtwn_tx_report(struct rtwn_pci_softc *sc, uint8_t *buf, int len)
1752 {
1753 	struct r92c_c2h_tx_rpt *rpt = (struct r92c_c2h_tx_rpt *)buf;
1754 	int packets, tries, tx_ok, drop, expire, over;
1755 
1756 	if (len != sizeof(*rpt))
1757 		return;
1758 
1759 	packets = MS(rpt->rptb6, R92C_RPTB6_RPT_PKT_NUM);
1760 	tries = MS(rpt->rptb0, R92C_RPTB0_RETRY_CNT);
1761 	tx_ok = (rpt->rptb7 & R92C_RPTB7_PKT_OK);
1762 	drop = (rpt->rptb6 & R92C_RPTB6_PKT_DROP);
1763 	expire = (rpt->rptb6 & R92C_RPTB6_LIFE_EXPIRE);
1764 	over = (rpt->rptb6 & R92C_RPTB6_RETRY_OVER);
1765 
1766 	if (packets > 0) {
1767 		sc->amn.amn_txcnt += packets;
1768 		if (!tx_ok || tries > 1 || drop || expire || over)
1769 			sc->amn.amn_retrycnt++;
1770 	}
1771 }
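
/*
 * This is the AMRR feedback path for the PCI attachment: every Tx
 * status report bumps amn_txcnt by the number of reported packets,
 * and a report indicating a failure, retries, a drop or an expired
 * frame bumps amn_retrycnt.  rtwn_calib_to() periodically feeds these
 * counters to ieee80211_amrr_choose(), which adjusts ni_txrate.
 */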
1772 
1773 void
1774 rtwn_poll_c2h_events(struct rtwn_pci_softc *sc)
1775 {
1776 	const uint16_t off = R92C_C2HEVT_MSG + sizeof(struct r92c_c2h_evt);
1777 	uint8_t buf[R92C_C2H_MSG_MAX_LEN];
1778 	uint8_t id, len, status;
1779 	int i;
1780 
1781 	/* Read current status. */
1782 	status = rtwn_pci_read_1(sc, R92C_C2HEVT_CLEAR);
1783 	if (status == R92C_C2HEVT_HOST_CLOSE)
1784 		return;	/* nothing to do */
1785 
1786 	if (status == R92C_C2HEVT_FW_CLOSE) {
1787 		len = rtwn_pci_read_1(sc, R92C_C2HEVT_MSG);
1788 		id = MS(len, R92C_C2H_EVTB0_ID);
1789 		len = MS(len, R92C_C2H_EVTB0_LEN);
1790 
1791 		if (id == R92C_C2HEVT_TX_REPORT && len <= sizeof(buf)) {
1792 			memset(buf, 0, sizeof(buf));
1793 			for (i = 0; i < len; i++)
1794 				buf[i] = rtwn_pci_read_1(sc, off + i);
1795 			rtwn_tx_report(sc, buf, len);
1796 		} else
1797 			DPRINTF(("unhandled C2H event %d (%d bytes)\n",
1798 			    id, len));
1799 	}
1800 
1801 	/* Prepare for next event. */
1802 	rtwn_pci_write_1(sc, R92C_C2HEVT_CLEAR, R92C_C2HEVT_HOST_CLOSE);
1803 }
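
/*
 * C2H ("chip to host") mailbox handshake, as used above: the firmware
 * posts an event in R92C_C2HEVT_MSG and sets R92C_C2HEVT_CLEAR to
 * R92C_C2HEVT_FW_CLOSE; the driver extracts the id/length byte and
 * the payload, handles Tx reports, and writes R92C_C2HEVT_HOST_CLOSE
 * back to return the mailbox to the firmware.
 */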
1804