1 /*	$OpenBSD: if_rtwn.c,v 1.36 2019/09/12 12:55:07 stsp Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2015-2016 Andriy Voskoboinyk <avos@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * PCI front-end for Realtek RTL8188CE/RTL8188EE/RTL8192CE/RTL8723AE driver.
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/task.h>
34 #include <sys/timeout.h>
35 #include <sys/conf.h>
36 #include <sys/device.h>
37 #include <sys/endian.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #if NBPFILTER > 0
43 #include <net/bpf.h>
44 #endif
45 #include <net/if.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51 
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_amrr.h>
54 #include <net80211/ieee80211_radiotap.h>
55 
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58 #include <dev/pci/pcidevs.h>
59 
60 #include <dev/ic/r92creg.h>
61 #include <dev/ic/rtwnvar.h>
62 
63 /*
64  * Driver definitions.
65  */
66 
67 #define R92C_NPQ_NPAGES		0
68 #define R92C_PUBQ_NPAGES	176
69 #define R92C_HPQ_NPAGES		41
70 #define R92C_LPQ_NPAGES		28
71 #define R92C_TXPKTBUF_COUNT	256
72 #define R92C_TX_PAGE_COUNT	\
73 	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
74 #define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)
75 #define R92C_MAX_RX_DMA_SIZE	0x2800
76 
77 #define R88E_NPQ_NPAGES		0
78 #define R88E_PUBQ_NPAGES	116
79 #define R88E_HPQ_NPAGES		41
80 #define R88E_LPQ_NPAGES		13
81 #define R88E_TXPKTBUF_COUNT	176
82 #define R88E_TX_PAGE_COUNT	\
83 	(R88E_PUBQ_NPAGES + R88E_HPQ_NPAGES + R88E_LPQ_NPAGES)
84 #define R88E_TX_PAGE_BOUNDARY	(R88E_TX_PAGE_COUNT + 1)
85 #define R88E_MAX_RX_DMA_SIZE	0x2600
86 
87 #define R23A_NPQ_NPAGES		0
88 #define R23A_PUBQ_NPAGES	189
89 #define R23A_HPQ_NPAGES		28
90 #define R23A_LPQ_NPAGES		28
91 #define R23A_TXPKTBUF_COUNT	256
92 #define R23A_TX_PAGE_COUNT	\
93 	(R23A_PUBQ_NPAGES + R23A_HPQ_NPAGES + R23A_LPQ_NPAGES)
94 #define R23A_TX_PAGE_BOUNDARY	(R23A_TX_PAGE_COUNT + 1)
95 #define R23A_MAX_RX_DMA_SIZE	0x2800
96 
97 #define RTWN_NTXQUEUES			9
98 #define RTWN_RX_LIST_COUNT		256
99 #define RTWN_TX_LIST_COUNT		256
100 
101 /* TX queue indices. */
102 #define RTWN_BK_QUEUE			0
103 #define RTWN_BE_QUEUE			1
104 #define RTWN_VI_QUEUE			2
105 #define RTWN_VO_QUEUE			3
106 #define RTWN_BEACON_QUEUE		4
107 #define RTWN_TXCMD_QUEUE		5
108 #define RTWN_MGNT_QUEUE			6
109 #define RTWN_HIGH_QUEUE			7
110 #define RTWN_HCCA_QUEUE			8
111 
112 struct rtwn_rx_radiotap_header {
113 	struct ieee80211_radiotap_header wr_ihdr;
114 	uint8_t		wr_flags;
115 	uint8_t		wr_rate;
116 	uint16_t	wr_chan_freq;
117 	uint16_t	wr_chan_flags;
118 	uint8_t		wr_dbm_antsignal;
119 } __packed;
120 
121 #define RTWN_RX_RADIOTAP_PRESENT			\
122 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
123 	 1 << IEEE80211_RADIOTAP_RATE |			\
124 	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
125 	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
126 
127 struct rtwn_tx_radiotap_header {
128 	struct ieee80211_radiotap_header wt_ihdr;
129 	uint8_t		wt_flags;
130 	uint16_t	wt_chan_freq;
131 	uint16_t	wt_chan_flags;
132 } __packed;
133 
134 #define RTWN_TX_RADIOTAP_PRESENT			\
135 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
136 	 1 << IEEE80211_RADIOTAP_CHANNEL)
137 
138 struct rtwn_rx_data {
139 	bus_dmamap_t		map;
140 	struct mbuf		*m;
141 };
142 
143 struct rtwn_rx_ring {
144 	struct r92c_rx_desc_pci	*desc;
145 	bus_dmamap_t		map;
146 	bus_dma_segment_t	seg;
147 	int			nsegs;
148 	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];
149 };
150 
151 struct rtwn_tx_data {
152 	bus_dmamap_t			map;
153 	struct mbuf			*m;
154 	struct ieee80211_node		*ni;
155 };
156 
157 struct rtwn_tx_ring {
158 	bus_dmamap_t		map;
159 	bus_dma_segment_t	seg;
160 	int			nsegs;
161 	struct r92c_tx_desc_pci	*desc;
162 	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
163 	int			queued;
164 	int			cur;
165 };
166 
167 struct rtwn_pci_softc {
168 	struct device		sc_dev;
169 	struct rtwn_softc	sc_sc;
170 
171 	struct rtwn_rx_ring	rx_ring;
172 	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
173 	uint32_t		qfullmsk;
174 
175 	struct timeout		calib_to;
176 	struct timeout		scan_to;
177 
178 	/* PCI specific goo. */
179 	bus_dma_tag_t 		sc_dmat;
180 	pci_chipset_tag_t	sc_pc;
181 	pcitag_t		sc_tag;
182 	void			*sc_ih;
183 	bus_space_tag_t		sc_st;
184 	bus_space_handle_t	sc_sh;
185 	bus_size_t		sc_mapsize;
186 	int			sc_cap_off;
187 
188 	struct ieee80211_amrr		amrr;
189 	struct ieee80211_amrr_node	amn;
190 
191 #if NBPFILTER > 0
192 	caddr_t				sc_drvbpf;
193 
194 	union {
195 		struct rtwn_rx_radiotap_header th;
196 		uint8_t	pad[64];
197 	}				sc_rxtapu;
198 #define sc_rxtap	sc_rxtapu.th
199 	int				sc_rxtap_len;
200 
201 	union {
202 		struct rtwn_tx_radiotap_header th;
203 		uint8_t	pad[64];
204 	}				sc_txtapu;
205 #define sc_txtap	sc_txtapu.th
206 	int				sc_txtap_len;
207 #endif
208 };
209 
210 #ifdef RTWN_DEBUG
211 #define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
212 #define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
213 extern int rtwn_debug;
214 #else
215 #define DPRINTF(x)
216 #define DPRINTFN(n, x)
217 #endif
218 
219 /*
220  * PCI configuration space registers.
221  */
222 #define	RTWN_PCI_IOBA		0x10	/* i/o mapped base */
223 #define	RTWN_PCI_MMBA		0x18	/* memory mapped base */
224 
225 static const struct pci_matchid rtwn_pci_devices[] = {
226 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8188CE },
227 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8188EE },
228 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8192CE },
229 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8723AE }
230 };
231 
232 int		rtwn_pci_match(struct device *, void *, void *);
233 void		rtwn_pci_attach(struct device *, struct device *, void *);
234 int		rtwn_pci_detach(struct device *, int);
235 int		rtwn_pci_activate(struct device *, int);
236 int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
237 void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
238 void		rtwn_free_rx_list(struct rtwn_pci_softc *);
239 void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
240 		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
241 int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
242 void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
243 void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
244 void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
245 void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
246 void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
247 uint8_t		rtwn_pci_read_1(void *, uint16_t);
248 uint16_t	rtwn_pci_read_2(void *, uint16_t);
249 uint32_t	rtwn_pci_read_4(void *, uint16_t);
250 void		rtwn_rx_frame(struct rtwn_pci_softc *,
251 		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int,
252 		    struct mbuf_list *);
253 int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
254 void		rtwn_tx_done(struct rtwn_pci_softc *, int);
255 int		rtwn_alloc_buffers(void *);
256 int		rtwn_pci_init(void *);
void		rtwn_pci_92c_stop(struct rtwn_pci_softc *);
257 void		rtwn_pci_88e_stop(struct rtwn_pci_softc *);
void		rtwn_pci_23a_stop(struct rtwn_pci_softc *);
258 void		rtwn_pci_stop(void *);
259 int		rtwn_intr(void *);
260 int		rtwn_is_oactive(void *);
261 int		rtwn_92c_power_on(struct rtwn_pci_softc *);
262 int		rtwn_88e_power_on(struct rtwn_pci_softc *);
263 int		rtwn_23a_power_on(struct rtwn_pci_softc *);
264 int		rtwn_power_on(void *);
265 int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
266 int		rtwn_llt_init(struct rtwn_pci_softc *, int);
267 int		rtwn_dma_init(void *);
268 int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
269 int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
270 void		rtwn_mac_init(void *);
271 void		rtwn_bb_init(void *);
272 void		rtwn_calib_to(void *);
273 void		rtwn_next_calib(void *);
274 void		rtwn_cancel_calib(void *);
275 void		rtwn_scan_to(void *);
276 void		rtwn_pci_next_scan(void *);
277 void		rtwn_cancel_scan(void *);
278 void		rtwn_wait_async(void *);
279 void		rtwn_poll_c2h_events(struct rtwn_pci_softc *);
280 void		rtwn_tx_report(struct rtwn_pci_softc *, uint8_t *, int);
281 
282 /* Aliases. */
283 #define	rtwn_bb_write	rtwn_pci_write_4
284 #define rtwn_bb_read	rtwn_pci_read_4
285 
286 struct cfdriver rtwn_cd = {
287 	NULL, "rtwn", DV_IFNET
288 };
289 
290 const struct cfattach rtwn_pci_ca = {
291 	sizeof(struct rtwn_pci_softc),
292 	rtwn_pci_match,
293 	rtwn_pci_attach,
294 	rtwn_pci_detach,
295 	rtwn_pci_activate
296 };
297 
298 int
299 rtwn_pci_match(struct device *parent, void *match, void *aux)
300 {
301 	return (pci_matchbyid(aux, rtwn_pci_devices,
302 	    nitems(rtwn_pci_devices)));
303 }
304 
305 void
306 rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
307 {
308 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
309 	struct pci_attach_args *pa = aux;
310 	struct ifnet *ifp;
311 	int i, error;
312 	pcireg_t memtype;
313 	pci_intr_handle_t ih;
314 	const char *intrstr;
315 
316 	sc->sc_dmat = pa->pa_dmat;
317 	sc->sc_pc = pa->pa_pc;
318 	sc->sc_tag = pa->pa_tag;
319 
320 	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
321 	timeout_set(&sc->scan_to, rtwn_scan_to, sc);
322 
323 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
324 
325 	/* Map control/status registers. */
326 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
327 	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
328 	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
329 	if (error != 0) {
330 		printf(": can't map mem space\n");
331 		return;
332 	}
333 
334 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
335 		printf(": can't map interrupt\n");
336 		return;
337 	}
338 	intrstr = pci_intr_string(sc->sc_pc, ih);
339 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
340 	    rtwn_intr, sc, sc->sc_dev.dv_xname);
341 	if (sc->sc_ih == NULL) {
342 		printf(": can't establish interrupt");
343 		if (intrstr != NULL)
344 			printf(" at %s", intrstr);
345 		printf("\n");
346 		return;
347 	}
348 	printf(": %s\n", intrstr);
349 
350 	/* Disable PCIe Active State Power Management (ASPM). */
351 	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
352 	    &sc->sc_cap_off, NULL)) {
353 		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
354 		    sc->sc_cap_off + PCI_PCIE_LCSR);
355 		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
356 		pci_conf_write(sc->sc_pc, sc->sc_tag,
357 		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
358 	}
359 
360 	/* Allocate Tx/Rx buffers. */
361 	error = rtwn_alloc_rx_list(sc);
362 	if (error != 0) {
363 		printf("%s: could not allocate Rx buffers\n",
364 		    sc->sc_dev.dv_xname);
365 		return;
366 	}
367 	for (i = 0; i < RTWN_NTXQUEUES; i++) {
368 		error = rtwn_alloc_tx_list(sc, i);
369 		if (error != 0) {
370 			printf("%s: could not allocate Tx buffers\n",
371 			    sc->sc_dev.dv_xname);
372 			rtwn_free_rx_list(sc);
373 			return;
374 		}
375 	}
376 
377 	sc->amrr.amrr_min_success_threshold = 1;
378 	sc->amrr.amrr_max_success_threshold = 15;
379 
380 	/* Attach the bus-agnostic driver. */
381 	sc->sc_sc.sc_ops.cookie = sc;
382 	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
383 	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
384 	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
385 	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
386 	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
387 	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
388 	sc->sc_sc.sc_ops.tx = rtwn_tx;
389 	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
390 	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
391 	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
392 	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
393 	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
394 	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
395 	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
396 	sc->sc_sc.sc_ops.init = rtwn_pci_init;
397 	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
398 	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
399 	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
400 	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
401 	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
402 	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
403 	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
404 
405 	sc->sc_sc.chip = RTWN_CHIP_PCI;
406 	switch (PCI_PRODUCT(pa->pa_id)) {
407 	case PCI_PRODUCT_REALTEK_RTL8188CE:
408 	case PCI_PRODUCT_REALTEK_RTL8192CE:
409 		sc->sc_sc.chip |= RTWN_CHIP_88C | RTWN_CHIP_92C;
410 		break;
411 	case PCI_PRODUCT_REALTEK_RTL8188EE:
412 		sc->sc_sc.chip |= RTWN_CHIP_88E;
413 		break;
414 	case PCI_PRODUCT_REALTEK_RTL8723AE:
415 		sc->sc_sc.chip |= RTWN_CHIP_23A;
416 		break;
417 	}
418 
419 	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc);
420 	if (error != 0) {
421 		rtwn_free_rx_list(sc);
422 		for (i = 0; i < RTWN_NTXQUEUES; i++)
423 			rtwn_free_tx_list(sc, i);
424 		return;
425 	}
426 
427 	/* ifp is now valid */
428 	ifp = &sc->sc_sc.sc_ic.ic_if;
429 #if NBPFILTER > 0
430 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
431 	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
432 
433 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
434 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
435 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);
436 
437 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
438 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
439 	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
440 #endif
441 }
442 
443 int
444 rtwn_pci_detach(struct device *self, int flags)
445 {
446 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
447 	int s, i;
448 
449 	s = splnet();
450 
451 	if (timeout_initialized(&sc->calib_to))
452 		timeout_del(&sc->calib_to);
453 	if (timeout_initialized(&sc->scan_to))
454 		timeout_del(&sc->scan_to);
455 
456 	rtwn_detach(&sc->sc_sc, flags);
457 
458 	/* Free Tx/Rx buffers. */
459 	for (i = 0; i < RTWN_NTXQUEUES; i++)
460 		rtwn_free_tx_list(sc, i);
461 	rtwn_free_rx_list(sc);
462 	splx(s);
463 
464 	return (0);
465 }
466 
467 int
468 rtwn_pci_activate(struct device *self, int act)
469 {
470 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
471 
472 	return rtwn_activate(&sc->sc_sc, act);
473 }
474 
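/*
 * Initialize an Rx descriptor. The buffer address and length are written
 * first, followed by a write barrier; only then is the OWN bit set, so the
 * hardware never sees a partially initialized descriptor.
 */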
475 void
476 rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
477     bus_addr_t addr, size_t len, int idx)
478 {
479 	memset(desc, 0, sizeof(*desc));
480 	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
481 		((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
482 	desc->rxbufaddr = htole32(addr);
483 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
484 	    BUS_SPACE_BARRIER_WRITE);
485 	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
486 }
487 
488 int
489 rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
490 {
491 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
492 	struct rtwn_rx_data *rx_data;
493 	size_t size;
494 	int i, error = 0;
495 
496 	/* Allocate Rx descriptors. */
497 	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
498 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
499 		&rx_ring->map);
500 	if (error != 0) {
501 		printf("%s: could not create rx desc DMA map\n",
502 		    sc->sc_dev.dv_xname);
503 		rx_ring->map = NULL;
504 		goto fail;
505 	}
506 
507 	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
508 	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
509 	if (error != 0) {
510 		printf("%s: could not allocate rx desc\n",
511 		    sc->sc_dev.dv_xname);
512 		goto fail;
513 	}
514 
515 	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
516 	    size, (caddr_t *)&rx_ring->desc,
517 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
518 	if (error != 0) {
519 		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
520 		rx_ring->desc = NULL;
521 		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
522 		goto fail;
523 	}
524 
525 	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
526 	    1, size, BUS_DMA_NOWAIT);
527 	if (error != 0) {
528 		printf("%s: could not load rx desc\n",
529 		    sc->sc_dev.dv_xname);
530 		goto fail;
531 	}
532 
533 	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
534 	    BUS_DMASYNC_PREWRITE);
535 
536 	/* Allocate Rx buffers. */
537 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
538 		rx_data = &rx_ring->rx_data[i];
539 
540 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
541 		    0, BUS_DMA_NOWAIT, &rx_data->map);
542 		if (error != 0) {
543 			printf("%s: could not create rx buf DMA map\n",
544 			    sc->sc_dev.dv_xname);
545 			goto fail;
546 		}
547 
548 		rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
549 		if (rx_data->m == NULL) {
550 			printf("%s: could not allocate rx mbuf\n",
551 			    sc->sc_dev.dv_xname);
552 			error = ENOMEM;
553 			goto fail;
554 		}
555 
556 		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
557 		    mtod(rx_data->m, void *), MCLBYTES, NULL,
558 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
559 		if (error != 0) {
560 			printf("%s: could not load rx buf DMA map\n",
561 			    sc->sc_dev.dv_xname);
562 			goto fail;
563 		}
564 
565 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
566 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
567 	}
568 fail:	if (error != 0)
569 		rtwn_free_rx_list(sc);
570 	return (error);
571 }
572 
573 void
574 rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
575 {
576 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
577 	struct rtwn_rx_data *rx_data;
578 	int i;
579 
580 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
581 		rx_data = &rx_ring->rx_data[i];
582 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
583 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
584 	}
585 }
586 
587 void
588 rtwn_free_rx_list(struct rtwn_pci_softc *sc)
589 {
590 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
591 	struct rtwn_rx_data *rx_data;
592 	int i, s;
593 
594 	s = splnet();
595 
596 	if (rx_ring->map) {
597 		if (rx_ring->desc) {
598 			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
599 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
600 			    sizeof (struct r92c_rx_desc_pci) *
601 			    RTWN_RX_LIST_COUNT);
602 			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
603 			    rx_ring->nsegs);
604 			rx_ring->desc = NULL;
605 		}
606 		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
607 		rx_ring->map = NULL;
608 	}
609 
610 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
611 		rx_data = &rx_ring->rx_data[i];
612 
613 		if (rx_data->m != NULL) {
614 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
615 			m_freem(rx_data->m);
616 			rx_data->m = NULL;
617 		}
618 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
619 		rx_data->map = NULL;
620 	}
621 
622 	splx(s);
623 }
624 
625 int
626 rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
627 {
628 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
629 	struct rtwn_tx_data *tx_data;
630 	int i = 0, error = 0;
631 
632 	error = bus_dmamap_create(sc->sc_dmat,
633 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
634 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
635 	    BUS_DMA_NOWAIT, &tx_ring->map);
636 	if (error != 0) {
637 		printf("%s: could not create tx ring DMA map\n",
638 		    sc->sc_dev.dv_xname);
639 		goto fail;
640 	}
641 
642 	error = bus_dmamem_alloc(sc->sc_dmat,
643 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
644 	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
645 	if (error != 0) {
646 		printf("%s: could not allocate tx ring DMA memory\n",
647 		    sc->sc_dev.dv_xname);
648 		goto fail;
649 	}
650 
651 	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
652 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
653 	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
654 	if (error != 0) {
655 		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
656 		printf("%s: can't map tx ring DMA memory\n",
657 		    sc->sc_dev.dv_xname);
658 		goto fail;
659 	}
660 
661 	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
662 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
663 	    BUS_DMA_NOWAIT);
664 	if (error != 0) {
665 		printf("%s: could not load tx ring DMA map\n",
666 		    sc->sc_dev.dv_xname);
667 		goto fail;
668 	}
669 
670 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
671 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
672 
673 		/* Chain this descriptor to the next one (circular list). */
674 		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
675 		  + sizeof(struct r92c_tx_desc_pci)
676 		  * ((i + 1) % RTWN_TX_LIST_COUNT));
677 
678 		tx_data = &tx_ring->tx_data[i];
679 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
680 		    0, BUS_DMA_NOWAIT, &tx_data->map);
681 		if (error != 0) {
682 			printf("%s: could not create tx buf DMA map\n",
683 			    sc->sc_dev.dv_xname);
684 			goto fail;
685 		}
686 		tx_data->m = NULL;
687 		tx_data->ni = NULL;
688 	}
689 fail:
690 	if (error != 0)
691 		rtwn_free_tx_list(sc, qid);
692 	return (error);
693 }
694 
695 void
696 rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
697 {
698 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
699 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
700 	int i;
701 
702 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
703 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
704 		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
705 
706 		memset(desc, 0, sizeof(*desc) -
707 		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
708 		    sizeof(desc->nextdescaddr)));
709 
710 		if (tx_data->m != NULL) {
711 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
712 			m_freem(tx_data->m);
713 			tx_data->m = NULL;
714 			ieee80211_release_node(ic, tx_data->ni);
715 			tx_data->ni = NULL;
716 		}
717 	}
718 
719 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
720 	    BUS_DMASYNC_POSTWRITE);
721 
722 	sc->qfullmsk &= ~(1 << qid);
723 	tx_ring->queued = 0;
724 	tx_ring->cur = 0;
725 }
726 
727 void
728 rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
729 {
730 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
731 	struct rtwn_tx_data *tx_data;
732 	int i;
733 
734 	if (tx_ring->map != NULL) {
735 		if (tx_ring->desc != NULL) {
736 			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
737 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
738 			    sizeof (struct r92c_tx_desc_pci) *
739 			    RTWN_TX_LIST_COUNT);
740 			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
741 		}
742 		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
743 	}
744 
745 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
746 		tx_data = &tx_ring->tx_data[i];
747 
748 		if (tx_data->m != NULL) {
749 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
750 			m_freem(tx_data->m);
751 			tx_data->m = NULL;
752 		}
753 		bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
754 	}
755 
756 	sc->qfullmsk &= ~(1 << qid);
757 	tx_ring->queued = 0;
758 	tx_ring->cur = 0;
759 }
760 
761 void
762 rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
763 {
764 	struct rtwn_pci_softc *sc = cookie;
765 
766 	bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
767 }
768 
769 void
770 rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
771 {
772 	struct rtwn_pci_softc *sc = cookie;
773 
774 	val = htole16(val);
775 	bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
776 }
777 
778 void
779 rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
780 {
781 	struct rtwn_pci_softc *sc = cookie;
782 
783 	val = htole32(val);
784 	bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
785 }
786 
787 uint8_t
788 rtwn_pci_read_1(void *cookie, uint16_t addr)
789 {
790 	struct rtwn_pci_softc *sc = cookie;
791 
792 	return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
793 }
794 
795 uint16_t
796 rtwn_pci_read_2(void *cookie, uint16_t addr)
797 {
798 	struct rtwn_pci_softc *sc = cookie;
799 	uint16_t val;
800 
801 	val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
802 	return le16toh(val);
803 }
804 
805 uint32_t
806 rtwn_pci_read_4(void *cookie, uint16_t addr)
807 {
808 	struct rtwn_pci_softc *sc = cookie;
809 	uint32_t val;
810 
811 	val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
812 	return le32toh(val);
813 }
814 
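/*
 * Process one received frame. On RTL8188EE, Tx report descriptors delivered
 * on the Rx ring are consumed here to update AMRR statistics. For regular
 * frames the mbuf is handed to net80211 and the descriptor is recycled with
 * a freshly loaded cluster.
 */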
815 void
816 rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
817     struct rtwn_rx_data *rx_data, int desc_idx, struct mbuf_list *ml)
818 {
819 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
820 	struct ifnet *ifp = &ic->ic_if;
821 	struct ieee80211_rxinfo rxi;
822 	struct ieee80211_frame *wh;
823 	struct ieee80211_node *ni;
824 	struct r92c_rx_phystat *phy = NULL;
825 	uint32_t rxdw0, rxdw3;
826 	struct mbuf *m, *m1;
827 	uint8_t rate;
828 	int8_t rssi = 0;
829 	int infosz, pktlen, shift, error;
830 
831 	rxdw0 = letoh32(rx_desc->rxdw0);
832 	rxdw3 = letoh32(rx_desc->rxdw3);
833 
834 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
835 		int ntries, type;
836 		struct r88e_tx_rpt_ccx *rxstat;
837 
838 		type = MS(rxdw3, R88E_RXDW3_RPT);
839 		if (type == R88E_RXDW3_RPT_TX1) {
840 			uint32_t rptb1, rptb2;
841 
842 			rxstat = mtod(rx_data->m, struct r88e_tx_rpt_ccx *);
843 			rptb1 = letoh32(rxstat->rptb1);
844 			rptb2 = letoh32(rxstat->rptb2);
845 			ntries = MS(rptb2, R88E_RPTB2_RETRY_CNT);
846 			if (rptb1 & R88E_RPTB1_PKT_OK)
847 				sc->amn.amn_txcnt++;
848 			if (ntries > 0)
849 				sc->amn.amn_retrycnt++;
850 
851 			rtwn_setup_rx_desc(sc, rx_desc,
852 			    rx_data->map->dm_segs[0].ds_addr, MCLBYTES,
853 			    desc_idx);
854 			return;
855 		}
856 	}
857 
858 	if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
859 		/*
860 		 * This should not happen since we set up our Rx filter
861 		 * to not receive these frames.
862 		 */
863 		ifp->if_ierrors++;
864 		return;
865 	}
866 
867 	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
868 	if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
869 		ifp->if_ierrors++;
870 		return;
871 	}
872 
873 	rate = MS(rxdw3, R92C_RXDW3_RATE);
874 	infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
875 	if (infosz > sizeof(struct r92c_rx_phystat))
876 		infosz = sizeof(struct r92c_rx_phystat);
877 	shift = MS(rxdw0, R92C_RXDW0_SHIFT);
878 
879 	/* Get RSSI from PHY status descriptor if present. */
880 	if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
881 		phy = mtod(rx_data->m, struct r92c_rx_phystat *);
882 		rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
883 		/* Update our average RSSI. */
884 		rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
885 	}
886 
887 	DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
888 	    pktlen, rate, infosz, shift, rssi));
889 
890 	m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
891 	if (m1 == NULL) {
892 		ifp->if_ierrors++;
893 		return;
894 	}
895 	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
896 	error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
897 	    mtod(m1, void *), MCLBYTES, NULL,
898 	    BUS_DMA_NOWAIT | BUS_DMA_READ);
899 	if (error != 0) {
900 		m_freem(m1);
901 
902 		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
903 		    rx_data->m, BUS_DMA_NOWAIT))
904 			panic("%s: could not load old RX mbuf",
905 			    sc->sc_dev.dv_xname);
906 
907 		/* Physical address may have changed. */
908 		rtwn_setup_rx_desc(sc, rx_desc,
909 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx);
910 
911 		ifp->if_ierrors++;
912 		return;
913 	}
914 
915 	/* Finalize mbuf. */
916 	m = rx_data->m;
917 	rx_data->m = m1;
918 	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
919 
920 	/* Update RX descriptor. */
921 	rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr,
922 	    MCLBYTES, desc_idx);
923 
924 	/* Get ieee80211 frame header. */
925 	if (rxdw0 & R92C_RXDW0_PHYST)
926 		m_adj(m, infosz + shift);
927 	else
928 		m_adj(m, shift);
929 	wh = mtod(m, struct ieee80211_frame *);
930 
931 #if NBPFILTER > 0
932 	if (__predict_false(sc->sc_drvbpf != NULL)) {
933 		struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
934 		struct mbuf mb;
935 
936 		tap->wr_flags = 0;
937 		/* Map HW rate index to 802.11 rate. */
939 		if (!(rxdw3 & R92C_RXDW3_HT)) {
940 			switch (rate) {
941 			/* CCK. */
942 			case  0: tap->wr_rate =   2; break;
943 			case  1: tap->wr_rate =   4; break;
944 			case  2: tap->wr_rate =  11; break;
945 			case  3: tap->wr_rate =  22; break;
946 			/* OFDM. */
947 			case  4: tap->wr_rate =  12; break;
948 			case  5: tap->wr_rate =  18; break;
949 			case  6: tap->wr_rate =  24; break;
950 			case  7: tap->wr_rate =  36; break;
951 			case  8: tap->wr_rate =  48; break;
952 			case  9: tap->wr_rate =  72; break;
953 			case 10: tap->wr_rate =  96; break;
954 			case 11: tap->wr_rate = 108; break;
955 			}
956 		} else if (rate >= 12) {	/* MCS0~15. */
957 			/* Bit 7 set means HT MCS instead of rate. */
958 			tap->wr_rate = 0x80 | (rate - 12);
959 		}
960 		tap->wr_dbm_antsignal = rssi;
961 		tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq);
962 		tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags);
963 
964 		mb.m_data = (caddr_t)tap;
965 		mb.m_len = sc->sc_rxtap_len;
966 		mb.m_next = m;
967 		mb.m_nextpkt = NULL;
968 		mb.m_type = 0;
969 		mb.m_flags = 0;
970 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
971 	}
972 #endif
973 
974 	ni = ieee80211_find_rxnode(ic, wh);
975 	rxi.rxi_flags = 0;
976 	rxi.rxi_rssi = rssi;
977 	rxi.rxi_tstamp = 0;	/* Unused. */
978 	ieee80211_inputm(ifp, m, ni, &rxi, ml);
979 	/* Node is no longer needed. */
980 	ieee80211_release_node(ic, ni);
981 }
982 
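/*
 * Queue a frame for transmission. The Tx queue is chosen from the frame's
 * QoS priority (or defaults for management/non-QoS data), the next free
 * descriptor is filled, and ownership is passed to the hardware before the
 * corresponding queue is kicked via R92C_PCIE_CTRL_REG.
 */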
983 int
984 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni)
985 {
986 	struct rtwn_pci_softc *sc = cookie;
987 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
988 	struct ieee80211_frame *wh;
989 	struct ieee80211_key *k = NULL;
990 	struct rtwn_tx_ring *tx_ring;
991 	struct rtwn_tx_data *data;
992 	struct r92c_tx_desc_pci *txd;
993 	uint16_t qos;
994 	uint8_t raid, type, tid, qid;
995 	int hasqos, error;
996 
997 	wh = mtod(m, struct ieee80211_frame *);
998 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
999 
1000 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1001 		k = ieee80211_get_txkey(ic, wh, ni);
1002 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
1003 			return (ENOBUFS);
1004 		wh = mtod(m, struct ieee80211_frame *);
1005 	}
1006 
1007 	if ((hasqos = ieee80211_has_qos(wh))) {
1008 		qos = ieee80211_get_qos(wh);
1009 		tid = qos & IEEE80211_QOS_TID;
1010 		qid = ieee80211_up_to_ac(ic, tid);
1011 	} else if (type != IEEE80211_FC0_TYPE_DATA) {
1012 		qid = RTWN_VO_QUEUE;
1013 	} else
1014 		qid = RTWN_BE_QUEUE;
1015 
1016 	/* Grab a Tx buffer from the ring. */
1017 	tx_ring = &sc->tx_ring[qid];
1018 	data = &tx_ring->tx_data[tx_ring->cur];
1019 	if (data->m != NULL) {
1020 		m_freem(m);
1021 		return (ENOBUFS);
1022 	}
1023 
1024 	/* Fill Tx descriptor. */
1025 	txd = &tx_ring->desc[tx_ring->cur];
1026 	if (htole32(txd->txdw0) & R92C_RXDW0_OWN) {
1027 		m_freem(m);
1028 		return (ENOBUFS);
1029 	}
1030 	txd->txdw0 = htole32(
1031 	    SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) |
1032 	    SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
1033 	    R92C_TXDW0_FSG | R92C_TXDW0_LSG);
1034 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
1035 		txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
1036 
1037 	txd->txdw1 = 0;
1038 #ifdef notyet
1039 	if (k != NULL) {
1040 		switch (k->k_cipher) {
1041 		case IEEE80211_CIPHER_WEP40:
1042 		case IEEE80211_CIPHER_WEP104:
1043 		case IEEE80211_CIPHER_TKIP:
1044 			cipher = R92C_TXDW1_CIPHER_RC4;
1045 			break;
1046 		case IEEE80211_CIPHER_CCMP:
1047 			cipher = R92C_TXDW1_CIPHER_AES;
1048 			break;
1049 		default:
1050 			cipher = R92C_TXDW1_CIPHER_NONE;
1051 		}
1052 		txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher));
1053 	}
1054 #endif
1055 	txd->txdw4 = 0;
1056 	txd->txdw5 = 0;
1057 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
1058 	    type == IEEE80211_FC0_TYPE_DATA) {
1059 		if (ic->ic_curmode == IEEE80211_MODE_11B ||
1060 		    (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B))
1061 			raid = R92C_RAID_11B;
1062 		else
1063 			raid = R92C_RAID_11BG;
1064 
1065 		if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1066 			txd->txdw1 |= htole32(
1067 			    SM(R88E_TXDW1_MACID, R92C_MACID_BSS) |
1068 			    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1069 			    SM(R92C_TXDW1_RAID, raid));
1070 			txd->txdw2 |= htole32(R88E_TXDW2_AGGBK);
1071 		} else {
1072 			txd->txdw1 |= htole32(
1073 			    SM(R92C_TXDW1_MACID, R92C_MACID_BSS) |
1074 			    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1075 			    SM(R92C_TXDW1_RAID, raid) |
1076 			    R92C_TXDW1_AGGBK);
1077 		}
1078 
1079 		/* Request TX status report for AMRR. */
1080 		txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT);
1081 
1082 		if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
1083 			txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1084 			    R92C_TXDW4_HWRTSEN);
1085 		} else if (ic->ic_flags & IEEE80211_F_USEPROT) {
1086 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1087 				txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
1088 				    R92C_TXDW4_HWRTSEN);
1089 			} else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1090 				txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1091 				    R92C_TXDW4_HWRTSEN);
1092 			}
1093 		}
1094 
1095 		if (ic->ic_curmode == IEEE80211_MODE_11B)
1096 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 0));
1097 		else
1098 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 3));
1099 		txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf));
1100 
1101 		/* Use AMRR rate for data. */
1102 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1103 		if (ic->ic_fixed_rate != -1)
1104 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1105 			    ic->ic_fixed_rate));
1106 		else
1107 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1108 			    ni->ni_txrate));
1109 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));
1110 	} else {
1111 		txd->txdw1 |= htole32(
1112 		    SM(R92C_TXDW1_MACID, 0) |
1113 		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
1114 		    SM(R92C_TXDW1_RAID, R92C_RAID_11B));
1115 
1116 		/* Force CCK1. */
1117 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1118 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
1119 	}
1120 	/* Set sequence number (already little endian). */
1121 	txd->txdseq = (*(uint16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
1122 	if (sc->sc_sc.chip & RTWN_CHIP_23A)
1123 		txd->txdseq |= htole16(R23A_TXDW3_TXRPTEN);
1124 
1125 	if (!hasqos) {
1126 		/* Use HW sequence numbering for non-QoS frames. */
1127 		if (!(sc->sc_sc.chip & RTWN_CHIP_23A))
1128 			txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ);
1129 		txd->txdseq |= htole16(R92C_TXDW3_HWSEQEN);
1130 	} else
1131 		txd->txdw4 |= htole32(R92C_TXDW4_QOS);
1132 
1133 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1134 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1135 	if (error && error != EFBIG) {
1136 		printf("%s: can't map mbuf (error %d)\n",
1137 		    sc->sc_dev.dv_xname, error);
1138 		m_freem(m);
1139 		return error;
1140 	}
1141 	if (error != 0) {
1142 		/* Too many DMA segments, linearize mbuf. */
1143 		if (m_defrag(m, M_DONTWAIT)) {
1144 			m_freem(m);
1145 			return ENOBUFS;
1146 		}
1147 
1148 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1149 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1150 		if (error != 0) {
1151 			printf("%s: can't map mbuf (error %d)\n",
1152 			    sc->sc_dev.dv_xname, error);
1153 			m_freem(m);
1154 			return error;
1155 		}
1156 	}
1157 
1158 	txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
1159 	txd->txbufsize = htole16(m->m_pkthdr.len);
1160 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
1161 	    BUS_SPACE_BARRIER_WRITE);
1162 	txd->txdw0 |= htole32(R92C_TXDW0_OWN);
1163 
1164 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1165 	    BUS_DMASYNC_POSTWRITE);
1166 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
1167 	    BUS_DMASYNC_POSTWRITE);
1168 
1169 	data->m = m;
1170 	data->ni = ni;
1171 
1172 #if NBPFILTER > 0
1173 	if (__predict_false(sc->sc_drvbpf != NULL)) {
1174 		struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
1175 		struct mbuf mb;
1176 
1177 		tap->wt_flags = 0;
1178 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1179 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1180 
1181 		mb.m_data = (caddr_t)tap;
1182 		mb.m_len = sc->sc_txtap_len;
1183 		mb.m_next = m;
1184 		mb.m_nextpkt = NULL;
1185 		mb.m_type = 0;
1186 		mb.m_flags = 0;
1187 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1188 	}
1189 #endif
1190 
1191 	tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
1192 	tx_ring->queued++;
1193 
1194 	if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
1195 		sc->qfullmsk |= (1 << qid);
1196 
1197 	/* Kick TX. */
1198 	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));
1199 
1200 	return (0);
1201 }
1202 
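/*
 * Reclaim completed Tx descriptors (OWN bit cleared by hardware), release
 * their mbufs and node references, and restart the interface send queue
 * once the ring has room again.
 */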
1203 void
1204 rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
1205 {
1206 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1207 	struct ifnet *ifp = &ic->ic_if;
1208 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
1209 	struct rtwn_tx_data *tx_data;
1210 	struct r92c_tx_desc_pci *tx_desc;
1211 	int i;
1212 
1213 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1214 	    BUS_DMASYNC_POSTREAD);
1215 
1216 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
1217 		tx_data = &tx_ring->tx_data[i];
1218 		if (tx_data->m == NULL)
1219 			continue;
1220 
1221 		tx_desc = &tx_ring->desc[i];
1222 		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
1223 			continue;
1224 
1225 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
1226 		m_freem(tx_data->m);
1227 		tx_data->m = NULL;
1228 		ieee80211_release_node(ic, tx_data->ni);
1229 		tx_data->ni = NULL;
1230 
1231 		sc->sc_sc.sc_tx_timer = 0;
1232 		tx_ring->queued--;
1233 
1234 		if (!(sc->sc_sc.chip & RTWN_CHIP_23A))
1235 			rtwn_poll_c2h_events(sc);
1236 	}
1237 
1238 	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
1239 		sc->qfullmsk &= ~(1 << qid);
1240 
1241 	if (sc->qfullmsk == 0) {
1242 		ifq_clr_oactive(&ifp->if_snd);
1243 		(*ifp->if_start)(ifp);
1244 	}
1245 }
1246 
1247 int
1248 rtwn_alloc_buffers(void *cookie)
1249 {
1250 	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
1251 	return (0);
1252 }
1253 
1254 int
1255 rtwn_pci_init(void *cookie)
1256 {
1257 	struct rtwn_pci_softc *sc = cookie;
1258 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
1259 
1260 	/* Enable TX reports for AMRR */
1261 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1262 		rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL,
1263 		    (rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) & ~0) |
1264 		    R88E_TX_RPT_CTRL_EN);
1265 		rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL + 1, 0x02);
1266 
1267 		rtwn_pci_write_2(sc, R88E_TX_RPT_TIME, 0xcdf0);
1268 	}
1269 
1270 	return (0);
1271 }
1272 
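/*
 * Chip-specific shutdown for RTL8188CE/RTL8192CE: mask interrupts, pause
 * Tx, reset the baseband, disable the MAC DMA/protocol blocks and power
 * the chip down, mirroring the vendor driver's sequence.
 */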
1273 void
1274 rtwn_pci_92c_stop(struct rtwn_pci_softc *sc)
1275 {
1276 	uint16_t reg;
1277 
1278 	/* Disable interrupts. */
1279 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1280 
1281 	/* Stop hardware. */
1282 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1283 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1284 	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
1285 	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
1286 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1287 	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
1288 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1289 	reg = rtwn_pci_read_2(sc, R92C_CR);
1290 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1291 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1292 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1293 	    R92C_CR_ENSEC);
1294 	rtwn_pci_write_2(sc, R92C_CR, reg);
1295 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1296 		rtwn_fw_reset(&sc->sc_sc);
1297 	/* TODO: linux does additional btcoex stuff here */
1298 	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80); /* linux magic number */
1299 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23); /* ditto */
1300 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e); /* differs in btcoex */
1301 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1302 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1303 	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);
1304 }
1305 
1306 void
1307 rtwn_pci_88e_stop(struct rtwn_pci_softc *sc)
1308 {
1309 	int i;
1310 	uint16_t reg;
1311 
1312 	/* Disable interrupts. */
1313 	rtwn_pci_write_4(sc, R88E_HIMR, 0x00000000);
1314 
1315 	/* Stop hardware. */
1316 	rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL,
1317 	    rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) &
1318 	    ~(R88E_TX_RPT_CTRL_EN));
1319 
1320 	for (i = 0; i < 100; i++) {
1321 		if (rtwn_pci_read_1(sc, R88E_RXDMA_CTRL) & 0x02)
1322 			break;
1323 		DELAY(10);
1324 	}
1325 	if (i == 100)
1326 		DPRINTF(("rxdma ctrl didn't go off, %x\n", rtwn_pci_read_1(sc, R88E_RXDMA_CTRL)));
1327 
1328 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff);
1329 
1330 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1331 
1332 	/* ensure transmission has stopped */
1333 	for (i = 0; i < 100; i++) {
1334 		if (rtwn_pci_read_4(sc, 0x5f8) == 0)
1335 			break;
1336 		DELAY(10);
1337 	}
1338 	if (i == 100)
1339 		DPRINTF(("tx didn't stop\n"));
1340 
1341 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1342 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1343 	    ~(R92C_SYS_FUNC_EN_BBRSTB));
1344 	DELAY(1);
1345 	reg = rtwn_pci_read_2(sc, R92C_CR);
1346 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1347 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1348 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1349 	    R92C_CR_ENSEC);
1350 	rtwn_pci_write_2(sc, R92C_CR, reg);
1351 	rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST,
1352 	    rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20);
1353 
1354 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1355 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1356 		rtwn_fw_reset(&sc->sc_sc);
1357 
1358 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1,
1359 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~0x02);
1360 	rtwn_pci_write_1(sc, R92C_MCUFWDL, 0);
1361 
1362 	rtwn_pci_write_1(sc, R88E_32K_CTRL,
1363 	    rtwn_pci_read_1(sc, R88E_32K_CTRL) & ~(0x01));
1364 
1365 	/* transition to cardemu state */
1366 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0);
1367 	rtwn_pci_write_1(sc, R92C_LPLDO_CTRL,
1368 	    rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) | 0x10);
1369 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1370 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_OFF);
1371 	for (i = 0; i < 100; i++) {
1372 		if ((rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1373 		    R92C_APS_FSMCO_APFM_OFF) == 0)
1374 			break;
1375 		DELAY(10);
1376 	}
1377 	if (i == 100)
1378 		DPRINTF(("apfm off didn't go off\n"));
1379 
1380 	/* transition to card disabled state */
1381 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2,
1382 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80);
1383 
1384 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1385 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08);
1386 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1387 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08);
1388 
1389 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1390 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1391 }
1392 
1393 void
1394 rtwn_pci_23a_stop(struct rtwn_pci_softc *sc)
1395 {
1396 	int i;
1397 
1398 	/* Disable interrupts. */
1399 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1400 
1401 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff);
1402 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1403 
1404 	/* ensure transmission has stopped */
1405 	for (i = 0; i < 100; i++) {
1406 		if (rtwn_pci_read_4(sc, 0x5f8) == 0)
1407 			break;
1408 		DELAY(10);
1409 	}
1410 	if (i == 100)
1411 		DPRINTF(("tx didn't stop\n"));
1412 
1413 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1414 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1415 	    ~(R92C_SYS_FUNC_EN_BBRSTB));
1416 	DELAY(1);
1417 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1418 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1419 	    ~(R92C_SYS_FUNC_EN_BB_GLB_RST));
1420 
1421 	rtwn_pci_write_2(sc, R92C_CR,
1422 	    rtwn_pci_read_2(sc, R92C_CR) &
1423 	    ~(R92C_CR_MACTXEN | R92C_CR_MACRXEN | R92C_CR_ENSWBCN));
1424 
1425 	rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST,
1426 	    rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20);
1427 
1428 	/* Turn off RF */
1429 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1430 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1431 		rtwn_fw_reset(&sc->sc_sc);
1432 
1433 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1,
1434 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~R92C_SYS_FUNC_EN_DIOE);
1435 	rtwn_pci_write_1(sc, R92C_MCUFWDL, 0);
1436 
1437 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1438 	rtwn_pci_write_1(sc, R92C_LEDCFG2, rtwn_pci_read_1(sc, R92C_LEDCFG2) & ~(0x80));
1439 	rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) |
1440 	    R92C_APS_FSMCO_APFM_OFF);
1441 	rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1442 	    ~(R92C_APS_FSMCO_APFM_OFF));
1443 
1444 	rtwn_pci_write_4(sc, R92C_APS_FSMCO,
1445 	    rtwn_pci_read_4(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_RDY_MACON);
1446 	rtwn_pci_write_4(sc, R92C_APS_FSMCO,
1447 	    rtwn_pci_read_4(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APDM_HPDN);
1448 
1449 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1450 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08);
1451 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1452 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08);
1453 
1454 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1455 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1456 }
1457 
1458 void
1459 rtwn_pci_stop(void *cookie)
1460 {
1461 	struct rtwn_pci_softc *sc = cookie;
1462 	int i, s;
1463 
1464 	s = splnet();
1465 
1466 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1467 		rtwn_pci_88e_stop(sc);
1468 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
1469 		rtwn_pci_23a_stop(sc);
1470 	} else {
1471 		rtwn_pci_92c_stop(sc);
1472 	}
1473 
1474 	for (i = 0; i < RTWN_NTXQUEUES; i++)
1475 		rtwn_reset_tx_list(sc, i);
1476 	rtwn_reset_rx_list(sc);
1477 
1478 	splx(s);
1479 }
1480 
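/*
 * Interrupt handler for RTL8188EE, which uses its own interrupt status
 * registers (HISR/HISRE) and extended status bits for Rx FIFO overflow.
 */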
1481 int
1482 rtwn_88e_intr(struct rtwn_pci_softc *sc)
1483 {
1484 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1485 	u_int32_t status, estatus;
1486 	int i;
1487 
1488 	status = rtwn_pci_read_4(sc, R88E_HISR);
1489 	if (status == 0 || status == 0xffffffff)
1490 		return (0);
1491 
1492 	estatus = rtwn_pci_read_4(sc, R88E_HISRE);
1493 
1494 	status &= RTWN_88E_INT_ENABLE;
1495 	estatus &= R88E_HIMRE_RXFOVW;
1496 
1497 	rtwn_pci_write_4(sc, R88E_HIMR, 0);
1498 	rtwn_pci_write_4(sc, R88E_HIMRE, 0);
1499 	rtwn_pci_write_4(sc, R88E_HISR, status);
1500 	rtwn_pci_write_4(sc, R88E_HISRE, estatus);
1501 
1502 	if (status & R88E_HIMR_HIGHDOK)
1503 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1504 	if (status & R88E_HIMR_MGNTDOK)
1505 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1506 	if (status & R88E_HIMR_BKDOK)
1507 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1508 	if (status & R88E_HIMR_BEDOK)
1509 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1510 	if (status & R88E_HIMR_VIDOK)
1511 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1512 	if (status & R88E_HIMR_VODOK)
1513 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1514 	if ((status & (R88E_HIMR_ROK | R88E_HIMR_RDU)) ||
1515 	    (estatus & R88E_HIMRE_RXFOVW)) {
1516 		struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1517 
1518 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1519 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1520 		    BUS_DMASYNC_POSTREAD);
1521 
1522 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1523 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1524 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1525 
1526 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1527 				continue;
1528 
1529 			rtwn_rx_frame(sc, rx_desc, rx_data, i, &ml);
1530 		}
1531 		if_input(&ic->ic_if, &ml);
1532 	}
1533 
1534 	if (status & R88E_HIMR_HSISR_IND_ON_INT) {
1535 		rtwn_pci_write_1(sc, R92C_HSISR,
1536 		    rtwn_pci_read_1(sc, R92C_HSISR) |
1537 		    R88E_HSIMR_PDN_INT_EN | R88E_HSIMR_RON_INT_EN);
1538 	}
1539 
1540 	/* Enable interrupts. */
1541 	rtwn_pci_write_4(sc, R88E_HIMR, RTWN_88E_INT_ENABLE);
1542 	rtwn_pci_write_4(sc, R88E_HIMRE, R88E_HIMRE_RXFOVW);
1543 
1544 	return (1);
1545 }
1546 
1547 int
1548 rtwn_intr(void *xsc)
1549 {
1550 	struct rtwn_pci_softc *sc = xsc;
1551 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1552 	u_int32_t status;
1553 	int i;
1554 
1555 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1556 		return (rtwn_88e_intr(sc));
1557 
1558 	status = rtwn_pci_read_4(sc, R92C_HISR);
1559 	if (status == 0 || status == 0xffffffff)
1560 		return (0);
1561 
1562 	/* Disable interrupts. */
1563 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1564 
1565 	/* Ack interrupts. */
1566 	rtwn_pci_write_4(sc, R92C_HISR, status);
1567 
1568 	/* Vendor driver treats RX errors like ROK... */
1569 	if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) {
1570 		struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1571 
1572 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1573 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1574 		    BUS_DMASYNC_POSTREAD);
1575 
1576 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1577 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1578 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1579 
1580 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1581 				continue;
1582 
1583 			rtwn_rx_frame(sc, rx_desc, rx_data, i, &ml);
1584 		}
1585 		if_input(&ic->ic_if, &ml);
1586 	}
1587 
1588 	if (status & R92C_IMR_BDOK)
1589 		rtwn_tx_done(sc, RTWN_BEACON_QUEUE);
1590 	if (status & R92C_IMR_HIGHDOK)
1591 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1592 	if (status & R92C_IMR_MGNTDOK)
1593 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1594 	if (status & R92C_IMR_BKDOK)
1595 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1596 	if (status & R92C_IMR_BEDOK)
1597 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1598 	if (status & R92C_IMR_VIDOK)
1599 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1600 	if (status & R92C_IMR_VODOK)
1601 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1602 
1603 	if (sc->sc_sc.chip & RTWN_CHIP_23A) {
1604 		if (status & R92C_IMR_ATIMEND)
1605 			rtwn_poll_c2h_events(sc);
1606 	}
1607 
1608 	/* Enable interrupts. */
1609 	rtwn_pci_write_4(sc, R92C_HIMR, RTWN_92C_INT_ENABLE);
1610 
1611 	return (1);
1612 }
1613 
1614 int
1615 rtwn_is_oactive(void *cookie)
1616 {
1617 	struct rtwn_pci_softc *sc = cookie;
1618 
1619 	return (sc->qfullmsk != 0);
1620 }
1621 
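/*
 * Write a single entry of the on-chip link list table (LLT) and poll
 * until the chip acknowledges completion of the write operation.
 */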
1622 int
1623 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data)
1624 {
1625 	int ntries;
1626 
1627 	rtwn_pci_write_4(sc, R92C_LLT_INIT,
1628 	    SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
1629 	    SM(R92C_LLT_INIT_ADDR, addr) |
1630 	    SM(R92C_LLT_INIT_DATA, data));
1631 	/* Wait for write operation to complete. */
1632 	for (ntries = 0; ntries < 20; ntries++) {
1633 		if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
1634 		    R92C_LLT_INIT_OP_NO_ACTIVE)
1635 			return (0);
1636 		DELAY(5);
1637 	}
1638 	return (ETIMEDOUT);
1639 }
1640 
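/*
 * Initialize the link list table: pages [0, page_count] are chained for
 * reserved (per-queue) use and terminated with 0xff, while the remaining
 * pages up to pktbuf_count - 1 are linked into a circular ring used as
 * the shared Tx packet buffer.
 */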
1641 int
1642 rtwn_llt_init(struct rtwn_pci_softc *sc, int page_count)
1643 {
1644 	int i, error, pktbuf_count;
1645 
1646 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1647 		pktbuf_count = R88E_TXPKTBUF_COUNT;
1648 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
1649 		pktbuf_count = R23A_TXPKTBUF_COUNT;
1650 	else
1651 		pktbuf_count = R92C_TXPKTBUF_COUNT;
1652 
1653 	/* Reserve pages [0; page_count]. */
1654 	for (i = 0; i < page_count; i++) {
1655 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1656 			return (error);
1657 	}
1658 	/* NB: 0xff indicates end-of-list. */
1659 	if ((error = rtwn_llt_write(sc, i, 0xff)) != 0)
1660 		return (error);
1661 	/*
1662 	 * Use pages [page_count + 1; pktbuf_count - 1]
1663 	 * as ring buffer.
1664 	 */
1665 	for (++i; i < pktbuf_count - 1; i++) {
1666 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1667 			return (error);
1668 	}
1669 	/* Make the last page point to the beginning of the ring buffer. */
1670 	error = rtwn_llt_write(sc, i, page_count + 1);
1671 	return (error);
1672 }
1673 
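/*
 * Power-on sequence for RTL8188CE/RTL8192CE: wait for autoload, switch the
 * SPS regulator to PWM mode, auto-enable the WLAN MAC, check the hardware
 * RF kill switch, and finally enable the MAC DMA/WMAC/SCHEDULE/SEC blocks.
 */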
1674 int
1675 rtwn_92c_power_on(struct rtwn_pci_softc *sc)
1676 {
1677 	uint32_t reg;
1678 	int ntries;
1679 
1680 	/* Wait for autoload done bit. */
1681 	for (ntries = 0; ntries < 1000; ntries++) {
1682 		if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) &
1683 		    R92C_APS_FSMCO_PFM_ALDN)
1684 			break;
1685 		DELAY(5);
1686 	}
1687 	if (ntries == 1000) {
1688 		printf("%s: timeout waiting for chip autoload\n",
1689 		    sc->sc_dev.dv_xname);
1690 		return (ETIMEDOUT);
1691 	}
1692 
1693 	/* Unlock ISO/CLK/Power control register. */
1694 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1695 
1696 	/* TODO: check if we need this for 8188CE */
1697 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1698 		/* bt coex */
1699 		reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO);
1700 		reg |= (R92C_APS_FSMCO_SOP_ABG |
1701 			R92C_APS_FSMCO_SOP_AMB |
1702 			R92C_APS_FSMCO_XOP_BTCK);
1703 		rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg);
1704 	}
1705 
1706 	/* Move SPS into PWM mode. */
1707 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b);
1708 	DELAY(100);
1709 
1710 	/* Set low byte to 0x0f, leave others unchanged. */
1711 	rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL,
1712 	    (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f);
1713 
1714 	/* TODO: check if we need this for 8188CE */
1715 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1716 		/* bt coex */
1717 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL);
1718 		reg &= (~0x00024800); /* XXX magic from linux */
1719 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg);
1720 	}
1721 
1722 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1723 	  (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) |
1724 	  R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR);
1725 	DELAY(200);
1726 
1727 	/* TODO: linux does additional btcoex stuff here */
1728 
1729 	/* Auto enable WLAN. */
1730 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1731 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1732 	for (ntries = 0; ntries < 1000; ntries++) {
1733 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1734 		    R92C_APS_FSMCO_APFM_ONMAC))
1735 			break;
1736 		DELAY(5);
1737 	}
1738 	if (ntries == 1000) {
1739 		printf("%s: timeout waiting for MAC auto ON\n",
1740 		    sc->sc_dev.dv_xname);
1741 		return (ETIMEDOUT);
1742 	}
1743 
1744 	/* Enable radio, GPIO and LED functions. */
1745 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1746 	    R92C_APS_FSMCO_AFSM_PCIE |
1747 	    R92C_APS_FSMCO_PDN_EN |
1748 	    R92C_APS_FSMCO_PFM_ALDN);
1749 	/* Release RF digital isolation. */
1750 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1751 	    rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);
1752 
1753 	if (sc->sc_sc.chip & RTWN_CHIP_92C)
1754 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77);
1755 	else
1756 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22);
1757 
1758 	rtwn_pci_write_4(sc, R92C_INT_MIG, 0);
1759 
1760 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1761 		/* bt coex */
1762 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2);
1763 		reg &= 0xfd; /* XXX magic from linux */
1764 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg);
1765 	}
1766 
1767 	rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG,
1768 	    rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL);
1769 
1770 	reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL);
1771 	if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) {
1772 		printf("%s: radio is disabled by hardware switch\n",
1773 		    sc->sc_dev.dv_xname);
1774 		return (EPERM);	/* :-) */
1775 	}
1776 
1777 	/* Initialize MAC. */
1778 	rtwn_pci_write_1(sc, R92C_APSD_CTRL,
1779 	    rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
1780 	for (ntries = 0; ntries < 200; ntries++) {
1781 		if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) &
1782 		    R92C_APSD_CTRL_OFF_STATUS))
1783 			break;
1784 		DELAY(500);
1785 	}
1786 	if (ntries == 200) {
1787 		printf("%s: timeout waiting for MAC initialization\n",
1788 		    sc->sc_dev.dv_xname);
1789 		return (ETIMEDOUT);
1790 	}
1791 
1792 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1793 	reg = rtwn_pci_read_2(sc, R92C_CR);
1794 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1795 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1796 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1797 	    R92C_CR_ENSEC;
1798 	rtwn_pci_write_2(sc, R92C_CR, reg);
1799 
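	/* XXX undocumented register write. */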
1800 	rtwn_pci_write_1(sc, 0xfe10, 0x19);
1801 
1802 	return (0);
1803 }
1804 
1805 int
1806 rtwn_88e_power_on(struct rtwn_pci_softc *sc)
1807 {
1808 	uint32_t reg;
1809 	int ntries;
1810 
1811 	/* Disable XTAL output for power saving. */
1812 	rtwn_pci_write_1(sc, R88E_XCK_OUT_CTRL,
1813 	    rtwn_pci_read_1(sc, R88E_XCK_OUT_CTRL) & ~R88E_XCK_OUT_CTRL_EN);
1814 
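	/* Disable HWPDN and unlock ISO/CLK/Power control register. */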
1815 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1816 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & (~R92C_APS_FSMCO_APDM_HPDN));
1817 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1818 
1819 	/* Wait for power ready bit. */
1820 	for (ntries = 0; ntries < 5000; ntries++) {
1821 		if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST)
1822 			break;
1823 		DELAY(10);
1824 	}
1825 	if (ntries == 5000) {
1826 		printf("%s: timeout waiting for chip power up\n",
1827 		    sc->sc_dev.dv_xname);
1828 		return (ETIMEDOUT);
1829 	}
1830 
1831 	/* Reset BB. */
1832 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1833 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) & ~(R92C_SYS_FUNC_EN_BBRSTB |
1834 	    R92C_SYS_FUNC_EN_BB_GLB_RST));
1835 
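	/* XXX undocumented crystal control bit. */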
1836 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2,
1837 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80);
1838 
1839 	/* Disable HWPDN. */
1840 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1841 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN);
1842 	/* Disable WL suspend. */
1843 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1844 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1845 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1846 
1847 	/* Auto enable WLAN. */
1848 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1849 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1850 	for (ntries = 0; ntries < 5000; ntries++) {
1851 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1852 		    R92C_APS_FSMCO_APFM_ONMAC))
1853 			break;
1854 		DELAY(10);
1855 	}
1856 	if (ntries == 5000) {
1857 		printf("%s: timeout waiting for MAC auto ON\n",
1858 		    sc->sc_dev.dv_xname);
1859 		return (ETIMEDOUT);
1860 	}
1861 
1862 	/* Enable LDO normal mode. */
1863 	rtwn_pci_write_1(sc, R92C_LPLDO_CTRL,
1864 	    rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) & ~0x10);
1865 
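	/* Miscellaneous power-on setup; XXX mostly undocumented bits. */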
1866 	rtwn_pci_write_1(sc, R92C_APS_FSMCO,
1867 	    rtwn_pci_read_1(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_PDN_EN);
1868 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2,
1869 	    rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04);
1870 
1871 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1,
1872 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1) | 0x02);
1873 
1874 	rtwn_pci_write_1(sc, R92C_SYS_CLKR,
1875 	    rtwn_pci_read_1(sc, R92C_SYS_CLKR) | 0x08);
1876 
1877 	rtwn_pci_write_2(sc, R92C_GPIO_MUXCFG,
1878 	    rtwn_pci_read_2(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENSIC);
1879 
1880 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1881 	rtwn_pci_write_2(sc, R92C_CR, 0);
1882 	reg = rtwn_pci_read_2(sc, R92C_CR);
1883 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1884 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1885 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1886 	    R92C_CR_ENSEC | R92C_CR_CALTMR_EN;
1887 	rtwn_pci_write_2(sc, R92C_CR, reg);
1888 
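	/* Clear the media status register (network type). */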
1889 	rtwn_pci_write_1(sc, R92C_MSR, 0);
1890 	return (0);
1891 }
1892 
1893 int
1894 rtwn_23a_power_on(struct rtwn_pci_softc *sc)
1895 {
1896 	uint32_t reg;
1897 	int ntries;
1898 
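	/* Unlock ISO/CLK/Power control and disable WL suspend/resume. */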
1899 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x00);
1900 
1901 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1902 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1903 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1904 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0x00);
1905 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1906 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APFM_RSM);
1907 
1908 	/* Wait for power ready bit. */
1909 	for (ntries = 0; ntries < 5000; ntries++) {
1910 		if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST)
1911 			break;
1912 		DELAY(10);
1913 	}
1914 	if (ntries == 5000) {
1915 		printf("%s: timeout waiting for chip power up\n",
1916 		    sc->sc_dev.dv_xname);
1917 		return (ETIMEDOUT);
1918 	}
1919 
1920 	/* Release WLON reset. */
1921 	rtwn_pci_write_4(sc, R92C_APS_FSMCO, rtwn_pci_read_4(sc, R92C_APS_FSMCO) |
1922 	    R92C_APS_FSMCO_RDY_MACON);
1923 	/* Disable HWPDN. */
1924 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1925 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN);
1926 	/* Disable WL suspend. */
1927 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1928 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1929 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1930 
1931 	/* Auto enable WLAN. */
1932 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1933 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1934 	for (ntries = 0; ntries < 5000; ntries++) {
1935 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1936 		    R92C_APS_FSMCO_APFM_ONMAC))
1937 			break;
1938 		DELAY(10);
1939 	}
1940 	if (ntries == 5000) {
1941 		printf("%s: timeout waiting for MAC auto ON (%x)\n",
1942 		    sc->sc_dev.dv_xname, rtwn_pci_read_2(sc, R92C_APS_FSMCO));
1943 		return (ETIMEDOUT);
1944 	}
1945 
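	/* XXX undocumented PCIe control bit. */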
1946 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2,
1947 	    rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04);
1948 
1949 	/* EMAC timeout. */
1950 	rtwn_pci_write_1(sc, 0x369, rtwn_pci_read_1(sc, 0x369) | 0x80);
1951 
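	/* Configure the PCIe ePHY via MDIO; retry until the value reads back. */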
1952 	for (ntries = 0; ntries < 100; ntries++) {
1953 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e);
1954 		DELAY(100);
1955 		rtwn_pci_write_2(sc, R92C_MDIO + 2, 0xc280);
1956 		rtwn_pci_write_2(sc, R92C_MDIO, 0xc290);
1957 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x3e);
1958 		DELAY(100);
1959 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e);
1960 		DELAY(100);
1961 		if (rtwn_pci_read_2(sc, R92C_MDIO + 2) == 0xc290)
1962 			break;
1963 	}
1964 	if (ntries == 100) {
1965 		printf("%s: timeout configuring ePHY\n", sc->sc_dev.dv_xname);
1966 		return (ETIMEDOUT);
1967 	}
1968 
1969 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1970 	rtwn_pci_write_2(sc, R92C_CR, 0);
1971 	reg = rtwn_pci_read_2(sc, R92C_CR);
1972 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1973 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1974 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1975 	    R92C_CR_ENSEC | R92C_CR_CALTMR_EN;
1976 	rtwn_pci_write_2(sc, R92C_CR, reg);
1977 
1978 	return (0);
1979 }
1980 
1981 int
1982 rtwn_power_on(void *cookie)
1983 {
1984 	struct rtwn_pci_softc *sc = cookie;
1985 
1986 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1987 		return (rtwn_88e_power_on(sc));
1988 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
1989 		return (rtwn_23a_power_on(sc));
1990 	else
1991 		return (rtwn_92c_power_on(sc));
1992 }
1993 
1994 int
1995 rtwn_dma_init(void *cookie)
1996 {
1997 	struct rtwn_pci_softc *sc = cookie;
1998 	uint32_t reg;
1999 	uint16_t dmasize;
2000 	int hqpages, lqpages, nqpages, pagecnt, boundary, trxdma, tcr;
2001 	int error;
2002 
2003 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
2004 		nqpages = R88E_NPQ_NPAGES;
2005 		hqpages = R88E_HPQ_NPAGES;
2006 		lqpages = R88E_LPQ_NPAGES;
2007 		pagecnt = R88E_TX_PAGE_COUNT;
2008 		boundary = R88E_TX_PAGE_BOUNDARY;
2009 		dmasize = R88E_MAX_RX_DMA_SIZE;
2010 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN3;
2011 		trxdma = 0xe771;
2012 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2013 		nqpages = R23A_NPQ_NPAGES;
2014 		hqpages = R23A_HPQ_NPAGES;
2015 		lqpages = R23A_LPQ_NPAGES;
2016 		pagecnt = R23A_TX_PAGE_COUNT;
2017 		boundary = R23A_TX_PAGE_BOUNDARY;
2018 		dmasize = R23A_MAX_RX_DMA_SIZE;
2019 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 |
2020 		    R92C_TCR_ERRSTEN1;
2021 		trxdma = 0xf771;
2022 	} else {
2023 		nqpages = R92C_NPQ_NPAGES;
2024 		hqpages = R92C_HPQ_NPAGES;
2025 		lqpages = R92C_LPQ_NPAGES;
2026 		pagecnt = R92C_TX_PAGE_COUNT;
2027 		boundary = R92C_TX_PAGE_BOUNDARY;
2028 		dmasize = R92C_MAX_RX_DMA_SIZE;
2029 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 |
2030 		    R92C_TCR_ERRSTEN1;
2031 		trxdma = 0xf771;
2032 	}
2033 
2034 	/* Initialize LLT table. */
2035 	error = rtwn_llt_init(sc, pagecnt);
2036 	if (error != 0)
2037 		return (error);
2038 
2039 	/* Set number of pages for normal priority queue. */
2040 	rtwn_pci_write_2(sc, R92C_RQPN_NPQ, nqpages);
2041 	rtwn_pci_write_4(sc, R92C_RQPN,
2042 	    /* Set number of pages for public queue. */
2043 	    SM(R92C_RQPN_PUBQ, pagecnt) |
2044 	    /* Set number of pages for high priority queue. */
2045 	    SM(R92C_RQPN_HPQ, hqpages) |
2046 	    /* Set number of pages for low priority queue. */
2047 	    SM(R92C_RQPN_LPQ, lqpages) |
2048 	    /* Load values. */
2049 	    R92C_RQPN_LD);
2050 
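	/* Program packet buffer boundaries. */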
2051 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, boundary);
2052 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, boundary);
2053 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD,
2054 	    boundary);
2055 	rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, boundary);
2056 	rtwn_pci_write_1(sc, R92C_TDECTRL + 1, boundary);
2057 
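	/* Set the Tx queue to DMA queue mapping. */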
2058 	reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL);
2059 	reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
2060 	reg |= trxdma;
2061 	rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg);
2062 
2063 	rtwn_pci_write_4(sc, R92C_TCR, tcr);
2064 
2065 	/* Configure Tx DMA. */
2066 	rtwn_pci_write_4(sc, R92C_BKQ_DESA,
2067 		sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
2068 	rtwn_pci_write_4(sc, R92C_BEQ_DESA,
2069 		sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
2070 	rtwn_pci_write_4(sc, R92C_VIQ_DESA,
2071 		sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
2072 	rtwn_pci_write_4(sc, R92C_VOQ_DESA,
2073 		sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
2074 	rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
2075 		sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
2076 	rtwn_pci_write_4(sc, R92C_MGQ_DESA,
2077 		sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
2078 	rtwn_pci_write_4(sc, R92C_HQ_DESA,
2079 		sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);
2080 
2081 	/* Configure Rx DMA. */
2082 	rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr);
2083 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG+1, 0);
2084 
2085 	/* Set Tx/Rx transfer page boundary. */
2086 	rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, dmasize - 1);
2087 
2088 	/* Set Tx/Rx transfer page size. */
2089 	rtwn_pci_write_1(sc, R92C_PBP,
2090 	    SM(R92C_PBP_PSRX, R92C_PBP_128) |
2091 	    SM(R92C_PBP_PSTX, R92C_PBP_128));
2092 
2093 	return (0);
2094 }
2095 
2096 int
2097 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len)
2098 {
2099 	struct rtwn_pci_softc *sc = cookie;
2100 	uint32_t reg;
2101 	int off, mlen, error = 0, i;
2102 
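	/* Select the firmware download page. */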
2103 	reg = rtwn_pci_read_4(sc, R92C_MCUFWDL);
2104 	reg = RW(reg, R92C_MCUFWDL_PAGE, page);
2105 	rtwn_pci_write_4(sc, R92C_MCUFWDL, reg);
2106 
2107 	DELAY(5);
2108 
2109 	off = R92C_FW_START_ADDR;
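	/* Copy the page into the chip's firmware buffer in chunks. */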
2110 	while (len > 0) {
2111 		if (len > 196)
2112 			mlen = 196;
2113 		else if (len > 4)
2114 			mlen = 4;
2115 		else
2116 			mlen = 1;
2117 		for (i = 0; i < mlen; i++)
2118 			rtwn_pci_write_1(sc, off++, buf[i]);
2119 		buf += mlen;
2120 		len -= mlen;
2121 	}
2122 
2123 	return (error);
2124 }
2125 
2126 int
2127 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len)
2128 {
2129 	struct rtwn_pci_softc *sc = cookie;
2130 	const char *name;
2131 	int error;
2132 
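	/* Pick the firmware image matching the chip variant. */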
2133 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
2134 		name = "rtwn-rtl8188efw";
2135 	else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2136 		if (sc->sc_sc.chip & RTWN_CHIP_UMC_A_CUT)
2137 			name = "rtwn-rtl8723fw";
2138 		else
2139 			name = "rtwn-rtl8723fw_B";
2140 	} else if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | RTWN_CHIP_92C)) ==
2141 	    RTWN_CHIP_UMC_A_CUT)
2142 		name = "rtwn-rtl8192cfwU";
2143 	else
2144 		name = "rtwn-rtl8192cfwU_B";
2145 
2146 	error = loadfirmware(name, fw, len);
2147 	if (error)
2148 		printf("%s: could not read firmware %s (error %d)\n",
2149 		    sc->sc_dev.dv_xname, name, error);
2150 	return (error);
2151 }
2152 
2153 void
2154 rtwn_mac_init(void *cookie)
2155 {
2156 	struct rtwn_pci_softc *sc = cookie;
2157 	int i;
2158 
2159 	/* Write MAC initialization values. */
2160 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
2161 		for (i = 0; i < nitems(rtl8188eu_mac); i++) {
2162 			if (rtl8188eu_mac[i].reg == R92C_GPIO_MUXCFG)
2163 				continue;
2164 			rtwn_pci_write_1(sc, rtl8188eu_mac[i].reg,
2165 			    rtl8188eu_mac[i].val);
2166 		}
2167 		rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x07);
2168 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2169 		for (i = 0; i < nitems(rtl8192cu_mac); i++) {
2170 			rtwn_pci_write_1(sc, rtl8192cu_mac[i].reg,
2171 			    rtl8192cu_mac[i].val);
2172 		}
2173 		rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x0a);
2174 	} else {
2175 		for (i = 0; i < nitems(rtl8192ce_mac); i++)
2176 			rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg,
2177 			    rtl8192ce_mac[i].val);
2178 	}
2179 }
2180 
2181 void
2182 rtwn_bb_init(void *cookie)
2183 {
2184 	struct rtwn_pci_softc *sc = cookie;
2185 	const struct r92c_bb_prog *prog;
2186 	uint32_t reg;
2187 	int i;
2188 
2189 	/* Enable BB and RF. */
2190 	rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN,
2191 	    rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) |
2192 	    R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
2193 	    R92C_SYS_FUNC_EN_DIO_RF);
2194 
2195 	if (!(sc->sc_sc.chip & RTWN_CHIP_88E))
2196 		rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);
2197 
2198 	rtwn_pci_write_1(sc, R92C_RF_CTRL,
2199 	    R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);
2200 
2201 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
2202 	    R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA |
2203 	    R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST |
2204 	    R92C_SYS_FUNC_EN_BBRSTB);
2205 
2206 	if (!(sc->sc_sc.chip & RTWN_CHIP_88E)) {
2207 		rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);
2208 	}
2209 
2210 	rtwn_pci_write_4(sc, R92C_LEDCFG0,
2211 	    rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000);
2212 
2213 	/* Select BB programming. */
2214 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
2215 		prog = &rtl8188eu_bb_prog;
2216 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
2217 		prog = &rtl8723a_bb_prog;
2218 	else if (!(sc->sc_sc.chip & RTWN_CHIP_92C))
2219 		prog = &rtl8192ce_bb_prog_1t;
2220 	else
2221 		prog = &rtl8192ce_bb_prog_2t;
2222 
2223 	/* Write BB initialization values. */
2224 	for (i = 0; i < prog->count; i++) {
2225 		rtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
2226 		DELAY(1);
2227 	}
2228 
2229 	if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) {
2230 		/* 8192C 1T only configuration. */
2231 		reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO);
2232 		reg = (reg & ~0x00000003) | 0x2;
2233 		rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);
2234 
2235 		reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO);
2236 		reg = (reg & ~0x00300033) | 0x00200022;
2237 		rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);
2238 
2239 		reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING);
2240 		reg = (reg & ~0xff000000) | 0x45 << 24;
2241 		rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);
2242 
2243 		reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
2244 		reg = (reg & ~0x000000ff) | 0x23;
2245 		rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);
2246 
2247 		reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
2248 		reg = (reg & ~0x00000030) | 1 << 4;
2249 		rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);
2250 
2251 		reg = rtwn_bb_read(sc, 0xe74);
2252 		reg = (reg & ~0x0c000000) | 2 << 26;
2253 		rtwn_bb_write(sc, 0xe74, reg);
2254 		reg = rtwn_bb_read(sc, 0xe78);
2255 		reg = (reg & ~0x0c000000) | 2 << 26;
2256 		rtwn_bb_write(sc, 0xe78, reg);
2257 		reg = rtwn_bb_read(sc, 0xe7c);
2258 		reg = (reg & ~0x0c000000) | 2 << 26;
2259 		rtwn_bb_write(sc, 0xe7c, reg);
2260 		reg = rtwn_bb_read(sc, 0xe80);
2261 		reg = (reg & ~0x0c000000) | 2 << 26;
2262 		rtwn_bb_write(sc, 0xe80, reg);
2263 		reg = rtwn_bb_read(sc, 0xe88);
2264 		reg = (reg & ~0x0c000000) | 2 << 26;
2265 		rtwn_bb_write(sc, 0xe88, reg);
2266 	}
2267 
2268 	/* Write AGC values. */
2269 	for (i = 0; i < prog->agccount; i++) {
2270 		rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
2271 		    prog->agcvals[i]);
2272 		DELAY(1);
2273 	}
2274 
2275 	if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR)
2276 		sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR;
2277 }
2278 
2279 void
2280 rtwn_calib_to(void *arg)
2281 {
2282 	struct rtwn_pci_softc *sc = arg;
2283 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
2284 	int s;
2285 
2286 	s = splnet();
2287 	ieee80211_amrr_choose(&sc->amrr, ic->ic_bss, &sc->amn);
2288 	splx(s);
2289 
2290 	rtwn_calib(&sc->sc_sc);
2291 }
2292 
2293 void
2294 rtwn_next_calib(void *cookie)
2295 {
2296 	struct rtwn_pci_softc *sc = cookie;
2297 
2298 	timeout_add_sec(&sc->calib_to, 2);
2299 }
2300 
2301 void
2302 rtwn_cancel_calib(void *cookie)
2303 {
2304 	struct rtwn_pci_softc *sc = cookie;
2305 
2306 	if (timeout_initialized(&sc->calib_to))
2307 		timeout_del(&sc->calib_to);
2308 }
2309 
2310 void
2311 rtwn_scan_to(void *arg)
2312 {
2313 	struct rtwn_pci_softc *sc = arg;
2314 
2315 	rtwn_next_scan(&sc->sc_sc);
2316 }
2317 
2318 void
2319 rtwn_pci_next_scan(void *cookie)
2320 {
2321 	struct rtwn_pci_softc *sc = cookie;
2322 
2323 	timeout_add_msec(&sc->scan_to, 200);
2324 }
2325 
2326 void
2327 rtwn_cancel_scan(void *cookie)
2328 {
2329 	struct rtwn_pci_softc *sc = cookie;
2330 
2331 	if (timeout_initialized(&sc->scan_to))
2332 		timeout_del(&sc->scan_to);
2333 }
2334 
2335 void
2336 rtwn_wait_async(void *cookie)
2337 {
2338 	/* nothing to do */
2339 }
2340 
2341 void
2342 rtwn_tx_report(struct rtwn_pci_softc *sc, uint8_t *buf, int len)
2343 {
2344 	struct r92c_c2h_tx_rpt *rpt = (struct r92c_c2h_tx_rpt *)buf;
2345 	int packets, tries, tx_ok, drop, expire, over;
2346 
2347 	if (len != sizeof(*rpt))
2348 		return;
2349 
2350 	if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2351 		struct r88e_tx_rpt_ccx *rxstat = (struct r88e_tx_rpt_ccx *)buf;
2352 
2353 		/*
2354 		 * We seem to get some garbage reports, so check that the
2355 		 * MAC ID makes sense.
2356 		 */
2357 		if (MS(rxstat->rptb1, R88E_RPTB1_MACID) != R92C_MACID_BSS) {
2358 			return;
2359 		}
2360 
2361 		packets = 1;
2362 		tx_ok = (rxstat->rptb1 & R88E_RPTB1_PKT_OK) ? 1 : 0;
2363 		tries = MS(rxstat->rptb2, R88E_RPTB2_RETRY_CNT);
2364 		expire = (rxstat->rptb2 & R88E_RPTB2_LIFE_EXPIRE);
2365 		over = (rxstat->rptb2 & R88E_RPTB2_RETRY_OVER);
2366 		drop = 0;
2367 	} else {
2368 		packets = MS(rpt->rptb6, R92C_RPTB6_RPT_PKT_NUM);
2369 		tries = MS(rpt->rptb0, R92C_RPTB0_RETRY_CNT);
2370 		tx_ok = (rpt->rptb7 & R92C_RPTB7_PKT_OK);
2371 		drop = (rpt->rptb6 & R92C_RPTB6_PKT_DROP);
2372 		expire = (rpt->rptb6 & R92C_RPTB6_LIFE_EXPIRE);
2373 		over = (rpt->rptb6 & R92C_RPTB6_RETRY_OVER);
2374 	}
2375 
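	/* Update AMRR rate control statistics. */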
2376 	if (packets > 0) {
2377 		sc->amn.amn_txcnt += packets;
2378 		if (!tx_ok || tries > 1 || drop || expire || over)
2379 			sc->amn.amn_retrycnt++;
2380 	}
2381 }
2382 
2383 void
2384 rtwn_poll_c2h_events(struct rtwn_pci_softc *sc)
2385 {
2386 	const uint16_t off = R92C_C2HEVT_MSG + sizeof(struct r92c_c2h_evt);
2387 	uint8_t buf[R92C_C2H_MSG_MAX_LEN];
2388 	uint8_t id, len, status;
2389 	int i;
2390 
2391 	/* Read current status. */
2392 	status = rtwn_pci_read_1(sc, R92C_C2HEVT_CLEAR);
2393 	if (status == R92C_C2HEVT_HOST_CLOSE)
2394 		return;	/* nothing to do */
2395 
2396 	if (status == R92C_C2HEVT_FW_CLOSE) {
2397 		len = rtwn_pci_read_1(sc, R92C_C2HEVT_MSG);
2398 		id = MS(len, R92C_C2H_EVTB0_ID);
2399 		len = MS(len, R92C_C2H_EVTB0_LEN);
2400 
2401 		if (id == R92C_C2HEVT_TX_REPORT && len <= sizeof(buf)) {
2402 			memset(buf, 0, sizeof(buf));
2403 			for (i = 0; i < len; i++)
2404 				buf[i] = rtwn_pci_read_1(sc, off + i);
2405 			rtwn_tx_report(sc, buf, len);
2406 		} else
2407 			DPRINTF(("unhandled C2H event %d (%d bytes)\n",
2408 			    id, len));
2409 	}
2410 
2411 	/* Prepare for next event. */
2412 	rtwn_pci_write_1(sc, R92C_C2HEVT_CLEAR, R92C_C2HEVT_HOST_CLOSE);
2413 }
2414