xref: /openbsd-src/sys/dev/pci/if_rtwn.c (revision 1ad61ae0a79a724d2d3ec69e69c8e1d1ff6b53a0)
1 /*	$OpenBSD: if_rtwn.c,v 1.41 2023/07/14 14:28:47 kevlo Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2015-2016 Andriy Voskoboinyk <avos@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * PCI front-end for Realtek RTL8188CE/RTL8188EE/RTL8192CE/RTL8723AE driver.
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/task.h>
34 #include <sys/timeout.h>
35 #include <sys/conf.h>
36 #include <sys/device.h>
37 #include <sys/endian.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #if NBPFILTER > 0
43 #include <net/bpf.h>
44 #endif
45 #include <net/if.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51 
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_amrr.h>
54 #include <net80211/ieee80211_radiotap.h>
55 
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58 #include <dev/pci/pcidevs.h>
59 
60 #include <dev/ic/r92creg.h>
61 #include <dev/ic/rtwnvar.h>
62 
63 /*
64  * Driver definitions.
65  */
66 
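/*
 * Tx packet-buffer accounting, one set of constants per chip variant.
 * The chip's internal Tx packet buffer is divided into pages shared
 * between a public queue (PUBQ) and dedicated high/low/normal priority
 * queues (HPQ/LPQ/NPQ); the page boundary is the first page past the
 * reserved Tx area.  The per-chip splits below appear to follow the
 * vendor reference drivers.
 */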
67 #define R92C_NPQ_NPAGES		0
68 #define R92C_PUBQ_NPAGES	176
69 #define R92C_HPQ_NPAGES		41
70 #define R92C_LPQ_NPAGES		28
71 #define R92C_TXPKTBUF_COUNT	256
72 #define R92C_TX_PAGE_COUNT	\
73 	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
74 #define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)
75 #define R92C_MAX_RX_DMA_SIZE	0x2800
76 
77 #define R88E_NPQ_NPAGES		0
78 #define R88E_PUBQ_NPAGES	116
79 #define R88E_HPQ_NPAGES		41
80 #define R88E_LPQ_NPAGES		13
81 #define R88E_TXPKTBUF_COUNT	176
82 #define R88E_TX_PAGE_COUNT	\
83 	(R88E_PUBQ_NPAGES + R88E_HPQ_NPAGES + R88E_LPQ_NPAGES)
84 #define R88E_TX_PAGE_BOUNDARY	(R88E_TX_PAGE_COUNT + 1)
85 #define R88E_MAX_RX_DMA_SIZE	0x2600
86 
87 #define R23A_NPQ_NPAGES		0
88 #define R23A_PUBQ_NPAGES	189
89 #define R23A_HPQ_NPAGES		28
90 #define R23A_LPQ_NPAGES		28
91 #define R23A_TXPKTBUF_COUNT	256
92 #define R23A_TX_PAGE_COUNT	\
93 	(R23A_PUBQ_NPAGES + R23A_HPQ_NPAGES + R23A_LPQ_NPAGES)
94 #define R23A_TX_PAGE_BOUNDARY	(R23A_TX_PAGE_COUNT + 1)
95 #define R23A_MAX_RX_DMA_SIZE	0x2800
96 
97 #define RTWN_NTXQUEUES			9
98 #define RTWN_RX_LIST_COUNT		256
99 #define RTWN_TX_LIST_COUNT		256
100 
101 /* TX queue indices. */
102 #define RTWN_BK_QUEUE			0
103 #define RTWN_BE_QUEUE			1
104 #define RTWN_VI_QUEUE			2
105 #define RTWN_VO_QUEUE			3
106 #define RTWN_BEACON_QUEUE		4
107 #define RTWN_TXCMD_QUEUE		5
108 #define RTWN_MGNT_QUEUE			6
109 #define RTWN_HIGH_QUEUE			7
110 #define RTWN_HCCA_QUEUE			8
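/*
 * The first four queue indices match the EDCA access categories
 * (BK/BE/VI/VO) returned by ieee80211_up_to_ac(), so QoS data frames
 * map directly onto their Tx ring in rtwn_tx().
 */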
111 
112 struct rtwn_rx_radiotap_header {
113 	struct ieee80211_radiotap_header wr_ihdr;
114 	uint8_t		wr_flags;
115 	uint8_t		wr_rate;
116 	uint16_t	wr_chan_freq;
117 	uint16_t	wr_chan_flags;
118 	uint8_t		wr_dbm_antsignal;
119 } __packed;
120 
121 #define RTWN_RX_RADIOTAP_PRESENT			\
122 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
123 	 1 << IEEE80211_RADIOTAP_RATE |			\
124 	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
125 	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
126 
127 struct rtwn_tx_radiotap_header {
128 	struct ieee80211_radiotap_header wt_ihdr;
129 	uint8_t		wt_flags;
130 	uint16_t	wt_chan_freq;
131 	uint16_t	wt_chan_flags;
132 } __packed;
133 
134 #define RTWN_TX_RADIOTAP_PRESENT			\
135 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
136 	 1 << IEEE80211_RADIOTAP_CHANNEL)
137 
138 struct rtwn_rx_data {
139 	bus_dmamap_t		map;
140 	struct mbuf		*m;
141 };
142 
143 struct rtwn_rx_ring {
144 	struct r92c_rx_desc_pci	*desc;
145 	bus_dmamap_t		map;
146 	bus_dma_segment_t	seg;
147 	int			nsegs;
148 	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];
149 };
150 
151 struct rtwn_tx_data {
152 	bus_dmamap_t			map;
153 	struct mbuf			*m;
154 	struct ieee80211_node		*ni;
155 };
156 
157 struct rtwn_tx_ring {
158 	bus_dmamap_t		map;
159 	bus_dma_segment_t	seg;
160 	int			nsegs;
161 	struct r92c_tx_desc_pci	*desc;
162 	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
163 	int			queued;
164 	int			cur;
165 };
166 
167 struct rtwn_pci_softc {
168 	struct device		sc_dev;
169 	struct rtwn_softc	sc_sc;
170 
171 	struct rtwn_rx_ring	rx_ring;
172 	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
173 	uint32_t		qfullmsk;
174 
175 	struct timeout		calib_to;
176 	struct timeout		scan_to;
177 
178 	/* PCI specific goo. */
179 	bus_dma_tag_t 		sc_dmat;
180 	pci_chipset_tag_t	sc_pc;
181 	pcitag_t		sc_tag;
182 	void			*sc_ih;
183 	bus_space_tag_t		sc_st;
184 	bus_space_handle_t	sc_sh;
185 	bus_size_t		sc_mapsize;
186 	int			sc_cap_off;
187 
188 	struct ieee80211_amrr		amrr;
189 	struct ieee80211_amrr_node	amn;
190 
191 #if NBPFILTER > 0
192 	caddr_t				sc_drvbpf;
193 
194 	union {
195 		struct rtwn_rx_radiotap_header th;
196 		uint8_t	pad[64];
197 	}				sc_rxtapu;
198 #define sc_rxtap	sc_rxtapu.th
199 	int				sc_rxtap_len;
200 
201 	union {
202 		struct rtwn_tx_radiotap_header th;
203 		uint8_t	pad[64];
204 	}				sc_txtapu;
205 #define sc_txtap	sc_txtapu.th
206 	int				sc_txtap_len;
207 #endif
208 };
209 
210 #ifdef RTWN_DEBUG
211 #define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
212 #define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
213 extern int rtwn_debug;
214 #else
215 #define DPRINTF(x)
216 #define DPRINTFN(n, x)
217 #endif
218 
219 /*
220  * PCI configuration space registers.
221  */
222 #define	RTWN_PCI_IOBA		0x10	/* i/o mapped base */
223 #define	RTWN_PCI_MMBA		0x18	/* memory mapped base */
224 
225 static const struct pci_matchid rtwn_pci_devices[] = {
226 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8188CE },
227 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8188EE },
228 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8192CE },
229 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8723AE }
230 };
231 
232 int		rtwn_pci_match(struct device *, void *, void *);
233 void		rtwn_pci_attach(struct device *, struct device *, void *);
234 int		rtwn_pci_detach(struct device *, int);
235 int		rtwn_pci_activate(struct device *, int);
236 int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
237 void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
238 void		rtwn_free_rx_list(struct rtwn_pci_softc *);
239 void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
240 		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
241 int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
242 void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
243 void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
244 void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
245 void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
246 void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
247 uint8_t		rtwn_pci_read_1(void *, uint16_t);
248 uint16_t	rtwn_pci_read_2(void *, uint16_t);
249 uint32_t	rtwn_pci_read_4(void *, uint16_t);
250 void		rtwn_rx_frame(struct rtwn_pci_softc *,
251 		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int,
252 		    struct mbuf_list *);
253 int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
254 void		rtwn_tx_done(struct rtwn_pci_softc *, int);
255 int		rtwn_alloc_buffers(void *);
256 int		rtwn_pci_init(void *);
257 void		rtwn_pci_88e_stop(struct rtwn_pci_softc *);
258 void		rtwn_pci_stop(void *);
259 int		rtwn_intr(void *);
260 int		rtwn_is_oactive(void *);
261 int		rtwn_92c_power_on(struct rtwn_pci_softc *);
262 int		rtwn_88e_power_on(struct rtwn_pci_softc *);
263 int		rtwn_23a_power_on(struct rtwn_pci_softc *);
264 int		rtwn_power_on(void *);
265 int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
266 int		rtwn_llt_init(struct rtwn_pci_softc *, int);
267 int		rtwn_dma_init(void *);
268 int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
269 int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
270 void		rtwn_mac_init(void *);
271 void		rtwn_bb_init(void *);
272 void		rtwn_calib_to(void *);
273 void		rtwn_next_calib(void *);
274 void		rtwn_cancel_calib(void *);
275 void		rtwn_scan_to(void *);
276 void		rtwn_pci_next_scan(void *);
277 void		rtwn_cancel_scan(void *);
278 void		rtwn_wait_async(void *);
279 void		rtwn_poll_c2h_events(struct rtwn_pci_softc *);
280 void		rtwn_tx_report(struct rtwn_pci_softc *, uint8_t *, int);
281 
282 /* Aliases. */
283 #define	rtwn_bb_write	rtwn_pci_write_4
284 #define rtwn_bb_read	rtwn_pci_read_4
285 
286 struct cfdriver rtwn_cd = {
287 	NULL, "rtwn", DV_IFNET
288 };
289 
290 const struct cfattach rtwn_pci_ca = {
291 	sizeof(struct rtwn_pci_softc),
292 	rtwn_pci_match,
293 	rtwn_pci_attach,
294 	rtwn_pci_detach,
295 	rtwn_pci_activate
296 };
297 
298 int
299 rtwn_pci_match(struct device *parent, void *match, void *aux)
300 {
301 	return (pci_matchbyid(aux, rtwn_pci_devices,
302 	    nitems(rtwn_pci_devices)));
303 }
304 
305 void
306 rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
307 {
308 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
309 	struct pci_attach_args *pa = aux;
310 	struct ifnet *ifp;
311 	int i, error;
312 	pcireg_t memtype;
313 	pci_intr_handle_t ih;
314 	const char *intrstr;
315 
316 	sc->sc_dmat = pa->pa_dmat;
317 	sc->sc_pc = pa->pa_pc;
318 	sc->sc_tag = pa->pa_tag;
319 
320 	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
321 	timeout_set(&sc->scan_to, rtwn_scan_to, sc);
322 
323 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
324 
325 	/* Map control/status registers. */
326 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
327 	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
328 	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
329 	if (error != 0) {
330 		printf(": can't map mem space\n");
331 		return;
332 	}
333 
334 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
335 		printf(": can't map interrupt\n");
336 		return;
337 	}
338 	intrstr = pci_intr_string(sc->sc_pc, ih);
339 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
340 	    rtwn_intr, sc, sc->sc_dev.dv_xname);
341 	if (sc->sc_ih == NULL) {
342 		printf(": can't establish interrupt");
343 		if (intrstr != NULL)
344 			printf(" at %s", intrstr);
345 		printf("\n");
346 		return;
347 	}
348 	printf(": %s\n", intrstr);
349 
350 	/* Disable PCIe Active State Power Management (ASPM). */
351 	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
352 	    &sc->sc_cap_off, NULL)) {
353 		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
354 		    sc->sc_cap_off + PCI_PCIE_LCSR);
355 		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
356 		pci_conf_write(sc->sc_pc, sc->sc_tag,
357 		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
358 	}
359 
360 	/* Allocate Tx/Rx buffers. */
361 	error = rtwn_alloc_rx_list(sc);
362 	if (error != 0) {
363 		printf("%s: could not allocate Rx buffers\n",
364 		    sc->sc_dev.dv_xname);
365 		return;
366 	}
367 	for (i = 0; i < RTWN_NTXQUEUES; i++) {
368 		error = rtwn_alloc_tx_list(sc, i);
369 		if (error != 0) {
370 			printf("%s: could not allocate Tx buffers\n",
371 			    sc->sc_dev.dv_xname);
372 			rtwn_free_rx_list(sc);
373 			return;
374 		}
375 	}
376 
377 	sc->amrr.amrr_min_success_threshold = 1;
378 	sc->amrr.amrr_max_success_threshold = 15;
379 
380 	/* Attach the bus-agnostic driver. */
381 	sc->sc_sc.sc_ops.cookie = sc;
382 	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
383 	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
384 	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
385 	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
386 	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
387 	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
388 	sc->sc_sc.sc_ops.tx = rtwn_tx;
389 	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
390 	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
391 	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
392 	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
393 	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
394 	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
395 	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
396 	sc->sc_sc.sc_ops.init = rtwn_pci_init;
397 	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
398 	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
399 	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
400 	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
401 	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
402 	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
403 	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
404 
405 	sc->sc_sc.chip = RTWN_CHIP_PCI;
406 	switch (PCI_PRODUCT(pa->pa_id)) {
407 	case PCI_PRODUCT_REALTEK_RTL8188CE:
408 	case PCI_PRODUCT_REALTEK_RTL8192CE:
409 		sc->sc_sc.chip |= RTWN_CHIP_88C | RTWN_CHIP_92C;
410 		break;
411 	case PCI_PRODUCT_REALTEK_RTL8188EE:
412 		sc->sc_sc.chip |= RTWN_CHIP_88E;
413 		break;
414 	case PCI_PRODUCT_REALTEK_RTL8723AE:
415 		sc->sc_sc.chip |= RTWN_CHIP_23A;
416 		break;
417 	}
418 
419 	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc);
420 	if (error != 0) {
421 		rtwn_free_rx_list(sc);
422 		for (i = 0; i < RTWN_NTXQUEUES; i++)
423 			rtwn_free_tx_list(sc, i);
424 		return;
425 	}
426 
427 	/* ifp is now valid */
428 	ifp = &sc->sc_sc.sc_ic.ic_if;
429 #if NBPFILTER > 0
430 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
431 	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
432 
433 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
434 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
435 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);
436 
437 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
438 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
439 	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
440 #endif
441 }
442 
443 int
444 rtwn_pci_detach(struct device *self, int flags)
445 {
446 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
447 	int s, i;
448 
449 	s = splnet();
450 
451 	if (timeout_initialized(&sc->calib_to))
452 		timeout_del(&sc->calib_to);
453 	if (timeout_initialized(&sc->scan_to))
454 		timeout_del(&sc->scan_to);
455 
456 	rtwn_detach(&sc->sc_sc, flags);
457 
458 	/* Free Tx/Rx buffers. */
459 	for (i = 0; i < RTWN_NTXQUEUES; i++)
460 		rtwn_free_tx_list(sc, i);
461 	rtwn_free_rx_list(sc);
462 	splx(s);
463 
464 	return (0);
465 }
466 
467 int
468 rtwn_pci_activate(struct device *self, int act)
469 {
470 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
471 
472 	return rtwn_activate(&sc->sc_sc, act);
473 }
474 
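/*
 * (Re)initialize an Rx descriptor and hand it back to the hardware.
 * The buffer address and length are written first; the OWN bit is only
 * set after a write barrier so the DMA engine never sees a partially
 * initialized descriptor.
 */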
475 void
476 rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
477     bus_addr_t addr, size_t len, int idx)
478 {
479 	memset(desc, 0, sizeof(*desc));
480 	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
481 		((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
482 	desc->rxbufaddr = htole32(addr);
483 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
484 	    BUS_SPACE_BARRIER_WRITE);
485 	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
486 }
487 
488 int
489 rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
490 {
491 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
492 	struct rtwn_rx_data *rx_data;
493 	size_t size;
494 	int i, error = 0;
495 
496 	/* Allocate Rx descriptors. */
497 	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
498 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
499 		&rx_ring->map);
500 	if (error != 0) {
501 		printf("%s: could not create rx desc DMA map\n",
502 		    sc->sc_dev.dv_xname);
503 		rx_ring->map = NULL;
504 		goto fail;
505 	}
506 
507 	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
508 	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
509 	if (error != 0) {
510 		printf("%s: could not allocate rx desc\n",
511 		    sc->sc_dev.dv_xname);
512 		goto fail;
513 	}
514 
515 	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
516 	    size, (caddr_t *)&rx_ring->desc,
517 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
518 	if (error != 0) {
519 		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
520 		rx_ring->desc = NULL;
521 		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
522 		goto fail;
523 	}
524 
525 	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
526 	    1, size, BUS_DMA_NOWAIT);
527 	if (error != 0) {
528 		printf("%s: could not load rx desc\n",
529 		    sc->sc_dev.dv_xname);
530 		goto fail;
531 	}
532 
533 	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
534 	    BUS_DMASYNC_PREWRITE);
535 
536 	/* Allocate Rx buffers. */
537 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
538 		rx_data = &rx_ring->rx_data[i];
539 
540 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
541 		    0, BUS_DMA_NOWAIT, &rx_data->map);
542 		if (error != 0) {
543 			printf("%s: could not create rx buf DMA map\n",
544 			    sc->sc_dev.dv_xname);
545 			goto fail;
546 		}
547 
548 		rx_data->m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
549 		if (rx_data->m == NULL) {
550 			printf("%s: could not allocate rx mbuf\n",
551 			    sc->sc_dev.dv_xname);
552 			error = ENOMEM;
553 			goto fail;
554 		}
555 
556 		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
557 		    mtod(rx_data->m, void *), MCLBYTES, NULL,
558 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
559 		if (error != 0) {
560 			printf("%s: could not load rx buf DMA map\n",
561 			    sc->sc_dev.dv_xname);
562 			goto fail;
563 		}
564 
565 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
566 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
567 	}
568 fail:	if (error != 0)
569 		rtwn_free_rx_list(sc);
570 	return (error);
571 }
572 
573 void
574 rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
575 {
576 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
577 	struct rtwn_rx_data *rx_data;
578 	int i;
579 
580 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
581 		rx_data = &rx_ring->rx_data[i];
582 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
583 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
584 	}
585 }
586 
587 void
588 rtwn_free_rx_list(struct rtwn_pci_softc *sc)
589 {
590 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
591 	struct rtwn_rx_data *rx_data;
592 	int i, s;
593 
594 	s = splnet();
595 
596 	if (rx_ring->map) {
597 		if (rx_ring->desc) {
598 			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
599 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
600 			    sizeof (struct r92c_rx_desc_pci) *
601 			    RTWN_RX_LIST_COUNT);
602 			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
603 			    rx_ring->nsegs);
604 			rx_ring->desc = NULL;
605 		}
606 		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
607 		rx_ring->map = NULL;
608 	}
609 
610 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
611 		rx_data = &rx_ring->rx_data[i];
612 
613 		if (rx_data->m != NULL) {
614 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
615 			m_freem(rx_data->m);
616 			rx_data->m = NULL;
617 		}
618 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
619 		rx_data->map = NULL;
620 	}
621 
622 	splx(s);
623 }
624 
625 int
626 rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
627 {
628 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
629 	struct rtwn_tx_data *tx_data;
630 	int i = 0, error = 0;
631 
632 	error = bus_dmamap_create(sc->sc_dmat,
633 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
634 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
635 	    BUS_DMA_NOWAIT, &tx_ring->map);
636 	if (error != 0) {
637 		printf("%s: could not create tx ring DMA map\n",
638 		    sc->sc_dev.dv_xname);
639 		goto fail;
640 	}
641 
642 	error = bus_dmamem_alloc(sc->sc_dmat,
643 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
644 	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
645 	if (error != 0) {
646 		printf("%s: could not allocate tx ring DMA memory\n",
647 		    sc->sc_dev.dv_xname);
648 		goto fail;
649 	}
650 
651 	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
652 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
653 	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
654 	if (error != 0) {
655 		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
656 		printf("%s: can't map tx ring DMA memory\n",
657 		    sc->sc_dev.dv_xname);
658 		goto fail;
659 	}
660 
661 	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
662 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
663 	    BUS_DMA_NOWAIT);
664 	if (error != 0) {
665 		printf("%s: could not load tx ring DMA map\n",
666 		    sc->sc_dev.dv_xname);
667 		goto fail;
668 	}
669 
670 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
671 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
672 
673 		/* Link this descriptor to the next one, forming a circular ring. */
674 		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
675 		  + sizeof(struct r92c_tx_desc_pci)
676 		  * ((i + 1) % RTWN_TX_LIST_COUNT));
677 
678 		tx_data = &tx_ring->tx_data[i];
679 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
680 		    0, BUS_DMA_NOWAIT, &tx_data->map);
681 		if (error != 0) {
682 			printf("%s: could not create tx buf DMA map\n",
683 			    sc->sc_dev.dv_xname);
684 			goto fail;
685 		}
686 		tx_data->m = NULL;
687 		tx_data->ni = NULL;
688 	}
689 fail:
690 	if (error != 0)
691 		rtwn_free_tx_list(sc, qid);
692 	return (error);
693 }
694 
695 void
696 rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
697 {
698 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
699 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
700 	int i;
701 
702 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
703 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
704 		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
705 
706 		memset(desc, 0, sizeof(*desc) -
707 		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
708 		    sizeof(desc->nextdescaddr)));
709 
710 		if (tx_data->m != NULL) {
711 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
712 			m_freem(tx_data->m);
713 			tx_data->m = NULL;
714 			ieee80211_release_node(ic, tx_data->ni);
715 			tx_data->ni = NULL;
716 		}
717 	}
718 
719 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
720 	    BUS_DMASYNC_POSTWRITE);
721 
722 	sc->qfullmsk &= ~(1 << qid);
723 	tx_ring->queued = 0;
724 	tx_ring->cur = 0;
725 }
726 
727 void
728 rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
729 {
730 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
731 	struct rtwn_tx_data *tx_data;
732 	int i;
733 
734 	if (tx_ring->map != NULL) {
735 		if (tx_ring->desc != NULL) {
736 			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
737 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
738 			    sizeof (struct r92c_tx_desc_pci) *
739 			    RTWN_TX_LIST_COUNT);
740 			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
741 		}
742 		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
743 	}
744 
745 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
746 		tx_data = &tx_ring->tx_data[i];
747 
748 		if (tx_data->m != NULL) {
749 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
750 			m_freem(tx_data->m);
751 			tx_data->m = NULL;
752 		}
753 		bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
754 	}
755 
756 	sc->qfullmsk &= ~(1 << qid);
757 	tx_ring->queued = 0;
758 	tx_ring->cur = 0;
759 }
760 
761 void
762 rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
763 {
764 	struct rtwn_pci_softc *sc = cookie;
765 
766 	bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
767 }
768 
769 void
770 rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
771 {
772 	struct rtwn_pci_softc *sc = cookie;
773 
774 	val = htole16(val);
775 	bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
776 }
777 
778 void
779 rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
780 {
781 	struct rtwn_pci_softc *sc = cookie;
782 
783 	val = htole32(val);
784 	bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
785 }
786 
787 uint8_t
788 rtwn_pci_read_1(void *cookie, uint16_t addr)
789 {
790 	struct rtwn_pci_softc *sc = cookie;
791 
792 	return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
793 }
794 
795 uint16_t
796 rtwn_pci_read_2(void *cookie, uint16_t addr)
797 {
798 	struct rtwn_pci_softc *sc = cookie;
799 	uint16_t val;
800 
801 	val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
802 	return le16toh(val);
803 }
804 
805 uint32_t
806 rtwn_pci_read_4(void *cookie, uint16_t addr)
807 {
808 	struct rtwn_pci_softc *sc = cookie;
809 	uint32_t val;
810 
811 	val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
812 	return le32toh(val);
813 }
814 
815 void
816 rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
817     struct rtwn_rx_data *rx_data, int desc_idx, struct mbuf_list *ml)
818 {
819 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
820 	struct ifnet *ifp = &ic->ic_if;
821 	struct ieee80211_rxinfo rxi;
822 	struct ieee80211_frame *wh;
823 	struct ieee80211_node *ni;
824 	struct r92c_rx_phystat *phy = NULL;
825 	uint32_t rxdw0, rxdw3;
826 	struct mbuf *m, *m1;
827 	uint8_t rate;
828 	int8_t rssi = 0;
829 	int infosz, pktlen, shift, error;
830 
831 	rxdw0 = letoh32(rx_desc->rxdw0);
832 	rxdw3 = letoh32(rx_desc->rxdw3);
833 
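	/*
	 * On RTL8188EE the Rx ring also carries Tx status (CCX) reports.
	 * Feed them to the AMRR statistics and recycle the descriptor;
	 * nothing is passed up the stack for these.
	 */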
834 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
835 		int ntries, type;
836 		struct r88e_tx_rpt_ccx *rxstat;
837 
838 		type = MS(rxdw3, R88E_RXDW3_RPT);
839 		if (type == R88E_RXDW3_RPT_TX1) {
840 			uint32_t rptb1, rptb2;
841 
842 			rxstat = mtod(rx_data->m, struct r88e_tx_rpt_ccx *);
843 			rptb1 = letoh32(rxstat->rptb1);
844 			rptb2 = letoh32(rxstat->rptb2);
845 			ntries = MS(rptb2, R88E_RPTB2_RETRY_CNT);
846 			if (rptb1 & R88E_RPTB1_PKT_OK)
847 				sc->amn.amn_txcnt++;
848 			if (ntries > 0)
849 				sc->amn.amn_retrycnt++;
850 
851 			rtwn_setup_rx_desc(sc, rx_desc,
852 			    rx_data->map->dm_segs[0].ds_addr, MCLBYTES,
853 			    desc_idx);
854 			return;
855 		}
856 	}
857 
858 	if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
859 		/*
860 		 * This should not happen since we set up our Rx filter
861 		 * to not receive these frames.
862 		 */
863 		ifp->if_ierrors++;
864 		return;
865 	}
866 
867 	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
868 	if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
869 		ifp->if_ierrors++;
870 		return;
871 	}
872 
873 	rate = MS(rxdw3, R92C_RXDW3_RATE);
874 	infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
875 	if (infosz > sizeof(struct r92c_rx_phystat))
876 		infosz = sizeof(struct r92c_rx_phystat);
877 	shift = MS(rxdw0, R92C_RXDW0_SHIFT);
878 
879 	/* Get RSSI from PHY status descriptor if present. */
880 	if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
881 		phy = mtod(rx_data->m, struct r92c_rx_phystat *);
882 		rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
883 		/* Update our average RSSI. */
884 		rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
885 	}
886 
887 	DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
888 	    pktlen, rate, infosz, shift, rssi));
889 
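	/*
	 * Allocate a replacement cluster before handing the received mbuf
	 * to the stack; if the new cluster cannot be mapped, reload the
	 * old one and drop the frame so the ring never loses a buffer.
	 */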
890 	m1 = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
891 	if (m1 == NULL) {
892 		ifp->if_ierrors++;
893 		return;
894 	}
895 	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
896 	error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
897 	    mtod(m1, void *), MCLBYTES, NULL,
898 	    BUS_DMA_NOWAIT | BUS_DMA_READ);
899 	if (error != 0) {
900 		m_freem(m1);
901 
902 		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
903 		    rx_data->m, BUS_DMA_NOWAIT))
904 			panic("%s: could not load old RX mbuf",
905 			    sc->sc_dev.dv_xname);
906 
907 		/* Physical address may have changed. */
908 		rtwn_setup_rx_desc(sc, rx_desc,
909 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx);
910 
911 		ifp->if_ierrors++;
912 		return;
913 	}
914 
915 	/* Finalize mbuf. */
916 	m = rx_data->m;
917 	rx_data->m = m1;
918 	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
919 
920 	/* Update RX descriptor. */
921 	rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr,
922 	    MCLBYTES, desc_idx);
923 
924 	/* Get ieee80211 frame header. */
925 	if (rxdw0 & R92C_RXDW0_PHYST)
926 		m_adj(m, infosz + shift);
927 	else
928 		m_adj(m, shift);
929 	wh = mtod(m, struct ieee80211_frame *);
930 
931 #if NBPFILTER > 0
932 	if (__predict_false(sc->sc_drvbpf != NULL)) {
933 		struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
934 		struct mbuf mb;
935 
936 		tap->wr_flags = 0;
937 		/* Map HW rate index to 802.11 rate. */
939 		if (!(rxdw3 & R92C_RXDW3_HT)) {
940 			switch (rate) {
941 			/* CCK. */
942 			case  0: tap->wr_rate =   2; break;
943 			case  1: tap->wr_rate =   4; break;
944 			case  2: tap->wr_rate =  11; break;
945 			case  3: tap->wr_rate =  22; break;
946 			/* OFDM. */
947 			case  4: tap->wr_rate =  12; break;
948 			case  5: tap->wr_rate =  18; break;
949 			case  6: tap->wr_rate =  24; break;
950 			case  7: tap->wr_rate =  36; break;
951 			case  8: tap->wr_rate =  48; break;
952 			case  9: tap->wr_rate =  72; break;
953 			case 10: tap->wr_rate =  96; break;
954 			case 11: tap->wr_rate = 108; break;
955 			}
956 		} else if (rate >= 12) {	/* MCS0~15. */
957 			/* Bit 7 set means HT MCS instead of rate. */
958 			tap->wr_rate = 0x80 | (rate - 12);
959 		}
960 		tap->wr_dbm_antsignal = rssi;
961 		tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq);
962 		tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags);
963 
964 		mb.m_data = (caddr_t)tap;
965 		mb.m_len = sc->sc_rxtap_len;
966 		mb.m_next = m;
967 		mb.m_nextpkt = NULL;
968 		mb.m_type = 0;
969 		mb.m_flags = 0;
970 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
971 	}
972 #endif
973 
974 	ni = ieee80211_find_rxnode(ic, wh);
975 	memset(&rxi, 0, sizeof(rxi));
976 	rxi.rxi_rssi = rssi;
977 	ieee80211_inputm(ifp, m, ni, &rxi, ml);
978 	/* Node is no longer needed. */
979 	ieee80211_release_node(ic, ni);
980 }
981 
982 int
983 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni)
984 {
985 	struct rtwn_pci_softc *sc = cookie;
986 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
987 	struct ieee80211_frame *wh;
988 	struct ieee80211_key *k = NULL;
989 	struct rtwn_tx_ring *tx_ring;
990 	struct rtwn_tx_data *data;
991 	struct r92c_tx_desc_pci *txd;
992 	uint16_t qos;
993 	uint8_t raid, type, tid, qid;
994 	int hasqos, error;
995 
996 	wh = mtod(m, struct ieee80211_frame *);
997 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
998 
999 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1000 		k = ieee80211_get_txkey(ic, wh, ni);
1001 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
1002 			return (ENOBUFS);
1003 		wh = mtod(m, struct ieee80211_frame *);
1004 	}
1005 
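	/*
	 * Pick a Tx ring: QoS data frames go to the ring matching their
	 * EDCA access category, other data frames to the BE ring, and
	 * management/control frames to the VO ring.
	 */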
1006 	if ((hasqos = ieee80211_has_qos(wh))) {
1007 		qos = ieee80211_get_qos(wh);
1008 		tid = qos & IEEE80211_QOS_TID;
1009 		qid = ieee80211_up_to_ac(ic, tid);
1010 	} else if (type != IEEE80211_FC0_TYPE_DATA) {
1011 		qid = RTWN_VO_QUEUE;
1012 	} else
1013 		qid = RTWN_BE_QUEUE;
1014 
1015 	/* Grab a Tx buffer from the ring. */
1016 	tx_ring = &sc->tx_ring[qid];
1017 	data = &tx_ring->tx_data[tx_ring->cur];
1018 	if (data->m != NULL) {
1019 		m_freem(m);
1020 		return (ENOBUFS);
1021 	}
1022 
1023 	/* Fill Tx descriptor. */
1024 	txd = &tx_ring->desc[tx_ring->cur];
1025 	if (letoh32(txd->txdw0) & R92C_TXDW0_OWN) {
1026 		m_freem(m);
1027 		return (ENOBUFS);
1028 	}
1029 	txd->txdw0 = htole32(
1030 	    SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) |
1031 	    SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
1032 	    R92C_TXDW0_FSG | R92C_TXDW0_LSG);
1033 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
1034 		txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
1035 
1036 	txd->txdw1 = 0;
1037 #ifdef notyet
1038 	if (k != NULL) {
1039 		switch (k->k_cipher) {
1040 		case IEEE80211_CIPHER_WEP40:
1041 		case IEEE80211_CIPHER_WEP104:
1042 		case IEEE80211_CIPHER_TKIP:
1043 			cipher = R92C_TXDW1_CIPHER_RC4;
1044 			break;
1045 		case IEEE80211_CIPHER_CCMP:
1046 			cipher = R92C_TXDW1_CIPHER_AES;
1047 			break;
1048 		default:
1049 			cipher = R92C_TXDW1_CIPHER_NONE;
1050 		}
1051 		txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher));
1052 	}
1053 #endif
1054 	txd->txdw4 = 0;
1055 	txd->txdw5 = 0;
1056 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
1057 	    type == IEEE80211_FC0_TYPE_DATA) {
1058 		if (ic->ic_curmode == IEEE80211_MODE_11B ||
1059 		    (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B))
1060 			raid = R92C_RAID_11B;
1061 		else
1062 			raid = R92C_RAID_11BG;
1063 
1064 		if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1065 			txd->txdw1 |= htole32(
1066 			    SM(R88E_TXDW1_MACID, R92C_MACID_BSS) |
1067 			    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1068 			    SM(R92C_TXDW1_RAID, raid));
1069 			txd->txdw2 |= htole32(R88E_TXDW2_AGGBK);
1070 		} else {
1071 			txd->txdw1 |= htole32(
1072 			    SM(R92C_TXDW1_MACID, R92C_MACID_BSS) |
1073 			    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1074 			    SM(R92C_TXDW1_RAID, raid) |
1075 			    R92C_TXDW1_AGGBK);
1076 		}
1077 
1078 		/* Request TX status report for AMRR. */
1079 		txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT);
1080 
1081 		if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
1082 			txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1083 			    R92C_TXDW4_HWRTSEN);
1084 		} else if (ic->ic_flags & IEEE80211_F_USEPROT) {
1085 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1086 				txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
1087 				    R92C_TXDW4_HWRTSEN);
1088 			} else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1089 				txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1090 				    R92C_TXDW4_HWRTSEN);
1091 			}
1092 		}
1093 
1094 		if (ic->ic_curmode == IEEE80211_MODE_11B)
1095 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 0));
1096 		else
1097 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 8));
1098 		txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf));
1099 
1100 		/* Use AMRR rate for data. */
1101 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1102 		if (ic->ic_fixed_rate != -1)
1103 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1104 			    ic->ic_fixed_rate));
1105 		else
1106 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1107 			    ni->ni_txrate));
1108 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));
1109 	} else {
1110 		txd->txdw1 |= htole32(
1111 		    SM(R92C_TXDW1_MACID, 0) |
1112 		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
1113 		    SM(R92C_TXDW1_RAID, R92C_RAID_11B));
1114 
1115 		/* Force CCK1. */
1116 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1117 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
1118 	}
1119 	/* Set sequence number (already little endian). */
1120 	txd->txdseq = (*(uint16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
1121 	if (sc->sc_sc.chip & RTWN_CHIP_23A)
1122 		txd->txdseq |= htole16(R23A_TXDW3_TXRPTEN);
1123 
1124 	if (!hasqos) {
1125 		/* Use HW sequence numbering for non-QoS frames. */
1126 		if (!(sc->sc_sc.chip & RTWN_CHIP_23A))
1127 			txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ);
1128 		txd->txdseq |= htole16(R92C_TXDW3_HWSEQEN);
1129 	} else
1130 		txd->txdw4 |= htole32(R92C_TXDW4_QOS);
1131 
1132 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1133 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1134 	if (error && error != EFBIG) {
1135 		printf("%s: can't map mbuf (error %d)\n",
1136 		    sc->sc_dev.dv_xname, error);
1137 		m_freem(m);
1138 		return error;
1139 	}
1140 	if (error != 0) {
1141 		/* Too many DMA segments, linearize mbuf. */
1142 		if (m_defrag(m, M_DONTWAIT)) {
1143 			m_freem(m);
1144 			return ENOBUFS;
1145 		}
1146 
1147 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1148 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1149 		if (error != 0) {
1150 			printf("%s: can't map mbuf (error %d)\n",
1151 			    sc->sc_dev.dv_xname, error);
1152 			m_freem(m);
1153 			return error;
1154 		}
1155 	}
1156 
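	/*
	 * Fill in the buffer address and size, then flush the descriptor
	 * writes before setting the OWN bit so the chip only ever sees a
	 * fully formed descriptor.
	 */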
1157 	txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
1158 	txd->txbufsize = htole16(m->m_pkthdr.len);
1159 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
1160 	    BUS_SPACE_BARRIER_WRITE);
1161 	txd->txdw0 |= htole32(R92C_TXDW0_OWN);
1162 
1163 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1164 	    BUS_DMASYNC_POSTWRITE);
1165 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
1166 	    BUS_DMASYNC_POSTWRITE);
1167 
1168 	data->m = m;
1169 	data->ni = ni;
1170 
1171 #if NBPFILTER > 0
1172 	if (__predict_false(sc->sc_drvbpf != NULL)) {
1173 		struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
1174 		struct mbuf mb;
1175 
1176 		tap->wt_flags = 0;
1177 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1178 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1179 
1180 		mb.m_data = (caddr_t)tap;
1181 		mb.m_len = sc->sc_txtap_len;
1182 		mb.m_next = m;
1183 		mb.m_nextpkt = NULL;
1184 		mb.m_type = 0;
1185 		mb.m_flags = 0;
1186 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1187 	}
1188 #endif
1189 
1190 	tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
1191 	tx_ring->queued++;
1192 
1193 	if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
1194 		sc->qfullmsk |= (1 << qid);
1195 
1196 	/* Kick TX. */
1197 	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));
1198 
1199 	return (0);
1200 }
1201 
1202 void
1203 rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
1204 {
1205 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1206 	struct ifnet *ifp = &ic->ic_if;
1207 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
1208 	struct rtwn_tx_data *tx_data;
1209 	struct r92c_tx_desc_pci *tx_desc;
1210 	int i;
1211 
1212 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1213 	    BUS_DMASYNC_POSTREAD);
1214 
1215 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
1216 		tx_data = &tx_ring->tx_data[i];
1217 		if (tx_data->m == NULL)
1218 			continue;
1219 
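		/* Skip descriptors the hardware has not released yet. */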
1220 		tx_desc = &tx_ring->desc[i];
1221 		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
1222 			continue;
1223 
1224 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
1225 		m_freem(tx_data->m);
1226 		tx_data->m = NULL;
1227 		ieee80211_release_node(ic, tx_data->ni);
1228 		tx_data->ni = NULL;
1229 
1230 		sc->sc_sc.sc_tx_timer = 0;
1231 		tx_ring->queued--;
1232 
1233 		if (!(sc->sc_sc.chip & RTWN_CHIP_23A))
1234 			rtwn_poll_c2h_events(sc);
1235 	}
1236 
1237 	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
1238 		sc->qfullmsk &= ~(1 << qid);
1239 
1240 	if (sc->qfullmsk == 0) {
1241 		ifq_clr_oactive(&ifp->if_snd);
1242 		(*ifp->if_start)(ifp);
1243 	}
1244 }
1245 
1246 int
1247 rtwn_alloc_buffers(void *cookie)
1248 {
1249 	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
1250 	return (0);
1251 }
1252 
1253 int
1254 rtwn_pci_init(void *cookie)
1255 {
1256 	struct rtwn_pci_softc *sc = cookie;
1257 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
1258 
1259 	/* Enable TX reports for AMRR */
1260 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1261 		rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL,
1262 		    (rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) & ~0) |
1263 		    R88E_TX_RPT_CTRL_EN);
1264 		rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL + 1, 0x02);
1265 
1266 		rtwn_pci_write_2(sc, R88E_TX_RPT_TIME, 0xcdf0);
1267 	}
1268 
1269 	return (0);
1270 }
1271 
1272 void
1273 rtwn_pci_92c_stop(struct rtwn_pci_softc *sc)
1274 {
1275 	uint16_t reg;
1276 
1277 	/* Disable interrupts. */
1278 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1279 
1280 	/* Stop hardware. */
1281 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1282 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1283 	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
1284 	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
1285 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1286 	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
1287 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1288 	reg = rtwn_pci_read_2(sc, R92C_CR);
1289 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1290 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1291 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1292 	    R92C_CR_ENSEC);
1293 	rtwn_pci_write_2(sc, R92C_CR, reg);
1294 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1295 		rtwn_fw_reset(&sc->sc_sc);
1296 	/* TODO: linux does additional btcoex stuff here */
1297 	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80); /* linux magic number */
1298 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23); /* ditto */
1299 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e); /* differs in btcoex */
1300 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1301 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1302 	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);
1303 }
1304 
1305 void
1306 rtwn_pci_88e_stop(struct rtwn_pci_softc *sc)
1307 {
1308 	int i;
1309 	uint16_t reg;
1310 
1311 	/* Disable interrupts. */
1312 	rtwn_pci_write_4(sc, R88E_HIMR, 0x00000000);
1313 
1314 	/* Stop hardware. */
1315 	rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL,
1316 	    rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) &
1317 	    ~(R88E_TX_RPT_CTRL_EN));
1318 
1319 	for (i = 0; i < 100; i++) {
1320 		if (rtwn_pci_read_1(sc, R88E_RXDMA_CTRL) & 0x02)
1321 			break;
1322 		DELAY(10);
1323 	}
1324 	if (i == 100)
1325 		DPRINTF(("rxdma ctrl didn't go off, %x\n", rtwn_pci_read_1(sc, R88E_RXDMA_CTRL)));
1326 
1327 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff);
1328 
1329 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1330 
1331 	/* ensure transmission has stopped */
1332 	for (i = 0; i < 100; i++) {
1333 		if (rtwn_pci_read_4(sc, 0x5f8) == 0)
1334 			break;
1335 		DELAY(10);
1336 	}
1337 	if (i == 100)
1338 		DPRINTF(("tx didn't stop\n"));
1339 
1340 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1341 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1342 	    ~(R92C_SYS_FUNC_EN_BBRSTB));
1343 	DELAY(1);
1344 	reg = rtwn_pci_read_2(sc, R92C_CR);
1345 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1346 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1347 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1348 	    R92C_CR_ENSEC);
1349 	rtwn_pci_write_2(sc, R92C_CR, reg);
1350 	rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST,
1351 	    rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20);
1352 
1353 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1354 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1355 		rtwn_fw_reset(&sc->sc_sc);
1356 
1357 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1,
1358 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~0x02);
1359 	rtwn_pci_write_1(sc, R92C_MCUFWDL, 0);
1360 
1361 	rtwn_pci_write_1(sc, R88E_32K_CTRL,
1362 	    rtwn_pci_read_1(sc, R88E_32K_CTRL) & ~(0x01));
1363 
1364 	/* transition to cardemu state */
1365 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0);
1366 	rtwn_pci_write_1(sc, R92C_LPLDO_CTRL,
1367 	    rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) | 0x10);
1368 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1369 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_OFF);
1370 	for (i = 0; i < 100; i++) {
1371 		if ((rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1372 		    R92C_APS_FSMCO_APFM_OFF) == 0)
1373 			break;
1374 		DELAY(10);
1375 	}
1376 	if (i == 100)
1377 		DPRINTF(("apfm off didn't go off\n"));
1378 
1379 	/* transition to card disabled state */
1380 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2,
1381 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80);
1382 
1383 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1384 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08);
1385 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1386 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08);
1387 
1388 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1389 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1390 }
1391 
1392 void
1393 rtwn_pci_23a_stop(struct rtwn_pci_softc *sc)
1394 {
1395 	int i;
1396 
1397 	/* Disable interrupts. */
1398 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1399 
1400 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff);
1401 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1402 
1403 	/* ensure transmission has stopped */
1404 	for (i = 0; i < 100; i++) {
1405 		if (rtwn_pci_read_4(sc, 0x5f8) == 0)
1406 			break;
1407 		DELAY(10);
1408 	}
1409 	if (i == 100)
1410 		DPRINTF(("tx didn't stop\n"));
1411 
1412 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1413 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1414 	    ~(R92C_SYS_FUNC_EN_BBRSTB));
1415 	DELAY(1);
1416 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1417 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1418 	    ~(R92C_SYS_FUNC_EN_BB_GLB_RST));
1419 
1420 	rtwn_pci_write_2(sc, R92C_CR,
1421 	    rtwn_pci_read_2(sc, R92C_CR) &
1422 	    ~(R92C_CR_MACTXEN | R92C_CR_MACRXEN | R92C_CR_ENSWBCN));
1423 
1424 	rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST,
1425 	    rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20);
1426 
1427 	/* Turn off RF */
1428 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1429 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1430 		rtwn_fw_reset(&sc->sc_sc);
1431 
1432 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1,
1433 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~R92C_SYS_FUNC_EN_DIOE);
1434 	rtwn_pci_write_1(sc, R92C_MCUFWDL, 0);
1435 
1436 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1437 	rtwn_pci_write_1(sc, R92C_LEDCFG2, rtwn_pci_read_1(sc, R92C_LEDCFG2) & ~(0x80));
1438 	rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) |
1439 	    R92C_APS_FSMCO_APFM_OFF);
1440 	rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1441 	    ~(R92C_APS_FSMCO_APFM_OFF));
1442 
1443 	rtwn_pci_write_4(sc, R92C_APS_FSMCO,
1444 	    rtwn_pci_read_4(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_RDY_MACON);
1445 	rtwn_pci_write_4(sc, R92C_APS_FSMCO,
1446 	    rtwn_pci_read_4(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APDM_HPDN);
1447 
1448 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1449 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08);
1450 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1451 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08);
1452 
1453 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1454 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1455 }
1456 
1457 void
1458 rtwn_pci_stop(void *cookie)
1459 {
1460 	struct rtwn_pci_softc *sc = cookie;
1461 	int i, s;
1462 
1463 	s = splnet();
1464 
1465 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1466 		rtwn_pci_88e_stop(sc);
1467 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
1468 		rtwn_pci_23a_stop(sc);
1469 	} else {
1470 		rtwn_pci_92c_stop(sc);
1471 	}
1472 
1473 	for (i = 0; i < RTWN_NTXQUEUES; i++)
1474 		rtwn_reset_tx_list(sc, i);
1475 	rtwn_reset_rx_list(sc);
1476 
1477 	splx(s);
1478 }
1479 
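/*
 * Interrupt handler for RTL8188EE, which uses its own interrupt
 * status/mask registers (R88E_HISR/R88E_HIMR) plus an extended pair
 * for Rx FIFO overflow (R88E_HISRE/R88E_HIMRE).
 */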
1480 int
1481 rtwn_88e_intr(struct rtwn_pci_softc *sc)
1482 {
1483 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1484 	u_int32_t status, estatus;
1485 	int i;
1486 
1487 	status = rtwn_pci_read_4(sc, R88E_HISR);
1488 	if (status == 0 || status == 0xffffffff)
1489 		return (0);
1490 
1491 	estatus = rtwn_pci_read_4(sc, R88E_HISRE);
1492 
1493 	status &= RTWN_88E_INT_ENABLE;
1494 	estatus &= R88E_HIMRE_RXFOVW;
1495 
1496 	rtwn_pci_write_4(sc, R88E_HIMR, 0);
1497 	rtwn_pci_write_4(sc, R88E_HIMRE, 0);
1498 	rtwn_pci_write_4(sc, R88E_HISR, status);
1499 	rtwn_pci_write_4(sc, R88E_HISRE, estatus);
1500 
1501 	if (status & R88E_HIMR_HIGHDOK)
1502 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1503 	if (status & R88E_HIMR_MGNTDOK)
1504 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1505 	if (status & R88E_HIMR_BKDOK)
1506 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1507 	if (status & R88E_HIMR_BEDOK)
1508 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1509 	if (status & R88E_HIMR_VIDOK)
1510 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1511 	if (status & R88E_HIMR_VODOK)
1512 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1513 	if ((status & (R88E_HIMR_ROK | R88E_HIMR_RDU)) ||
1514 	    (estatus & R88E_HIMRE_RXFOVW)) {
1515 		struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1516 
1517 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1518 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1519 		    BUS_DMASYNC_POSTREAD);
1520 
1521 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1522 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1523 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1524 
1525 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1526 				continue;
1527 
1528 			rtwn_rx_frame(sc, rx_desc, rx_data, i, &ml);
1529 		}
1530 		if_input(&ic->ic_if, &ml);
1531 	}
1532 
1533 	if (status & R88E_HIMR_HSISR_IND_ON_INT) {
1534 		rtwn_pci_write_1(sc, R92C_HSISR,
1535 		    rtwn_pci_read_1(sc, R92C_HSISR) |
1536 		    R88E_HSIMR_PDN_INT_EN | R88E_HSIMR_RON_INT_EN);
1537 	}
1538 
1539 	/* Enable interrupts. */
1540 	rtwn_pci_write_4(sc, R88E_HIMR, RTWN_88E_INT_ENABLE);
1541 	rtwn_pci_write_4(sc, R88E_HIMRE, R88E_HIMRE_RXFOVW);
1542 
1543 	return (1);
1544 }
1545 
1546 int
1547 rtwn_intr(void *xsc)
1548 {
1549 	struct rtwn_pci_softc *sc = xsc;
1550 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1551 	u_int32_t status;
1552 	int i;
1553 
1554 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1555 		return (rtwn_88e_intr(sc));
1556 
1557 	status = rtwn_pci_read_4(sc, R92C_HISR);
1558 	if (status == 0 || status == 0xffffffff)
1559 		return (0);
1560 
1561 	/* Disable interrupts. */
1562 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1563 
1564 	/* Ack interrupts. */
1565 	rtwn_pci_write_4(sc, R92C_HISR, status);
1566 
1567 	/* Vendor driver treats RX errors like ROK... */
1568 	if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) {
1569 		struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1570 
1571 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1572 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1573 		    BUS_DMASYNC_POSTREAD);
1574 
1575 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1576 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1577 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1578 
1579 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1580 				continue;
1581 
1582 			rtwn_rx_frame(sc, rx_desc, rx_data, i, &ml);
1583 		}
1584 		if_input(&ic->ic_if, &ml);
1585 	}
1586 
1587 	if (status & R92C_IMR_BDOK)
1588 		rtwn_tx_done(sc, RTWN_BEACON_QUEUE);
1589 	if (status & R92C_IMR_HIGHDOK)
1590 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1591 	if (status & R92C_IMR_MGNTDOK)
1592 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1593 	if (status & R92C_IMR_BKDOK)
1594 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1595 	if (status & R92C_IMR_BEDOK)
1596 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1597 	if (status & R92C_IMR_VIDOK)
1598 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1599 	if (status & R92C_IMR_VODOK)
1600 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1601 
1602 	if (sc->sc_sc.chip & RTWN_CHIP_23A) {
1603 		if (status & R92C_IMR_ATIMEND)
1604 			rtwn_poll_c2h_events(sc);
1605 	}
1606 
1607 	/* Enable interrupts. */
1608 	rtwn_pci_write_4(sc, R92C_HIMR, RTWN_92C_INT_ENABLE);
1609 
1610 	return (1);
1611 }
1612 
1613 int
1614 rtwn_is_oactive(void *cookie)
1615 {
1616 	struct rtwn_pci_softc *sc = cookie;
1617 
1618 	return (sc->qfullmsk != 0);
1619 }
1620 
1621 int
1622 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data)
1623 {
1624 	int ntries;
1625 
1626 	rtwn_pci_write_4(sc, R92C_LLT_INIT,
1627 	    SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
1628 	    SM(R92C_LLT_INIT_ADDR, addr) |
1629 	    SM(R92C_LLT_INIT_DATA, data));
1630 	/* Wait for write operation to complete. */
1631 	for (ntries = 0; ntries < 20; ntries++) {
1632 		if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
1633 		    R92C_LLT_INIT_OP_NO_ACTIVE)
1634 			return (0);
1635 		DELAY(5);
1636 	}
1637 	return (ETIMEDOUT);
1638 }
1639 
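/*
 * Initialize the link list table (LLT) that chains the pages of the
 * on-chip Tx packet buffer: pages [0; page_count] are reserved and
 * terminated with an end-of-list marker, while the remaining pages are
 * linked into a circular ring buffer.
 */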
1640 int
1641 rtwn_llt_init(struct rtwn_pci_softc *sc, int page_count)
1642 {
1643 	int i, error, pktbuf_count;
1644 
1645 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1646 		pktbuf_count = R88E_TXPKTBUF_COUNT;
1647 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
1648 		pktbuf_count = R23A_TXPKTBUF_COUNT;
1649 	else
1650 		pktbuf_count = R92C_TXPKTBUF_COUNT;
1651 
1652 	/* Reserve pages [0; page_count]. */
1653 	for (i = 0; i < page_count; i++) {
1654 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1655 			return (error);
1656 	}
1657 	/* NB: 0xff indicates end-of-list. */
1658 	if ((error = rtwn_llt_write(sc, i, 0xff)) != 0)
1659 		return (error);
1660 	/*
1661 	 * Use pages [page_count + 1; pktbuf_count - 1]
1662 	 * as ring buffer.
1663 	 */
1664 	for (++i; i < pktbuf_count - 1; i++) {
1665 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1666 			return (error);
1667 	}
1668 	/* Make the last page point to the beginning of the ring buffer. */
1669 	error = rtwn_llt_write(sc, i, page_count + 1);
1670 	return (error);
1671 }
1672 
1673 int
1674 rtwn_92c_power_on(struct rtwn_pci_softc *sc)
1675 {
1676 	uint32_t reg;
1677 	int ntries;
1678 
1679 	/* Wait for autoload done bit. */
1680 	for (ntries = 0; ntries < 1000; ntries++) {
1681 		if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) &
1682 		    R92C_APS_FSMCO_PFM_ALDN)
1683 			break;
1684 		DELAY(5);
1685 	}
1686 	if (ntries == 1000) {
1687 		printf("%s: timeout waiting for chip autoload\n",
1688 		    sc->sc_dev.dv_xname);
1689 		return (ETIMEDOUT);
1690 	}
1691 
1692 	/* Unlock ISO/CLK/Power control register. */
1693 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1694 
1695 	/* TODO: check if we need this for 8188CE */
1696 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1697 		/* bt coex */
1698 		reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO);
1699 		reg |= (R92C_APS_FSMCO_SOP_ABG |
1700 			R92C_APS_FSMCO_SOP_AMB |
1701 			R92C_APS_FSMCO_XOP_BTCK);
1702 		rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg);
1703 	}
1704 
1705 	/* Move SPS into PWM mode. */
1706 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b);
1707 	DELAY(100);
1708 
1709 	/* Set low byte to 0x0f, leave others unchanged. */
1710 	rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL,
1711 	    (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f);
1712 
1713 	/* TODO: check if we need this for 8188CE */
1714 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1715 		/* bt coex */
1716 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL);
1717 		reg &= (~0x00024800); /* XXX magic from linux */
1718 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg);
1719 	}
1720 
1721 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1722 	  (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) |
1723 	  R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR);
1724 	DELAY(200);
1725 
1726 	/* TODO: linux does additional btcoex stuff here */
1727 
1728 	/* Auto enable WLAN. */
1729 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1730 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1731 	for (ntries = 0; ntries < 1000; ntries++) {
1732 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1733 		    R92C_APS_FSMCO_APFM_ONMAC))
1734 			break;
1735 		DELAY(5);
1736 	}
1737 	if (ntries == 1000) {
1738 		printf("%s: timeout waiting for MAC auto ON\n",
1739 		    sc->sc_dev.dv_xname);
1740 		return (ETIMEDOUT);
1741 	}
1742 
1743 	/* Enable radio, GPIO and LED functions. */
1744 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1745 	    R92C_APS_FSMCO_AFSM_PCIE |
1746 	    R92C_APS_FSMCO_PDN_EN |
1747 	    R92C_APS_FSMCO_PFM_ALDN);
1748 	/* Release RF digital isolation. */
1749 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1750 	    rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);
1751 
1752 	if (sc->sc_sc.chip & RTWN_CHIP_92C)
1753 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77);
1754 	else
1755 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22);
1756 
1757 	rtwn_pci_write_4(sc, R92C_INT_MIG, 0);
1758 
1759 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1760 		/* bt coex */
1761 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2);
1762 		reg &= 0xfd; /* XXX magic from linux */
1763 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg);
1764 	}
1765 
1766 	rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG,
1767 	    rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL);
1768 
1769 	reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL);
1770 	if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) {
1771 		printf("%s: radio is disabled by hardware switch\n",
1772 		    sc->sc_dev.dv_xname);
1773 		return (EPERM);	/* :-) */
1774 	}
1775 
1776 	/* Initialize MAC. */
1777 	rtwn_pci_write_1(sc, R92C_APSD_CTRL,
1778 	    rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
1779 	for (ntries = 0; ntries < 200; ntries++) {
1780 		if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) &
1781 		    R92C_APSD_CTRL_OFF_STATUS))
1782 			break;
1783 		DELAY(500);
1784 	}
1785 	if (ntries == 200) {
1786 		printf("%s: timeout waiting for MAC initialization\n",
1787 		    sc->sc_dev.dv_xname);
1788 		return (ETIMEDOUT);
1789 	}
1790 
1791 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1792 	reg = rtwn_pci_read_2(sc, R92C_CR);
1793 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1794 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1795 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1796 	    R92C_CR_ENSEC;
1797 	rtwn_pci_write_2(sc, R92C_CR, reg);
1798 
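	/*
	 * Undocumented register write, presumably carried over from the
	 * vendor reference driver.
	 */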
1799 	rtwn_pci_write_1(sc, 0xfe10, 0x19);
1800 
1801 	return (0);
1802 }
1803 
1804 int
1805 rtwn_88e_power_on(struct rtwn_pci_softc *sc)
1806 {
1807 	uint32_t reg;
1808 	int ntries;
1809 
1810 	/* Disable XTAL output for power saving. */
1811 	rtwn_pci_write_1(sc, R88E_XCK_OUT_CTRL,
1812 	    rtwn_pci_read_1(sc, R88E_XCK_OUT_CTRL) & ~R88E_XCK_OUT_CTRL_EN);
1813 
1814 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1815 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & (~R92C_APS_FSMCO_APDM_HPDN));
1816 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1817 
1818 	/* Wait for power ready bit. */
1819 	for (ntries = 0; ntries < 5000; ntries++) {
1820 		if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST)
1821 			break;
1822 		DELAY(10);
1823 	}
1824 	if (ntries == 5000) {
1825 		printf("%s: timeout waiting for chip power up\n",
1826 		    sc->sc_dev.dv_xname);
1827 		return (ETIMEDOUT);
1828 	}
1829 
1830 	/* Reset BB. */
1831 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1832 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) & ~(R92C_SYS_FUNC_EN_BBRSTB |
1833 	    R92C_SYS_FUNC_EN_BB_GLB_RST));
1834 
1835 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2,
1836 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80);
1837 
1838 	/* Disable HWPDN. */
1839 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1840 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN);
1841 	/* Disable WL suspend. */
1842 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1843 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1844 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1845 
1846 	/* Auto enable WLAN. */
1847 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1848 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1849 	for (ntries = 0; ntries < 5000; ntries++) {
1850 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1851 		    R92C_APS_FSMCO_APFM_ONMAC))
1852 			break;
1853 		DELAY(10);
1854 	}
1855 	if (ntries == 5000) {
1856 		printf("%s: timeout waiting for MAC auto ON\n",
1857 		    sc->sc_dev.dv_xname);
1858 		return (ETIMEDOUT);
1859 	}
1860 
1861 	/* Enable LDO normal mode. */
1862 	rtwn_pci_write_1(sc, R92C_LPLDO_CTRL,
1863 	    rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) & ~0x10);
1864 
1865 	rtwn_pci_write_1(sc, R92C_APS_FSMCO,
1866 	    rtwn_pci_read_1(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_PDN_EN);
1867 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2,
1868 	    rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04);
1869 
1870 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1,
1871 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1) | 0x02);
1872 
1873 	rtwn_pci_write_1(sc, R92C_SYS_CLKR,
1874 	    rtwn_pci_read_1(sc, R92C_SYS_CLKR) | 0x08);
1875 
1876 	rtwn_pci_write_2(sc, R92C_GPIO_MUXCFG,
1877 	    rtwn_pci_read_2(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENSIC);
1878 
1879 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1880 	rtwn_pci_write_2(sc, R92C_CR, 0);
1881 	reg = rtwn_pci_read_2(sc, R92C_CR);
1882 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1883 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1884 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1885 	    R92C_CR_ENSEC | R92C_CR_CALTMR_EN;
1886 	rtwn_pci_write_2(sc, R92C_CR, reg);
1887 
1888 	rtwn_pci_write_1(sc, R92C_MSR, 0);
1889 	return (0);
1890 }
1891 
1892 int
1893 rtwn_23a_power_on(struct rtwn_pci_softc *sc)
1894 {
1895 	uint32_t reg;
1896 	int ntries;
1897 
1898 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x00);
1899 
1900 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1901 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1902 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1903 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0x00);
1904 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1905 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APFM_RSM);
1906 
1907 	/* Wait for power ready bit. */
1908 	for (ntries = 0; ntries < 5000; ntries++) {
1909 		if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST)
1910 			break;
1911 		DELAY(10);
1912 	}
1913 	if (ntries == 5000) {
1914 		printf("%s: timeout waiting for chip power up\n",
1915 		    sc->sc_dev.dv_xname);
1916 		return (ETIMEDOUT);
1917 	}
1918 
1919 	/* Release WLON reset. */
1920 	rtwn_pci_write_4(sc, R92C_APS_FSMCO, rtwn_pci_read_4(sc, R92C_APS_FSMCO) |
1921 	    R92C_APS_FSMCO_RDY_MACON);
1922 	/* Disable HWPDN. */
1923 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1924 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN);
1925 	/* Disable WL suspend. */
1926 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1927 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1928 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1929 
1930 	/* Auto enable WLAN. */
1931 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1932 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1933 	for (ntries = 0; ntries < 5000; ntries++) {
1934 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1935 		    R92C_APS_FSMCO_APFM_ONMAC))
1936 			break;
1937 		DELAY(10);
1938 	}
1939 	if (ntries == 5000) {
1940 		printf("%s: timeout waiting for MAC auto ON (%x)\n",
1941 		    sc->sc_dev.dv_xname, rtwn_pci_read_2(sc, R92C_APS_FSMCO));
1942 		return (ETIMEDOUT);
1943 	}
1944 
1945 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2,
1946 	    rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04);
1947 
1948 	/* EMAC timeout. */
1949 	rtwn_pci_write_1(sc, 0x369, rtwn_pci_read_1(sc, 0x369) | 0x80);
1950 
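	/*
	 * Configure the PCIe ePHY through the MDIO window; retry until the
	 * written value reads back correctly.
	 */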
1951 	for (ntries = 0; ntries < 100; ntries++) {
1952 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e);
1953 		DELAY(100);
1954 		rtwn_pci_write_2(sc, R92C_MDIO + 2, 0xc280);
1955 		rtwn_pci_write_2(sc, R92C_MDIO, 0xc290);
1956 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x3e);
1957 		DELAY(100);
1958 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e);
1959 		DELAY(100);
1960 		if (rtwn_pci_read_2(sc, R92C_MDIO + 2) == 0xc290)
1961 			break;
1962 	}
1963 	if (ntries == 100) {
1964 		printf("%s: timeout configuring ePHY\n", sc->sc_dev.dv_xname);
1965 		return (ETIMEDOUT);
1966 	}
1967 
1968 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1969 	rtwn_pci_write_2(sc, R92C_CR, 0);
1970 	reg = rtwn_pci_read_2(sc, R92C_CR);
1971 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1972 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1973 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1974 	    R92C_CR_ENSEC | R92C_CR_CALTMR_EN;
1975 	rtwn_pci_write_2(sc, R92C_CR, reg);
1976 
1977 	return (0);
1978 }
1979 
1980 int
1981 rtwn_power_on(void *cookie)
1982 {
1983 	struct rtwn_pci_softc *sc = cookie;
1984 
1985 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1986 		return (rtwn_88e_power_on(sc));
1987 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
1988 		return (rtwn_23a_power_on(sc));
1989 	else
1990 		return (rtwn_92c_power_on(sc));
1991 }
1992 
1993 int
1994 rtwn_dma_init(void *cookie)
1995 {
1996 	struct rtwn_pci_softc *sc = cookie;
1997 	uint32_t reg;
1998 	uint16_t dmasize;
1999 	int hqpages, lqpages, nqpages, pagecnt, boundary, trxdma, tcr;
2000 	int error;
2001 
2002 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
2003 		nqpages = R88E_NPQ_NPAGES;
2004 		hqpages = R88E_HPQ_NPAGES;
2005 		lqpages = R88E_LPQ_NPAGES;
2006 		pagecnt = R88E_TX_PAGE_COUNT;
2007 		boundary = R88E_TX_PAGE_BOUNDARY;
2008 		dmasize = R88E_MAX_RX_DMA_SIZE;
2009 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN3;
2010 		trxdma = 0xe771;
2011 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2012 		nqpages = R23A_NPQ_NPAGES;
2013 		hqpages = R23A_HPQ_NPAGES;
2014 		lqpages = R23A_LPQ_NPAGES;
2015 		pagecnt = R23A_TX_PAGE_COUNT;
2016 		boundary = R23A_TX_PAGE_BOUNDARY;
2017 		dmasize = R23A_MAX_RX_DMA_SIZE;
2018 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 |
2019 		    R92C_TCR_ERRSTEN1;
2020 		trxdma = 0xf771;
2021 	} else {
2022 		nqpages = R92C_NPQ_NPAGES;
2023 		hqpages = R92C_HPQ_NPAGES;
2024 		lqpages = R92C_LPQ_NPAGES;
2025 		pagecnt = R92C_TX_PAGE_COUNT;
2026 		boundary = R92C_TX_PAGE_BOUNDARY;
2027 		dmasize = R92C_MAX_RX_DMA_SIZE;
2028 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 |
2029 		    R92C_TCR_ERRSTEN1;
2030 		trxdma = 0xf771;
2031 	}
2032 
2033 	/* Initialize LLT table. */
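	/*
	 * The LLT (link list table) chains the on-chip TX packet buffer
	 * pages; pages beyond the TX page boundary are reserved, which is
	 * why the boundary registers are programmed below.
	 */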
2034 	error = rtwn_llt_init(sc, pagecnt);
2035 	if (error != 0)
2036 		return error;
2037 
2038 	/* Set number of pages for normal priority queue. */
2039 	rtwn_pci_write_2(sc, R92C_RQPN_NPQ, nqpages);
2040 	rtwn_pci_write_4(sc, R92C_RQPN,
2041 	    /* Set number of pages for public queue. */
2042 	    SM(R92C_RQPN_PUBQ, pagecnt) |
2043 	    /* Set number of pages for high priority queue. */
2044 	    SM(R92C_RQPN_HPQ, hqpages) |
2045 	    /* Set number of pages for low priority queue. */
2046 	    SM(R92C_RQPN_LPQ, lqpages) |
2047 	    /* Load values. */
2048 	    R92C_RQPN_LD);
2049 
2050 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, boundary);
2051 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, boundary);
2052 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD,
2053 	    boundary);
2054 	rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, boundary);
2055 	rtwn_pci_write_1(sc, R92C_TDECTRL + 1, boundary);
2056 
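	/*
	 * Map the TX queues onto the DMA queues (QMAP field); the mapping
	 * value is taken as-is from the reference driver.
	 */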
2057 	reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL);
2058 	reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
2059 	reg |= trxdma;
2060 	rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg);
2061 
2062 	rtwn_pci_write_4(sc, R92C_TCR, tcr);
2063 
2064 	/* Configure Tx DMA. */
2065 	rtwn_pci_write_4(sc, R92C_BKQ_DESA,
2066 		sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
2067 	rtwn_pci_write_4(sc, R92C_BEQ_DESA,
2068 		sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
2069 	rtwn_pci_write_4(sc, R92C_VIQ_DESA,
2070 		sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
2071 	rtwn_pci_write_4(sc, R92C_VOQ_DESA,
2072 		sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
2073 	rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
2074 		sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
2075 	rtwn_pci_write_4(sc, R92C_MGQ_DESA,
2076 		sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
2077 	rtwn_pci_write_4(sc, R92C_HQ_DESA,
2078 		sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);
2079 
2080 	/* Configure Rx DMA. */
2081 	rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr);
2082 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG+1, 0);
2083 
2084 	/* Set Tx/Rx transfer page boundary. */
2085 	rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, dmasize - 1);
2086 
2087 	/* Set Tx/Rx transfer page size. */
2088 	rtwn_pci_write_1(sc, R92C_PBP,
2089 	    SM(R92C_PBP_PSRX, R92C_PBP_128) |
2090 	    SM(R92C_PBP_PSTX, R92C_PBP_128));
2091 
2092 	return (0);
2093 }
2094 
2095 int
2096 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len)
2097 {
2098 	struct rtwn_pci_softc *sc = cookie;
2099 	uint32_t reg;
2100 	int off, mlen, error = 0, i;
2101 
2102 	reg = rtwn_pci_read_4(sc, R92C_MCUFWDL);
2103 	reg = RW(reg, R92C_MCUFWDL_PAGE, page);
2104 	rtwn_pci_write_4(sc, R92C_MCUFWDL, reg);
2105 
2106 	DELAY(5);
2107 
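	/*
	 * Copy this firmware page into the download window, one byte at a
	 * time, starting at R92C_FW_START_ADDR.
	 */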
2108 	off = R92C_FW_START_ADDR;
2109 	while (len > 0) {
2110 		if (len > 196)
2111 			mlen = 196;
2112 		else if (len > 4)
2113 			mlen = 4;
2114 		else
2115 			mlen = 1;
2116 		for (i = 0; i < mlen; i++)
2117 			rtwn_pci_write_1(sc, off++, buf[i]);
2118 		buf += mlen;
2119 		len -= mlen;
2120 	}
2121 
2122 	return (error);
2123 }
2124 
2125 int
2126 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len)
2127 {
2128 	struct rtwn_pci_softc *sc = cookie;
2129 	const char *name;
2130 	int error;
2131 
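	/* Pick the firmware image matching the chip variant and cut. */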
2132 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
2133 		name = "rtwn-rtl8188e";
2134 	else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2135 		if (sc->sc_sc.chip & RTWN_CHIP_UMC_A_CUT)
2136 			name = "rtwn-rtl8723";
2137 		else
2138 			name = "rtwn-rtl8723_B";
2139 	} else if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | RTWN_CHIP_92C)) ==
2140 	    RTWN_CHIP_UMC_A_CUT)
2141 		name = "rtwn-rtl8192cU";
2142 	else
2143 		name = "rtwn-rtl8192cU_B";
2144 
2145 	error = loadfirmware(name, fw, len);
2146 	if (error)
2147 		printf("%s: could not read firmware %s (error %d)\n",
2148 		    sc->sc_dev.dv_xname, name, error);
2149 	return (error);
2150 }
2151 
2152 void
2153 rtwn_mac_init(void *cookie)
2154 {
2155 	struct rtwn_pci_softc *sc = cookie;
2156 	int i;
2157 
2158 	/* Write MAC initialization values. */
2159 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
2160 		for (i = 0; i < nitems(rtl8188eu_mac); i++) {
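			/*
			 * Skip GPIO_MUXCFG here (it is touched during
			 * power-on instead).
			 */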
2161 			if (rtl8188eu_mac[i].reg == R92C_GPIO_MUXCFG)
2162 				continue;
2163 			rtwn_pci_write_1(sc, rtl8188eu_mac[i].reg,
2164 			    rtl8188eu_mac[i].val);
2165 		}
2166 		rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x07);
2167 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2168 		for (i = 0; i < nitems(rtl8192cu_mac); i++) {
2169 			rtwn_pci_write_1(sc, rtl8192cu_mac[i].reg,
2170 			    rtl8192cu_mac[i].val);
2171 		}
2172 		rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x0a);
2173 	} else {
2174 		for (i = 0; i < nitems(rtl8192ce_mac); i++)
2175 			rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg,
2176 			    rtl8192ce_mac[i].val);
2177 	}
2178 }
2179 
2180 void
2181 rtwn_bb_init(void *cookie)
2182 {
2183 	struct rtwn_pci_softc *sc = cookie;
2184 	const struct r92c_bb_prog *prog;
2185 	uint32_t reg;
2186 	int i;
2187 
2188 	/* Enable BB and RF. */
2189 	rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN,
2190 	    rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) |
2191 	    R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
2192 	    R92C_SYS_FUNC_EN_DIO_RF);
2193 
2194 	if (!(sc->sc_sc.chip & RTWN_CHIP_88E))
2195 		rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);
2196 
2197 	rtwn_pci_write_1(sc, R92C_RF_CTRL,
2198 	    R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);
2199 
2200 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
2201 	    R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA |
2202 	    R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST |
2203 	    R92C_SYS_FUNC_EN_BBRSTB);
2204 
2205 	if (!(sc->sc_sc.chip & RTWN_CHIP_88E)) {
2206 		rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);
2207 	}
2208 
2209 	rtwn_pci_write_4(sc, R92C_LEDCFG0,
2210 	    rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000);
2211 
2212 	/* Select BB programming. */
2213 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
2214 		prog = &rtl8188eu_bb_prog;
2215 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
2216 		prog = &rtl8723a_bb_prog;
2217 	else if (!(sc->sc_sc.chip & RTWN_CHIP_92C))
2218 		prog = &rtl8192ce_bb_prog_1t;
2219 	else
2220 		prog = &rtl8192ce_bb_prog_2t;
2221 
2222 	/* Write BB initialization values. */
2223 	for (i = 0; i < prog->count; i++) {
2224 		rtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
2225 		DELAY(1);
2226 	}
2227 
2228 	if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) {
2229 		/* 8192C 1T only configuration. */
2230 		reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO);
2231 		reg = (reg & ~0x00000003) | 0x2;
2232 		rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);
2233 
2234 		reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO);
2235 		reg = (reg & ~0x00300033) | 0x00200022;
2236 		rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);
2237 
2238 		reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING);
2239 		reg = (reg & ~0xff000000) | 0x45 << 24;
2240 		rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);
2241 
2242 		reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
2243 		reg = (reg & ~0x000000ff) | 0x23;
2244 		rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);
2245 
2246 		reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
2247 		reg = (reg & ~0x00000030) | 1 << 4;
2248 		rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);
2249 
2250 		reg = rtwn_bb_read(sc, 0xe74);
2251 		reg = (reg & ~0x0c000000) | 2 << 26;
2252 		rtwn_bb_write(sc, 0xe74, reg);
2253 		reg = rtwn_bb_read(sc, 0xe78);
2254 		reg = (reg & ~0x0c000000) | 2 << 26;
2255 		rtwn_bb_write(sc, 0xe78, reg);
2256 		reg = rtwn_bb_read(sc, 0xe7c);
2257 		reg = (reg & ~0x0c000000) | 2 << 26;
2258 		rtwn_bb_write(sc, 0xe7c, reg);
2259 		reg = rtwn_bb_read(sc, 0xe80);
2260 		reg = (reg & ~0x0c000000) | 2 << 26;
2261 		rtwn_bb_write(sc, 0xe80, reg);
2262 		reg = rtwn_bb_read(sc, 0xe88);
2263 		reg = (reg & ~0x0c000000) | 2 << 26;
2264 		rtwn_bb_write(sc, 0xe88, reg);
2265 	}
2266 
2267 	/* Write AGC values. */
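	/* Each entry is written to the same AGC/RSSI table register. */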
2268 	for (i = 0; i < prog->agccount; i++) {
2269 		rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
2270 		    prog->agcvals[i]);
2271 		DELAY(1);
2272 	}
2273 
2274 	if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR)
2275 		sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR;
2276 }
2277 
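/*
 * Periodic calibration timeout: update AMRR rate selection for the BSS
 * node, then run the chip calibration routine.
 */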
2278 void
2279 rtwn_calib_to(void *arg)
2280 {
2281 	struct rtwn_pci_softc *sc = arg;
2282 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
2283 	int s;
2284 
2285 	s = splnet();
2286 	ieee80211_amrr_choose(&sc->amrr, ic->ic_bss, &sc->amn);
2287 	splx(s);
2288 
2289 	rtwn_calib(&sc->sc_sc);
2290 }
2291 
2292 void
2293 rtwn_next_calib(void *cookie)
2294 {
2295 	struct rtwn_pci_softc *sc = cookie;
2296 
2297 	timeout_add_sec(&sc->calib_to, 2);
2298 }
2299 
2300 void
2301 rtwn_cancel_calib(void *cookie)
2302 {
2303 	struct rtwn_pci_softc *sc = cookie;
2304 
2305 	if (timeout_initialized(&sc->calib_to))
2306 		timeout_del(&sc->calib_to);
2307 }
2308 
2309 void
2310 rtwn_scan_to(void *arg)
2311 {
2312 	struct rtwn_pci_softc *sc = arg;
2313 
2314 	rtwn_next_scan(&sc->sc_sc);
2315 }
2316 
2317 void
2318 rtwn_pci_next_scan(void *cookie)
2319 {
2320 	struct rtwn_pci_softc *sc = cookie;
2321 
2322 	timeout_add_msec(&sc->scan_to, 200);
2323 }
2324 
2325 void
2326 rtwn_cancel_scan(void *cookie)
2327 {
2328 	struct rtwn_pci_softc *sc = cookie;
2329 
2330 	if (timeout_initialized(&sc->scan_to))
2331 		timeout_del(&sc->scan_to);
2332 }
2333 
2334 void
2335 rtwn_wait_async(void *cookie)
2336 {
2337 	/* nothing to do */
2338 }
2339 
2340 void
2341 rtwn_tx_report(struct rtwn_pci_softc *sc, uint8_t *buf, int len)
2342 {
2343 	struct r92c_c2h_tx_rpt *rpt = (struct r92c_c2h_tx_rpt *)buf;
2344 	int packets, tries, tx_ok, drop, expire, over;
2345 
2346 	if (len != sizeof(*rpt))
2347 		return;
2348 
2349 	if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2350 		struct r88e_tx_rpt_ccx *rxstat = (struct r88e_tx_rpt_ccx *)buf;
2351 
2352 		/*
2353 		 * We sometimes receive garbage reports, so check that the
2354 		 * macid makes sense.
2355 		 */
2356 		if (MS(rxstat->rptb1, R88E_RPTB1_MACID) != R92C_MACID_BSS) {
2357 			return;
2358 		}
2359 
2360 		packets = 1;
2361 		tx_ok = (rxstat->rptb1 & R88E_RPTB1_PKT_OK) ? 1 : 0;
2362 		tries = MS(rxstat->rptb2, R88E_RPTB2_RETRY_CNT);
2363 		expire = (rxstat->rptb2 & R88E_RPTB2_LIFE_EXPIRE);
2364 		over = (rxstat->rptb2 & R88E_RPTB2_RETRY_OVER);
2365 		drop = 0;
2366 	} else {
2367 		packets = MS(rpt->rptb6, R92C_RPTB6_RPT_PKT_NUM);
2368 		tries = MS(rpt->rptb0, R92C_RPTB0_RETRY_CNT);
2369 		tx_ok = (rpt->rptb7 & R92C_RPTB7_PKT_OK);
2370 		drop = (rpt->rptb6 & R92C_RPTB6_PKT_DROP);
2371 		expire = (rpt->rptb6 & R92C_RPTB6_LIFE_EXPIRE);
2372 		over = (rpt->rptb6 & R92C_RPTB6_RETRY_OVER);
2373 	}
2374 
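	/* Feed the AMRR rate-control statistics. */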
2375 	if (packets > 0) {
2376 		sc->amn.amn_txcnt += packets;
2377 		if (!tx_ok || tries > 1 || drop || expire || over)
2378 			sc->amn.amn_retrycnt++;
2379 	}
2380 }
2381 
2382 void
2383 rtwn_poll_c2h_events(struct rtwn_pci_softc *sc)
2384 {
2385 	const uint16_t off = R92C_C2HEVT_MSG + sizeof(struct r92c_c2h_evt);
2386 	uint8_t buf[R92C_C2H_MSG_MAX_LEN];
2387 	uint8_t id, len, status;
2388 	int i;
2389 
2390 	/* Read current status. */
2391 	status = rtwn_pci_read_1(sc, R92C_C2HEVT_CLEAR);
2392 	if (status == R92C_C2HEVT_HOST_CLOSE)
2393 		return;	/* nothing to do */
2394 
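	/*
	 * FW_CLOSE indicates the firmware has posted an event; HOST_CLOSE is
	 * written back below to release the buffer (inferred from the
	 * handshake, not documented here).
	 */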
2395 	if (status == R92C_C2HEVT_FW_CLOSE) {
2396 		len = rtwn_pci_read_1(sc, R92C_C2HEVT_MSG);
2397 		id = MS(len, R92C_C2H_EVTB0_ID);
2398 		len = MS(len, R92C_C2H_EVTB0_LEN);
2399 
2400 		if (id == R92C_C2HEVT_TX_REPORT && len <= sizeof(buf)) {
2401 			memset(buf, 0, sizeof(buf));
2402 			for (i = 0; i < len; i++)
2403 				buf[i] = rtwn_pci_read_1(sc, off + i);
2404 			rtwn_tx_report(sc, buf, len);
2405 		} else
2406 			DPRINTF(("unhandled C2H event %d (%d bytes)\n",
2407 			    id, len));
2408 	}
2409 
2410 	/* Prepare for next event. */
2411 	rtwn_pci_write_1(sc, R92C_C2HEVT_CLEAR, R92C_C2HEVT_HOST_CLOSE);
2412 }
2413