1 /*	$OpenBSD: if_rtwn.c,v 1.42 2024/05/24 06:02:56 jsg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2015-2016 Andriy Voskoboinyk <avos@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * PCI front-end for Realtek RTL8188CE/RTL8188EE/RTL8192CE/RTL8723AE driver.
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/mbuf.h>
29 #include <sys/socket.h>
30 #include <sys/systm.h>
31 #include <sys/timeout.h>
32 #include <sys/device.h>
33 #include <sys/endian.h>
34 
35 #include <machine/bus.h>
36 #include <machine/intr.h>
37 
38 #if NBPFILTER > 0
39 #include <net/bpf.h>
40 #endif
41 #include <net/if.h>
42 #include <net/if_media.h>
43 
44 #include <netinet/in.h>
45 #include <netinet/if_ether.h>
46 
47 #include <net80211/ieee80211_var.h>
48 #include <net80211/ieee80211_amrr.h>
49 #include <net80211/ieee80211_radiotap.h>
50 
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #include <dev/pci/pcidevs.h>
54 
55 #include <dev/ic/r92creg.h>
56 #include <dev/ic/rtwnvar.h>
57 
58 /*
59  * Driver definitions.
60  */
61 
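/*
 * Per-chip Tx packet buffer layout: pages are reserved for the public,
 * high-priority and low-priority queues, *_TX_PAGE_COUNT is their sum
 * and *_TX_PAGE_BOUNDARY is the first page past that reservation.
 * These values, together with *_MAX_RX_DMA_SIZE, are presumably what
 * rtwn_dma_init() uses to program the Tx/Rx FIFO split.
 */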
62 #define R92C_NPQ_NPAGES		0
63 #define R92C_PUBQ_NPAGES	176
64 #define R92C_HPQ_NPAGES		41
65 #define R92C_LPQ_NPAGES		28
66 #define R92C_TXPKTBUF_COUNT	256
67 #define R92C_TX_PAGE_COUNT	\
68 	(R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
69 #define R92C_TX_PAGE_BOUNDARY	(R92C_TX_PAGE_COUNT + 1)
70 #define R92C_MAX_RX_DMA_SIZE	0x2800
71 
72 #define R88E_NPQ_NPAGES		0
73 #define R88E_PUBQ_NPAGES	116
74 #define R88E_HPQ_NPAGES		41
75 #define R88E_LPQ_NPAGES		13
76 #define R88E_TXPKTBUF_COUNT	176
77 #define R88E_TX_PAGE_COUNT	\
78 	(R88E_PUBQ_NPAGES + R88E_HPQ_NPAGES + R88E_LPQ_NPAGES)
79 #define R88E_TX_PAGE_BOUNDARY	(R88E_TX_PAGE_COUNT + 1)
80 #define R88E_MAX_RX_DMA_SIZE	0x2600
81 
82 #define R23A_NPQ_NPAGES		0
83 #define R23A_PUBQ_NPAGES	189
84 #define R23A_HPQ_NPAGES		28
85 #define R23A_LPQ_NPAGES		28
86 #define R23A_TXPKTBUF_COUNT	256
87 #define R23A_TX_PAGE_COUNT	\
88 	(R23A_PUBQ_NPAGES + R23A_HPQ_NPAGES + R23A_LPQ_NPAGES)
89 #define R23A_TX_PAGE_BOUNDARY	(R23A_TX_PAGE_COUNT + 1)
90 #define R23A_MAX_RX_DMA_SIZE	0x2800
91 
92 #define RTWN_NTXQUEUES			9
93 #define RTWN_RX_LIST_COUNT		256
94 #define RTWN_TX_LIST_COUNT		256
95 
96 /* TX queue indices. */
97 #define RTWN_BK_QUEUE			0
98 #define RTWN_BE_QUEUE			1
99 #define RTWN_VI_QUEUE			2
100 #define RTWN_VO_QUEUE			3
101 #define RTWN_BEACON_QUEUE		4
102 #define RTWN_TXCMD_QUEUE		5
103 #define RTWN_MGNT_QUEUE			6
104 #define RTWN_HIGH_QUEUE			7
105 #define RTWN_HCCA_QUEUE			8
106 
107 struct rtwn_rx_radiotap_header {
108 	struct ieee80211_radiotap_header wr_ihdr;
109 	uint8_t		wr_flags;
110 	uint8_t		wr_rate;
111 	uint16_t	wr_chan_freq;
112 	uint16_t	wr_chan_flags;
113 	uint8_t		wr_dbm_antsignal;
114 } __packed;
115 
116 #define RTWN_RX_RADIOTAP_PRESENT			\
117 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
118 	 1 << IEEE80211_RADIOTAP_RATE |			\
119 	 1 << IEEE80211_RADIOTAP_CHANNEL |		\
120 	 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
121 
122 struct rtwn_tx_radiotap_header {
123 	struct ieee80211_radiotap_header wt_ihdr;
124 	uint8_t		wt_flags;
125 	uint16_t	wt_chan_freq;
126 	uint16_t	wt_chan_flags;
127 } __packed;
128 
129 #define RTWN_TX_RADIOTAP_PRESENT			\
130 	(1 << IEEE80211_RADIOTAP_FLAGS |		\
131 	 1 << IEEE80211_RADIOTAP_CHANNEL)
132 
133 struct rtwn_rx_data {
134 	bus_dmamap_t		map;
135 	struct mbuf		*m;
136 };
137 
138 struct rtwn_rx_ring {
139 	struct r92c_rx_desc_pci	*desc;
140 	bus_dmamap_t		map;
141 	bus_dma_segment_t	seg;
142 	int			nsegs;
143 	struct rtwn_rx_data	rx_data[RTWN_RX_LIST_COUNT];
144 
145 };
146 struct rtwn_tx_data {
147 	bus_dmamap_t			map;
148 	struct mbuf			*m;
149 	struct ieee80211_node		*ni;
150 };
151 
152 struct rtwn_tx_ring {
153 	bus_dmamap_t		map;
154 	bus_dma_segment_t	seg;
155 	int			nsegs;
156 	struct r92c_tx_desc_pci	*desc;
157 	struct rtwn_tx_data	tx_data[RTWN_TX_LIST_COUNT];
158 	int			queued;
159 	int			cur;
160 };
161 
162 struct rtwn_pci_softc {
163 	struct device		sc_dev;
164 	struct rtwn_softc	sc_sc;
165 
166 	struct rtwn_rx_ring	rx_ring;
167 	struct rtwn_tx_ring	tx_ring[RTWN_NTXQUEUES];
168 	uint32_t		qfullmsk;
169 
170 	struct timeout		calib_to;
171 	struct timeout		scan_to;
172 
173 	/* PCI specific goo. */
174 	bus_dma_tag_t 		sc_dmat;
175 	pci_chipset_tag_t	sc_pc;
176 	pcitag_t		sc_tag;
177 	void			*sc_ih;
178 	bus_space_tag_t		sc_st;
179 	bus_space_handle_t	sc_sh;
180 	bus_size_t		sc_mapsize;
181 	int			sc_cap_off;
182 
183 	struct ieee80211_amrr		amrr;
184 	struct ieee80211_amrr_node	amn;
185 
186 #if NBPFILTER > 0
187 	caddr_t				sc_drvbpf;
188 
189 	union {
190 		struct rtwn_rx_radiotap_header th;
191 		uint8_t	pad[64];
192 	}				sc_rxtapu;
193 #define sc_rxtap	sc_rxtapu.th
194 	int				sc_rxtap_len;
195 
196 	union {
197 		struct rtwn_tx_radiotap_header th;
198 		uint8_t	pad[64];
199 	}				sc_txtapu;
200 #define sc_txtap	sc_txtapu.th
201 	int				sc_txtap_len;
202 #endif
203 };
204 
205 #ifdef RTWN_DEBUG
206 #define DPRINTF(x)	do { if (rtwn_debug) printf x; } while (0)
207 #define DPRINTFN(n, x)	do { if (rtwn_debug >= (n)) printf x; } while (0)
208 extern int rtwn_debug;
209 #else
210 #define DPRINTF(x)
211 #define DPRINTFN(n, x)
212 #endif
213 
214 /*
215  * PCI configuration space registers.
216  */
217 #define	RTWN_PCI_IOBA		0x10	/* i/o mapped base */
218 #define	RTWN_PCI_MMBA		0x18	/* memory mapped base */
219 
220 static const struct pci_matchid rtwn_pci_devices[] = {
221 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8188CE },
222 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8188EE },
223 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8192CE },
224 	{ PCI_VENDOR_REALTEK,	PCI_PRODUCT_REALTEK_RTL8723AE }
225 };
226 
227 int		rtwn_pci_match(struct device *, void *, void *);
228 void		rtwn_pci_attach(struct device *, struct device *, void *);
229 int		rtwn_pci_detach(struct device *, int);
230 int		rtwn_pci_activate(struct device *, int);
231 int		rtwn_alloc_rx_list(struct rtwn_pci_softc *);
232 void		rtwn_reset_rx_list(struct rtwn_pci_softc *);
233 void		rtwn_free_rx_list(struct rtwn_pci_softc *);
234 void		rtwn_setup_rx_desc(struct rtwn_pci_softc *,
235 		    struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
236 int		rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
237 void		rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
238 void		rtwn_free_tx_list(struct rtwn_pci_softc *, int);
239 void		rtwn_pci_write_1(void *, uint16_t, uint8_t);
240 void		rtwn_pci_write_2(void *, uint16_t, uint16_t);
241 void		rtwn_pci_write_4(void *, uint16_t, uint32_t);
242 uint8_t		rtwn_pci_read_1(void *, uint16_t);
243 uint16_t	rtwn_pci_read_2(void *, uint16_t);
244 uint32_t	rtwn_pci_read_4(void *, uint16_t);
245 void		rtwn_rx_frame(struct rtwn_pci_softc *,
246 		    struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int,
247 		    struct mbuf_list *);
248 int		rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
249 void		rtwn_tx_done(struct rtwn_pci_softc *, int);
250 int		rtwn_alloc_buffers(void *);
251 int		rtwn_pci_init(void *);
252 void		rtwn_pci_88e_stop(struct rtwn_pci_softc *);
253 void		rtwn_pci_stop(void *);
254 int		rtwn_intr(void *);
255 int		rtwn_is_oactive(void *);
256 int		rtwn_92c_power_on(struct rtwn_pci_softc *);
257 int		rtwn_88e_power_on(struct rtwn_pci_softc *);
258 int		rtwn_23a_power_on(struct rtwn_pci_softc *);
259 int		rtwn_power_on(void *);
260 int		rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
261 int		rtwn_llt_init(struct rtwn_pci_softc *, int);
262 int		rtwn_dma_init(void *);
263 int		rtwn_fw_loadpage(void *, int, uint8_t *, int);
264 int		rtwn_pci_load_firmware(void *, u_char **, size_t *);
265 void		rtwn_mac_init(void *);
266 void		rtwn_bb_init(void *);
267 void		rtwn_calib_to(void *);
268 void		rtwn_next_calib(void *);
269 void		rtwn_cancel_calib(void *);
270 void		rtwn_scan_to(void *);
271 void		rtwn_pci_next_scan(void *);
272 void		rtwn_cancel_scan(void *);
273 void		rtwn_wait_async(void *);
274 void		rtwn_poll_c2h_events(struct rtwn_pci_softc *);
275 void		rtwn_tx_report(struct rtwn_pci_softc *, uint8_t *, int);
276 
277 /* Aliases. */
278 #define	rtwn_bb_write	rtwn_pci_write_4
279 #define rtwn_bb_read	rtwn_pci_read_4
280 
281 struct cfdriver rtwn_cd = {
282 	NULL, "rtwn", DV_IFNET
283 };
284 
285 const struct cfattach rtwn_pci_ca = {
286 	sizeof(struct rtwn_pci_softc),
287 	rtwn_pci_match,
288 	rtwn_pci_attach,
289 	rtwn_pci_detach,
290 	rtwn_pci_activate
291 };
292 
293 int
294 rtwn_pci_match(struct device *parent, void *match, void *aux)
295 {
296 	return (pci_matchbyid(aux, rtwn_pci_devices,
297 	    nitems(rtwn_pci_devices)));
298 }
299 
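/*
 * Bus glue: map the memory BAR, establish the interrupt (MSI when
 * available), disable PCIe ASPM, allocate the Rx and Tx rings, fill in
 * the sc_ops vector used by the bus-agnostic code, identify the chip
 * from the PCI product ID and finally call rtwn_attach().
 */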
300 void
301 rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
302 {
303 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
304 	struct pci_attach_args *pa = aux;
305 	struct ifnet *ifp;
306 	int i, error;
307 	pcireg_t memtype;
308 	pci_intr_handle_t ih;
309 	const char *intrstr;
310 
311 	sc->sc_dmat = pa->pa_dmat;
312 	sc->sc_pc = pa->pa_pc;
313 	sc->sc_tag = pa->pa_tag;
314 
315 	timeout_set(&sc->calib_to, rtwn_calib_to, sc);
316 	timeout_set(&sc->scan_to, rtwn_scan_to, sc);
317 
318 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
319 
320 	/* Map control/status registers. */
321 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
322 	error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
323 	    &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
324 	if (error != 0) {
325 		printf(": can't map mem space\n");
326 		return;
327 	}
328 
329 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
330 		printf(": can't map interrupt\n");
331 		return;
332 	}
333 	intrstr = pci_intr_string(sc->sc_pc, ih);
334 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
335 	    rtwn_intr, sc, sc->sc_dev.dv_xname);
336 	if (sc->sc_ih == NULL) {
337 		printf(": can't establish interrupt");
338 		if (intrstr != NULL)
339 			printf(" at %s", intrstr);
340 		printf("\n");
341 		return;
342 	}
343 	printf(": %s\n", intrstr);
344 
345 	/* Disable PCIe Active State Power Management (ASPM). */
346 	if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
347 	    &sc->sc_cap_off, NULL)) {
348 		uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
349 		    sc->sc_cap_off + PCI_PCIE_LCSR);
350 		lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
351 		pci_conf_write(sc->sc_pc, sc->sc_tag,
352 		    sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
353 	}
354 
355 	/* Allocate Tx/Rx buffers. */
356 	error = rtwn_alloc_rx_list(sc);
357 	if (error != 0) {
358 		printf("%s: could not allocate Rx buffers\n",
359 		    sc->sc_dev.dv_xname);
360 		return;
361 	}
362 	for (i = 0; i < RTWN_NTXQUEUES; i++) {
363 		error = rtwn_alloc_tx_list(sc, i);
364 		if (error != 0) {
365 			printf("%s: could not allocate Tx buffers\n",
366 			    sc->sc_dev.dv_xname);
367 			rtwn_free_rx_list(sc);
368 			return;
369 		}
370 	}
371 
372 	sc->amrr.amrr_min_success_threshold = 1;
373 	sc->amrr.amrr_max_success_threshold = 15;
374 
375 	/* Attach the bus-agnostic driver. */
376 	sc->sc_sc.sc_ops.cookie = sc;
377 	sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
378 	sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
379 	sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
380 	sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
381 	sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
382 	sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
383 	sc->sc_sc.sc_ops.tx = rtwn_tx;
384 	sc->sc_sc.sc_ops.power_on = rtwn_power_on;
385 	sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
386 	sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
387 	sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
388 	sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
389 	sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
390 	sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
391 	sc->sc_sc.sc_ops.init = rtwn_pci_init;
392 	sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
393 	sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
394 	sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
395 	sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
396 	sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
397 	sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
398 	sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
399 
400 	sc->sc_sc.chip = RTWN_CHIP_PCI;
401 	switch (PCI_PRODUCT(pa->pa_id)) {
402 	case PCI_PRODUCT_REALTEK_RTL8188CE:
403 	case PCI_PRODUCT_REALTEK_RTL8192CE:
404 		sc->sc_sc.chip |= RTWN_CHIP_88C | RTWN_CHIP_92C;
405 		break;
406 	case PCI_PRODUCT_REALTEK_RTL8188EE:
407 		sc->sc_sc.chip |= RTWN_CHIP_88E;
408 		break;
409 	case PCI_PRODUCT_REALTEK_RTL8723AE:
410 		sc->sc_sc.chip |= RTWN_CHIP_23A;
411 		break;
412 	}
413 
414 	error = rtwn_attach(&sc->sc_dev, &sc->sc_sc);
415 	if (error != 0) {
416 		rtwn_free_rx_list(sc);
417 		for (i = 0; i < RTWN_NTXQUEUES; i++)
418 			rtwn_free_tx_list(sc, i);
419 		return;
420 	}
421 
422 	/* ifp is now valid */
423 	ifp = &sc->sc_sc.sc_ic.ic_if;
424 #if NBPFILTER > 0
425 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
426 	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
427 
428 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
429 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
430 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);
431 
432 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
433 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
434 	sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
435 #endif
436 }
437 
438 int
439 rtwn_pci_detach(struct device *self, int flags)
440 {
441 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
442 	int s, i;
443 
444 	s = splnet();
445 
446 	if (timeout_initialized(&sc->calib_to))
447 		timeout_del(&sc->calib_to);
448 	if (timeout_initialized(&sc->scan_to))
449 		timeout_del(&sc->scan_to);
450 
451 	rtwn_detach(&sc->sc_sc, flags);
452 
453 	/* Free Tx/Rx buffers. */
454 	for (i = 0; i < RTWN_NTXQUEUES; i++)
455 		rtwn_free_tx_list(sc, i);
456 	rtwn_free_rx_list(sc);
457 	splx(s);
458 
459 	return (0);
460 }
461 
462 int
463 rtwn_pci_activate(struct device *self, int act)
464 {
465 	struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
466 
467 	return rtwn_activate(&sc->sc_sc, act);
468 }
469 
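/*
 * (Re)initialize an Rx descriptor: record the buffer length and DMA
 * address, mark the last descriptor with EOR to close the ring, then
 * hand ownership to the hardware only after a write barrier.
 */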
470 void
471 rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
472     bus_addr_t addr, size_t len, int idx)
473 {
474 	memset(desc, 0, sizeof(*desc));
475 	desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
476 		((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
477 	desc->rxbufaddr = htole32(addr);
478 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
479 	    BUS_SPACE_BARRIER_WRITE);
480 	desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
481 }
482 
483 int
484 rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
485 {
486 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
487 	struct rtwn_rx_data *rx_data;
488 	size_t size;
489 	int i, error = 0;
490 
491 	/* Allocate Rx descriptors. */
492 	size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
493 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
494 		&rx_ring->map);
495 	if (error != 0) {
496 		printf("%s: could not create rx desc DMA map\n",
497 		    sc->sc_dev.dv_xname);
498 		rx_ring->map = NULL;
499 		goto fail;
500 	}
501 
502 	error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
503 	    &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
504 	if (error != 0) {
505 		printf("%s: could not allocate rx desc\n",
506 		    sc->sc_dev.dv_xname);
507 		goto fail;
508 	}
509 
510 	error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
511 	    size, (caddr_t *)&rx_ring->desc,
512 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
513 	if (error != 0) {
514 		bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
515 		rx_ring->desc = NULL;
516 		printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
517 		goto fail;
518 	}
519 
520 	error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
521 	    1, size, BUS_DMA_NOWAIT);
522 	if (error != 0) {
523 		printf("%s: could not load rx desc\n",
524 		    sc->sc_dev.dv_xname);
525 		goto fail;
526 	}
527 
528 	bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
529 	    BUS_DMASYNC_PREWRITE);
530 
531 	/* Allocate Rx buffers. */
532 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
533 		rx_data = &rx_ring->rx_data[i];
534 
535 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
536 		    0, BUS_DMA_NOWAIT, &rx_data->map);
537 		if (error != 0) {
538 			printf("%s: could not create rx buf DMA map\n",
539 			    sc->sc_dev.dv_xname);
540 			goto fail;
541 		}
542 
543 		rx_data->m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
544 		if (rx_data->m == NULL) {
545 			printf("%s: could not allocate rx mbuf\n",
546 			    sc->sc_dev.dv_xname);
547 			error = ENOMEM;
548 			goto fail;
549 		}
550 
551 		error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
552 		    mtod(rx_data->m, void *), MCLBYTES, NULL,
553 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
554 		if (error != 0) {
555 			printf("%s: could not load rx buf DMA map\n",
556 			    sc->sc_dev.dv_xname);
557 			goto fail;
558 		}
559 
560 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
561 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
562 	}
563 fail:	if (error != 0)
564 		rtwn_free_rx_list(sc);
565 	return (error);
566 }
567 
568 void
569 rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
570 {
571 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
572 	struct rtwn_rx_data *rx_data;
573 	int i;
574 
575 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
576 		rx_data = &rx_ring->rx_data[i];
577 		rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
578 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
579 	}
580 }
581 
582 void
583 rtwn_free_rx_list(struct rtwn_pci_softc *sc)
584 {
585 	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
586 	struct rtwn_rx_data *rx_data;
587 	int i, s;
588 
589 	s = splnet();
590 
591 	if (rx_ring->map) {
592 		if (rx_ring->desc) {
593 			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
594 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
595 			    sizeof (struct r92c_rx_desc_pci) *
596 			    RTWN_RX_LIST_COUNT);
597 			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
598 			    rx_ring->nsegs);
599 			rx_ring->desc = NULL;
600 		}
601 		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
602 		rx_ring->map = NULL;
603 	}
604 
605 	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
606 		rx_data = &rx_ring->rx_data[i];
607 
608 		if (rx_data->m != NULL) {
609 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
610 			m_freem(rx_data->m);
611 			rx_data->m = NULL;
612 		}
613 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
614 		rx_data->map = NULL;
615 	}
616 
617 	splx(s);
618 }
619 
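/*
 * Allocate the Tx descriptor ring and per-slot DMA maps for queue qid.
 * Each descriptor's nextdescaddr points at the following descriptor
 * (modulo RTWN_TX_LIST_COUNT), chaining the ring for the hardware.
 */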
620 int
621 rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
622 {
623 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
624 	struct rtwn_tx_data *tx_data;
625 	int i = 0, error = 0;
626 
627 	error = bus_dmamap_create(sc->sc_dmat,
628 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
629 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
630 	    BUS_DMA_NOWAIT, &tx_ring->map);
631 	if (error != 0) {
632 		printf("%s: could not create tx ring DMA map\n",
633 		    sc->sc_dev.dv_xname);
634 		goto fail;
635 	}
636 
637 	error = bus_dmamem_alloc(sc->sc_dmat,
638 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
639 	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
640 	if (error != 0) {
641 		printf("%s: could not allocate tx ring DMA memory\n",
642 		    sc->sc_dev.dv_xname);
643 		goto fail;
644 	}
645 
646 	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
647 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
648 	    (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
649 	if (error != 0) {
650 		bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
651 		printf("%s: can't map tx ring DMA memory\n",
652 		    sc->sc_dev.dv_xname);
653 		goto fail;
654 	}
655 
656 	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
657 	    sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
658 	    BUS_DMA_NOWAIT);
659 	if (error != 0) {
660 		printf("%s: could not load tx ring DMA map\n",
661 		    sc->sc_dev.dv_xname);
662 		goto fail;
663 	}
664 
665 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
666 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
667 
668 		/* setup tx desc */
669 		desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
670 		  + sizeof(struct r92c_tx_desc_pci)
671 		  * ((i + 1) % RTWN_TX_LIST_COUNT));
672 
673 		tx_data = &tx_ring->tx_data[i];
674 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
675 		    0, BUS_DMA_NOWAIT, &tx_data->map);
676 		if (error != 0) {
677 			printf("%s: could not create tx buf DMA map\n",
678 			    sc->sc_dev.dv_xname);
679 			goto fail;
680 		}
681 		tx_data->m = NULL;
682 		tx_data->ni = NULL;
683 	}
684 fail:
685 	if (error != 0)
686 		rtwn_free_tx_list(sc, qid);
687 	return (error);
688 }
689 
690 void
691 rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
692 {
693 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
694 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
695 	int i;
696 
697 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
698 		struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
699 		struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
700 
701 		memset(desc, 0, sizeof(*desc) -
702 		    (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
703 		    sizeof(desc->nextdescaddr)));
704 
705 		if (tx_data->m != NULL) {
706 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
707 			m_freem(tx_data->m);
708 			tx_data->m = NULL;
709 			ieee80211_release_node(ic, tx_data->ni);
710 			tx_data->ni = NULL;
711 		}
712 	}
713 
714 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
715 	    BUS_DMASYNC_POSTWRITE);
716 
717 	sc->qfullmsk &= ~(1 << qid);
718 	tx_ring->queued = 0;
719 	tx_ring->cur = 0;
720 }
721 
722 void
723 rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
724 {
725 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
726 	struct rtwn_tx_data *tx_data;
727 	int i;
728 
729 	if (tx_ring->map != NULL) {
730 		if (tx_ring->desc != NULL) {
731 			bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
732 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
733 			    sizeof (struct r92c_tx_desc_pci) *
734 			    RTWN_TX_LIST_COUNT);
735 			bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
736 		}
737 		bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
738 	}
739 
740 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
741 		tx_data = &tx_ring->tx_data[i];
742 
743 		if (tx_data->m != NULL) {
744 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
745 			m_freem(tx_data->m);
746 			tx_data->m = NULL;
747 		}
748 		bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
749 	}
750 
751 	sc->qfullmsk &= ~(1 << qid);
752 	tx_ring->queued = 0;
753 	tx_ring->cur = 0;
754 }
755 
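/*
 * Register accessors installed in sc_ops by rtwn_pci_attach().  The
 * chip's registers are little-endian, so 16- and 32-bit values are
 * converted with htole16()/htole32() on write and le16toh()/le32toh()
 * on read.
 */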
756 void
757 rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
758 {
759 	struct rtwn_pci_softc *sc = cookie;
760 
761 	bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
762 }
763 
764 void
765 rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
766 {
767 	struct rtwn_pci_softc *sc = cookie;
768 
769 	val = htole16(val);
770 	bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
771 }
772 
773 void
774 rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
775 {
776 	struct rtwn_pci_softc *sc = cookie;
777 
778 	val = htole32(val);
779 	bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
780 }
781 
782 uint8_t
783 rtwn_pci_read_1(void *cookie, uint16_t addr)
784 {
785 	struct rtwn_pci_softc *sc = cookie;
786 
787 	return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
788 }
789 
790 uint16_t
791 rtwn_pci_read_2(void *cookie, uint16_t addr)
792 {
793 	struct rtwn_pci_softc *sc = cookie;
794 	uint16_t val;
795 
796 	val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
797 	return le16toh(val);
798 }
799 
800 uint32_t
801 rtwn_pci_read_4(void *cookie, uint16_t addr)
802 {
803 	struct rtwn_pci_softc *sc = cookie;
804 	uint32_t val;
805 
806 	val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
807 	return le32toh(val);
808 }
809 
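/*
 * Process one received frame.  On RTL8188EE, Tx report (CCX) frames
 * share the Rx ring and are only used to update AMRR statistics.  For
 * regular frames a fresh cluster replaces the loaned one, the Rx
 * descriptor is recycled and the frame is handed to net80211.
 */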
810 void
811 rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
812     struct rtwn_rx_data *rx_data, int desc_idx, struct mbuf_list *ml)
813 {
814 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
815 	struct ifnet *ifp = &ic->ic_if;
816 	struct ieee80211_rxinfo rxi;
817 	struct ieee80211_frame *wh;
818 	struct ieee80211_node *ni;
819 	struct r92c_rx_phystat *phy = NULL;
820 	uint32_t rxdw0, rxdw3;
821 	struct mbuf *m, *m1;
822 	uint8_t rate;
823 	int8_t rssi = 0;
824 	int infosz, pktlen, shift, error;
825 
826 	rxdw0 = letoh32(rx_desc->rxdw0);
827 	rxdw3 = letoh32(rx_desc->rxdw3);
828 
829 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
830 		int ntries, type;
831 		struct r88e_tx_rpt_ccx *rxstat;
832 
833 		type = MS(rxdw3, R88E_RXDW3_RPT);
834 		if (type == R88E_RXDW3_RPT_TX1) {
835 			uint32_t rptb1, rptb2;
836 
837 			rxstat = mtod(rx_data->m, struct r88e_tx_rpt_ccx *);
838 			rptb1 = letoh32(rxstat->rptb1);
839 			rptb2 = letoh32(rxstat->rptb2);
840 			ntries = MS(rptb2, R88E_RPTB2_RETRY_CNT);
841 			if (rptb1 & R88E_RPTB1_PKT_OK)
842 				sc->amn.amn_txcnt++;
843 			if (ntries > 0)
844 				sc->amn.amn_retrycnt++;
845 
846 			rtwn_setup_rx_desc(sc, rx_desc,
847 			    rx_data->map->dm_segs[0].ds_addr, MCLBYTES,
848 			    desc_idx);
849 			return;
850 		}
851 	}
852 
853 	if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
854 		/*
855 		 * This should not happen since we set up our Rx filter
856 		 * to not receive these frames.
857 		 */
858 		ifp->if_ierrors++;
859 		return;
860 	}
861 
862 	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
863 	if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
864 		ifp->if_ierrors++;
865 		return;
866 	}
867 
868 	rate = MS(rxdw3, R92C_RXDW3_RATE);
869 	infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
870 	if (infosz > sizeof(struct r92c_rx_phystat))
871 		infosz = sizeof(struct r92c_rx_phystat);
872 	shift = MS(rxdw0, R92C_RXDW0_SHIFT);
873 
874 	/* Get RSSI from PHY status descriptor if present. */
875 	if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
876 		phy = mtod(rx_data->m, struct r92c_rx_phystat *);
877 		rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
878 		/* Update our average RSSI. */
879 		rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
880 	}
881 
882 	DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
883 	    pktlen, rate, infosz, shift, rssi));
884 
885 	m1 = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
886 	if (m1 == NULL) {
887 		ifp->if_ierrors++;
888 		return;
889 	}
890 	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
891 	error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
892 	    mtod(m1, void *), MCLBYTES, NULL,
893 	    BUS_DMA_NOWAIT | BUS_DMA_READ);
894 	if (error != 0) {
895 		m_freem(m1);
896 
897 		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
898 		    rx_data->m, BUS_DMA_NOWAIT))
899 			panic("%s: could not load old RX mbuf",
900 			    sc->sc_dev.dv_xname);
901 
902 		/* Physical address may have changed. */
903 		rtwn_setup_rx_desc(sc, rx_desc,
904 		    rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx);
905 
906 		ifp->if_ierrors++;
907 		return;
908 	}
909 
910 	/* Finalize mbuf. */
911 	m = rx_data->m;
912 	rx_data->m = m1;
913 	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
914 
915 	/* Update RX descriptor. */
916 	rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr,
917 	    MCLBYTES, desc_idx);
918 
919 	/* Get ieee80211 frame header. */
920 	if (rxdw0 & R92C_RXDW0_PHYST)
921 		m_adj(m, infosz + shift);
922 	else
923 		m_adj(m, shift);
924 	wh = mtod(m, struct ieee80211_frame *);
925 
926 #if NBPFILTER > 0
927 	if (__predict_false(sc->sc_drvbpf != NULL)) {
928 		struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
929 		struct mbuf mb;
930 
931 		tap->wr_flags = 0;
932 		/* Map HW rate index to 802.11 rate. */
933 		tap->wr_flags = 2;
934 		if (!(rxdw3 & R92C_RXDW3_HT)) {
935 			switch (rate) {
936 			/* CCK. */
937 			case  0: tap->wr_rate =   2; break;
938 			case  1: tap->wr_rate =   4; break;
939 			case  2: tap->wr_rate =  11; break;
940 			case  3: tap->wr_rate =  22; break;
941 			/* OFDM. */
942 			case  4: tap->wr_rate =  12; break;
943 			case  5: tap->wr_rate =  18; break;
944 			case  6: tap->wr_rate =  24; break;
945 			case  7: tap->wr_rate =  36; break;
946 			case  8: tap->wr_rate =  48; break;
947 			case  9: tap->wr_rate =  72; break;
948 			case 10: tap->wr_rate =  96; break;
949 			case 11: tap->wr_rate = 108; break;
950 			}
951 		} else if (rate >= 12) {	/* MCS0~15. */
952 			/* Bit 7 set means HT MCS instead of rate. */
953 			tap->wr_rate = 0x80 | (rate - 12);
954 		}
955 		tap->wr_dbm_antsignal = rssi;
956 		tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq);
957 		tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags);
958 
959 		mb.m_data = (caddr_t)tap;
960 		mb.m_len = sc->sc_rxtap_len;
961 		mb.m_next = m;
962 		mb.m_nextpkt = NULL;
963 		mb.m_type = 0;
964 		mb.m_flags = 0;
965 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
966 	}
967 #endif
968 
969 	ni = ieee80211_find_rxnode(ic, wh);
970 	memset(&rxi, 0, sizeof(rxi));
971 	rxi.rxi_rssi = rssi;
972 	ieee80211_inputm(ifp, m, ni, &rxi, ml);
973 	/* Node is no longer needed. */
974 	ieee80211_release_node(ic, ni);
975 }
976 
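/*
 * Transmit one frame.  QoS data frames are mapped to the Tx queue of
 * their access category, frames other than data (e.g. management) go
 * to the VO queue and non-QoS data frames to the BE queue.  The
 * descriptor is filled, ownership is passed to the hardware via
 * R92C_TXDW0_OWN and the queue is kicked through R92C_PCIE_CTRL_REG.
 */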
977 int
978 rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni)
979 {
980 	struct rtwn_pci_softc *sc = cookie;
981 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
982 	struct ieee80211_frame *wh;
983 	struct ieee80211_key *k = NULL;
984 	struct rtwn_tx_ring *tx_ring;
985 	struct rtwn_tx_data *data;
986 	struct r92c_tx_desc_pci *txd;
987 	uint16_t qos;
988 	uint8_t raid, type, tid, qid;
989 	int hasqos, error;
990 
991 	wh = mtod(m, struct ieee80211_frame *);
992 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
993 
994 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
995 		k = ieee80211_get_txkey(ic, wh, ni);
996 		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
997 			return (ENOBUFS);
998 		wh = mtod(m, struct ieee80211_frame *);
999 	}
1000 
1001 	if ((hasqos = ieee80211_has_qos(wh))) {
1002 		qos = ieee80211_get_qos(wh);
1003 		tid = qos & IEEE80211_QOS_TID;
1004 		qid = ieee80211_up_to_ac(ic, tid);
1005 	} else if (type != IEEE80211_FC0_TYPE_DATA) {
1006 		qid = RTWN_VO_QUEUE;
1007 	} else
1008 		qid = RTWN_BE_QUEUE;
1009 
1010 	/* Grab a Tx buffer from the ring. */
1011 	tx_ring = &sc->tx_ring[qid];
1012 	data = &tx_ring->tx_data[tx_ring->cur];
1013 	if (data->m != NULL) {
1014 		m_freem(m);
1015 		return (ENOBUFS);
1016 	}
1017 
1018 	/* Fill Tx descriptor. */
1019 	txd = &tx_ring->desc[tx_ring->cur];
1020 	if (htole32(txd->txdw0) & R92C_TXDW0_OWN) {
1021 		m_freem(m);
1022 		return (ENOBUFS);
1023 	}
1024 	txd->txdw0 = htole32(
1025 	    SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) |
1026 	    SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
1027 	    R92C_TXDW0_FSG | R92C_TXDW0_LSG);
1028 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
1029 		txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
1030 
1031 	txd->txdw1 = 0;
1032 #ifdef notyet
1033 	if (k != NULL) {
1034 		switch (k->k_cipher) {
1035 		case IEEE80211_CIPHER_WEP40:
1036 		case IEEE80211_CIPHER_WEP104:
1037 		case IEEE80211_CIPHER_TKIP:
1038 			cipher = R92C_TXDW1_CIPHER_RC4;
1039 			break;
1040 		case IEEE80211_CIPHER_CCMP:
1041 			cipher = R92C_TXDW1_CIPHER_AES;
1042 			break;
1043 		default:
1044 			cipher = R92C_TXDW1_CIPHER_NONE;
1045 		}
1046 		txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher));
1047 	}
1048 #endif
1049 	txd->txdw4 = 0;
1050 	txd->txdw5 = 0;
1051 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
1052 	    type == IEEE80211_FC0_TYPE_DATA) {
1053 		if (ic->ic_curmode == IEEE80211_MODE_11B ||
1054 		    (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B))
1055 			raid = R92C_RAID_11B;
1056 		else
1057 			raid = R92C_RAID_11BG;
1058 
1059 		if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1060 			txd->txdw1 |= htole32(
1061 			    SM(R88E_TXDW1_MACID, R92C_MACID_BSS) |
1062 			    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1063 			    SM(R92C_TXDW1_RAID, raid));
1064 			txd->txdw2 |= htole32(R88E_TXDW2_AGGBK);
1065 		} else {
1066 			txd->txdw1 |= htole32(
1067 			    SM(R92C_TXDW1_MACID, R92C_MACID_BSS) |
1068 			    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1069 			    SM(R92C_TXDW1_RAID, raid) |
1070 			    R92C_TXDW1_AGGBK);
1071 		}
1072 
1073 		/* Request TX status report for AMRR. */
1074 		txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT);
1075 
1076 		if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
1077 			txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1078 			    R92C_TXDW4_HWRTSEN);
1079 		} else if (ic->ic_flags & IEEE80211_F_USEPROT) {
1080 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1081 				txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
1082 				    R92C_TXDW4_HWRTSEN);
1083 			} else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1084 				txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1085 				    R92C_TXDW4_HWRTSEN);
1086 			}
1087 		}
1088 
1089 		if (ic->ic_curmode == IEEE80211_MODE_11B)
1090 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 0));
1091 		else
1092 			txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 8));
1093 		txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf));
1094 
1095 		/* Use AMRR rate for data. */
1096 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1097 		if (ic->ic_fixed_rate != -1)
1098 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1099 			    ic->ic_fixed_rate));
1100 		else
1101 			txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1102 			    ni->ni_txrate));
1103 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));
1104 	} else {
1105 		txd->txdw1 |= htole32(
1106 		    SM(R92C_TXDW1_MACID, 0) |
1107 		    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
1108 		    SM(R92C_TXDW1_RAID, R92C_RAID_11B));
1109 
1110 		/* Force CCK1. */
1111 		txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1112 		txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
1113 	}
1114 	/* Set sequence number (already little endian). */
1115 	txd->txdseq = (*(uint16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
1116 	if (sc->sc_sc.chip & RTWN_CHIP_23A)
1117 		txd->txdseq |= htole16(R23A_TXDW3_TXRPTEN);
1118 
1119 	if (!hasqos) {
1120 		/* Use HW sequence numbering for non-QoS frames. */
1121 		if (!(sc->sc_sc.chip & RTWN_CHIP_23A))
1122 			txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ);
1123 		txd->txdseq |= htole16(R92C_TXDW3_HWSEQEN);
1124 	} else
1125 		txd->txdw4 |= htole32(R92C_TXDW4_QOS);
1126 
1127 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1128 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1129 	if (error && error != EFBIG) {
1130 		printf("%s: can't map mbuf (error %d)\n",
1131 		    sc->sc_dev.dv_xname, error);
1132 		m_freem(m);
1133 		return error;
1134 	}
1135 	if (error != 0) {
1136 		/* Too many DMA segments, linearize mbuf. */
1137 		if (m_defrag(m, M_DONTWAIT)) {
1138 			m_freem(m);
1139 			return ENOBUFS;
1140 		}
1141 
1142 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1143 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1144 		if (error != 0) {
1145 			printf("%s: can't map mbuf (error %d)\n",
1146 			    sc->sc_dev.dv_xname, error);
1147 			m_freem(m);
1148 			return error;
1149 		}
1150 	}
1151 
1152 	txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
1153 	txd->txbufsize = htole16(m->m_pkthdr.len);
1154 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
1155 	    BUS_SPACE_BARRIER_WRITE);
1156 	txd->txdw0 |= htole32(R92C_TXDW0_OWN);
1157 
1158 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1159 	    BUS_DMASYNC_POSTWRITE);
1160 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
1161 	    BUS_DMASYNC_POSTWRITE);
1162 
1163 	data->m = m;
1164 	data->ni = ni;
1165 
1166 #if NBPFILTER > 0
1167 	if (__predict_false(sc->sc_drvbpf != NULL)) {
1168 		struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
1169 		struct mbuf mb;
1170 
1171 		tap->wt_flags = 0;
1172 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1173 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1174 
1175 		mb.m_data = (caddr_t)tap;
1176 		mb.m_len = sc->sc_txtap_len;
1177 		mb.m_next = m;
1178 		mb.m_nextpkt = NULL;
1179 		mb.m_type = 0;
1180 		mb.m_flags = 0;
1181 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1182 	}
1183 #endif
1184 
1185 	tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
1186 	tx_ring->queued++;
1187 
1188 	if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
1189 		sc->qfullmsk |= (1 << qid);
1190 
1191 	/* Kick TX. */
1192 	rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));
1193 
1194 	return (0);
1195 }
1196 
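/*
 * Reclaim descriptors the hardware has released (OWN bit cleared),
 * free the associated mbufs and node references, and restart the
 * interface send queue once all Tx queues have room again.
 */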
1197 void
1198 rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
1199 {
1200 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1201 	struct ifnet *ifp = &ic->ic_if;
1202 	struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
1203 	struct rtwn_tx_data *tx_data;
1204 	struct r92c_tx_desc_pci *tx_desc;
1205 	int i;
1206 
1207 	bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1208 	    BUS_DMASYNC_POSTREAD);
1209 
1210 	for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
1211 		tx_data = &tx_ring->tx_data[i];
1212 		if (tx_data->m == NULL)
1213 			continue;
1214 
1215 		tx_desc = &tx_ring->desc[i];
1216 		if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
1217 			continue;
1218 
1219 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
1220 		m_freem(tx_data->m);
1221 		tx_data->m = NULL;
1222 		ieee80211_release_node(ic, tx_data->ni);
1223 		tx_data->ni = NULL;
1224 
1225 		sc->sc_sc.sc_tx_timer = 0;
1226 		tx_ring->queued--;
1227 
1228 		if (!(sc->sc_sc.chip & RTWN_CHIP_23A))
1229 			rtwn_poll_c2h_events(sc);
1230 	}
1231 
1232 	if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
1233 		sc->qfullmsk &= ~(1 << qid);
1234 
1235 	if (sc->qfullmsk == 0) {
1236 		ifq_clr_oactive(&ifp->if_snd);
1237 		(*ifp->if_start)(ifp);
1238 	}
1239 }
1240 
1241 int
1242 rtwn_alloc_buffers(void *cookie)
1243 {
1244 	/* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
1245 	return (0);
1246 }
1247 
1248 int
1249 rtwn_pci_init(void *cookie)
1250 {
1251 	struct rtwn_pci_softc *sc = cookie;
1252 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
1253 
1254 	/* Enable TX reports for AMRR */
1255 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1256 		rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL,
1257 		    (rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) & ~0) |
1258 		    R88E_TX_RPT_CTRL_EN);
1259 		rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL + 1, 0x02);
1260 
1261 		rtwn_pci_write_2(sc, R88E_TX_RPT_TIME, 0xcdf0);
1262 	}
1263 
1264 	return (0);
1265 }
1266 
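/*
 * Chip-specific shutdown for RTL8188CE/RTL8192CE: mask interrupts,
 * pause Tx, pulse the baseband global reset, disable the MAC DMA and
 * protocol blocks and reset the firmware CPU if firmware was running
 * from RAM.
 */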
1267 void
1268 rtwn_pci_92c_stop(struct rtwn_pci_softc *sc)
1269 {
1270 	uint16_t reg;
1271 
1272 	/* Disable interrupts. */
1273 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1274 
1275 	/* Stop hardware. */
1276 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1277 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1278 	reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
1279 	reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
1280 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1281 	reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
1282 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1283 	reg = rtwn_pci_read_2(sc, R92C_CR);
1284 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1285 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1286 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1287 	    R92C_CR_ENSEC);
1288 	rtwn_pci_write_2(sc, R92C_CR, reg);
1289 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1290 		rtwn_fw_reset(&sc->sc_sc);
1291 	/* TODO: linux does additional btcoex stuff here */
1292 	rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80); /* linux magic number */
1293 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23); /* ditto */
1294 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e); /* differs in btcoex */
1295 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1296 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1297 	rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);
1298 }
1299 
1300 void
1301 rtwn_pci_88e_stop(struct rtwn_pci_softc *sc)
1302 {
1303 	int i;
1304 	uint16_t reg;
1305 
1306 	/* Disable interrupts. */
1307 	rtwn_pci_write_4(sc, R88E_HIMR, 0x00000000);
1308 
1309 	/* Stop hardware. */
1310 	rtwn_pci_write_1(sc, R88E_TX_RPT_CTRL,
1311 	    rtwn_pci_read_1(sc, R88E_TX_RPT_CTRL) &
1312 	    ~(R88E_TX_RPT_CTRL_EN));
1313 
1314 	for (i = 0; i < 100; i++) {
1315 		if (rtwn_pci_read_1(sc, R88E_RXDMA_CTRL) & 0x02)
1316 			break;
1317 		DELAY(10);
1318 	}
1319 	if (i == 100)
1320 		DPRINTF(("rxdma ctrl didn't go off, %x\n", rtwn_pci_read_1(sc, R88E_RXDMA_CTRL)));
1321 
1322 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff);
1323 
1324 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1325 
1326 	/* ensure transmission has stopped */
1327 	for (i = 0; i < 100; i++) {
1328 		if (rtwn_pci_read_4(sc, 0x5f8) == 0)
1329 			break;
1330 		DELAY(10);
1331 	}
1332 	if (i == 100)
1333 		DPRINTF(("tx didn't stop\n"));
1334 
1335 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1336 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1337 	    ~(R92C_SYS_FUNC_EN_BBRSTB));
1338 	DELAY(1);
1339 	reg = rtwn_pci_read_2(sc, R92C_CR);
1340 	reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1341 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1342 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1343 	    R92C_CR_ENSEC);
1344 	rtwn_pci_write_2(sc, R92C_CR, reg);
1345 	rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST,
1346 	    rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20);
1347 
1348 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1349 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1350 		rtwn_fw_reset(&sc->sc_sc);
1351 
1352 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1,
1353 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~0x02);
1354 	rtwn_pci_write_1(sc, R92C_MCUFWDL, 0);
1355 
1356 	rtwn_pci_write_1(sc, R88E_32K_CTRL,
1357 	    rtwn_pci_read_1(sc, R88E_32K_CTRL) & ~(0x01));
1358 
1359 	/* transition to cardemu state */
1360 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0);
1361 	rtwn_pci_write_1(sc, R92C_LPLDO_CTRL,
1362 	    rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) | 0x10);
1363 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1364 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_OFF);
1365 	for (i = 0; i < 100; i++) {
1366 		if ((rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1367 		    R92C_APS_FSMCO_APFM_OFF) == 0)
1368 			break;
1369 		DELAY(10);
1370 	}
1371 	if (i == 100)
1372 		DPRINTF(("apfm off didn't go off\n"));
1373 
1374 	/* transition to card disabled state */
1375 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2,
1376 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80);
1377 
1378 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1379 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08);
1380 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1381 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08);
1382 
1383 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1384 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1385 }
1386 
1387 void
1388 rtwn_pci_23a_stop(struct rtwn_pci_softc *sc)
1389 {
1390 	int i;
1391 
1392 	/* Disable interrupts. */
1393 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1394 
1395 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0xff);
1396 	rtwn_pci_write_1(sc, R92C_TXPAUSE, R92C_TXPAUSE_ALL);
1397 
1398 	/* ensure transmission has stopped */
1399 	for (i = 0; i < 100; i++) {
1400 		if (rtwn_pci_read_4(sc, 0x5f8) == 0)
1401 			break;
1402 		DELAY(10);
1403 	}
1404 	if (i == 100)
1405 		DPRINTF(("tx didn't stop\n"));
1406 
1407 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1408 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1409 	    ~(R92C_SYS_FUNC_EN_BBRSTB));
1410 	DELAY(1);
1411 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1412 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) &
1413 	    ~(R92C_SYS_FUNC_EN_BB_GLB_RST));
1414 
1415 	rtwn_pci_write_2(sc, R92C_CR,
1416 	    rtwn_pci_read_2(sc, R92C_CR) &
1417 	    ~(R92C_CR_MACTXEN | R92C_CR_MACRXEN | R92C_CR_ENSWBCN));
1418 
1419 	rtwn_pci_write_1(sc, R92C_DUAL_TSF_RST,
1420 	    rtwn_pci_read_1(sc, R92C_DUAL_TSF_RST) | 0x20);
1421 
1422 	/* Turn off RF */
1423 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1424 	if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1425 		rtwn_fw_reset(&sc->sc_sc);
1426 
1427 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN + 1,
1428 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN + 1) & ~R92C_SYS_FUNC_EN_DIOE);
1429 	rtwn_pci_write_1(sc, R92C_MCUFWDL, 0);
1430 
1431 	rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1432 	rtwn_pci_write_1(sc, R92C_LEDCFG2, rtwn_pci_read_1(sc, R92C_LEDCFG2) & ~(0x80));
1433 	rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) |
1434 	    R92C_APS_FSMCO_APFM_OFF);
1435 	rtwn_pci_write_2(sc, R92C_APS_FSMCO, rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1436 	    ~(R92C_APS_FSMCO_APFM_OFF));
1437 
1438 	rtwn_pci_write_4(sc, R92C_APS_FSMCO,
1439 	    rtwn_pci_read_4(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_RDY_MACON);
1440 	rtwn_pci_write_4(sc, R92C_APS_FSMCO,
1441 	    rtwn_pci_read_4(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APDM_HPDN);
1442 
1443 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1444 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) & ~R92C_RSV_CTRL_WLOCK_08);
1445 	rtwn_pci_write_1(sc, R92C_RSV_CTRL + 1,
1446 	    rtwn_pci_read_1(sc, R92C_RSV_CTRL + 1) | R92C_RSV_CTRL_WLOCK_08);
1447 
1448 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00 |
1449 	    R92C_RSV_CTRL_WLOCK_04 | R92C_RSV_CTRL_WLOCK_08);
1450 }
1451 
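/*
 * Stop the hardware using the chip-specific routine, then reset every
 * Tx ring and the Rx ring so that a subsequent init starts clean.
 */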
1452 void
1453 rtwn_pci_stop(void *cookie)
1454 {
1455 	struct rtwn_pci_softc *sc = cookie;
1456 	int i, s;
1457 
1458 	s = splnet();
1459 
1460 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1461 		rtwn_pci_88e_stop(sc);
1462 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
1463 		rtwn_pci_23a_stop(sc);
1464 	} else {
1465 		rtwn_pci_92c_stop(sc);
1466 	}
1467 
1468 	for (i = 0; i < RTWN_NTXQUEUES; i++)
1469 		rtwn_reset_tx_list(sc, i);
1470 	rtwn_reset_rx_list(sc);
1471 
1472 	splx(s);
1473 }
1474 
1475 int
1476 rtwn_88e_intr(struct rtwn_pci_softc *sc)
1477 {
1478 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1479 	u_int32_t status, estatus;
1480 	int i;
1481 
1482 	status = rtwn_pci_read_4(sc, R88E_HISR);
1483 	if (status == 0 || status == 0xffffffff)
1484 		return (0);
1485 
1486 	estatus = rtwn_pci_read_4(sc, R88E_HISRE);
1487 
1488 	status &= RTWN_88E_INT_ENABLE;
1489 	estatus &= R88E_HIMRE_RXFOVW;
1490 
1491 	rtwn_pci_write_4(sc, R88E_HIMR, 0);
1492 	rtwn_pci_write_4(sc, R88E_HIMRE, 0);
1493 	rtwn_pci_write_4(sc, R88E_HISR, status);
1494 	rtwn_pci_write_4(sc, R88E_HISRE, estatus);
1495 
1496 	if (status & R88E_HIMR_HIGHDOK)
1497 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1498 	if (status & R88E_HIMR_MGNTDOK)
1499 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1500 	if (status & R88E_HIMR_BKDOK)
1501 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1502 	if (status & R88E_HIMR_BEDOK)
1503 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1504 	if (status & R88E_HIMR_VIDOK)
1505 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1506 	if (status & R88E_HIMR_VODOK)
1507 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1508 	if ((status & (R88E_HIMR_ROK | R88E_HIMR_RDU)) ||
1509 	    (estatus & R88E_HIMRE_RXFOVW)) {
1510 		struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1511 
1512 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1513 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1514 		    BUS_DMASYNC_POSTREAD);
1515 
1516 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1517 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1518 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1519 
1520 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1521 				continue;
1522 
1523 			rtwn_rx_frame(sc, rx_desc, rx_data, i, &ml);
1524 		}
1525 		if_input(&ic->ic_if, &ml);
1526 	}
1527 
1528 	if (status & R88E_HIMR_HSISR_IND_ON_INT) {
1529 		rtwn_pci_write_1(sc, R92C_HSISR,
1530 		    rtwn_pci_read_1(sc, R92C_HSISR) |
1531 		    R88E_HSIMR_PDN_INT_EN | R88E_HSIMR_RON_INT_EN);
1532 	}
1533 
1534 	/* Enable interrupts. */
1535 	rtwn_pci_write_4(sc, R88E_HIMR, RTWN_88E_INT_ENABLE);
1536 	rtwn_pci_write_4(sc, R88E_HIMRE, R88E_HIMRE_RXFOVW);
1537 
1538 	return (1);
1539 }
1540 
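/*
 * Main interrupt handler.  RTL8188EE uses a different interrupt
 * register layout and is handled by rtwn_88e_intr(); for the other
 * chips this acks R92C_HISR, drains the Rx ring on ROK/RXFOVW/RDU,
 * completes Tx on the per-queue "done" bits and re-enables interrupts.
 */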
1541 int
1542 rtwn_intr(void *xsc)
1543 {
1544 	struct rtwn_pci_softc *sc = xsc;
1545 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1546 	u_int32_t status;
1547 	int i;
1548 
1549 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1550 		return (rtwn_88e_intr(sc));
1551 
1552 	status = rtwn_pci_read_4(sc, R92C_HISR);
1553 	if (status == 0 || status == 0xffffffff)
1554 		return (0);
1555 
1556 	/* Disable interrupts. */
1557 	rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1558 
1559 	/* Ack interrupts. */
1560 	rtwn_pci_write_4(sc, R92C_HISR, status);
1561 
1562 	/* Vendor driver treats RX errors like ROK... */
1563 	if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) {
1564 		struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1565 
1566 		bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1567 		    sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1568 		    BUS_DMASYNC_POSTREAD);
1569 
1570 		for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1571 			struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1572 			struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1573 
1574 			if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1575 				continue;
1576 
1577 			rtwn_rx_frame(sc, rx_desc, rx_data, i, &ml);
1578 		}
1579 		if_input(&ic->ic_if, &ml);
1580 	}
1581 
1582 	if (status & R92C_IMR_BDOK)
1583 		rtwn_tx_done(sc, RTWN_BEACON_QUEUE);
1584 	if (status & R92C_IMR_HIGHDOK)
1585 		rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1586 	if (status & R92C_IMR_MGNTDOK)
1587 		rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1588 	if (status & R92C_IMR_BKDOK)
1589 		rtwn_tx_done(sc, RTWN_BK_QUEUE);
1590 	if (status & R92C_IMR_BEDOK)
1591 		rtwn_tx_done(sc, RTWN_BE_QUEUE);
1592 	if (status & R92C_IMR_VIDOK)
1593 		rtwn_tx_done(sc, RTWN_VI_QUEUE);
1594 	if (status & R92C_IMR_VODOK)
1595 		rtwn_tx_done(sc, RTWN_VO_QUEUE);
1596 
1597 	if (sc->sc_sc.chip & RTWN_CHIP_23A) {
1598 		if (status & R92C_IMR_ATIMEND)
1599 			rtwn_poll_c2h_events(sc);
1600 	}
1601 
1602 	/* Enable interrupts. */
1603 	rtwn_pci_write_4(sc, R92C_HIMR, RTWN_92C_INT_ENABLE);
1604 
1605 	return (1);
1606 }
1607 
1608 int
1609 rtwn_is_oactive(void *cookie)
1610 {
1611 	struct rtwn_pci_softc *sc = cookie;
1612 
1613 	return (sc->qfullmsk != 0);
1614 }
1615 
1616 int
1617 rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data)
1618 {
1619 	int ntries;
1620 
1621 	rtwn_pci_write_4(sc, R92C_LLT_INIT,
1622 	    SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
1623 	    SM(R92C_LLT_INIT_ADDR, addr) |
1624 	    SM(R92C_LLT_INIT_DATA, data));
1625 	/* Wait for write operation to complete. */
1626 	for (ntries = 0; ntries < 20; ntries++) {
1627 		if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
1628 		    R92C_LLT_INIT_OP_NO_ACTIVE)
1629 			return (0);
1630 		DELAY(5);
1631 	}
1632 	return (ETIMEDOUT);
1633 }
1634 
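/*
 * Initialize the link list table chaining Tx buffer pages: pages
 * [0; page_count] form the reserved chain terminated by 0xff, and the
 * remaining pages form a ring that wraps back to page_count + 1.
 * As a hypothetical example, page_count = 2 and pktbuf_count = 6
 * would yield
 *	0 -> 1 -> 2 -> 0xff	(reserved pages)
 *	3 -> 4 -> 5 -> 3	(ring buffer)
 */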
1635 int
1636 rtwn_llt_init(struct rtwn_pci_softc *sc, int page_count)
1637 {
1638 	int i, error, pktbuf_count;
1639 
1640 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1641 		pktbuf_count = R88E_TXPKTBUF_COUNT;
1642 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
1643 		pktbuf_count = R23A_TXPKTBUF_COUNT;
1644 	else
1645 		pktbuf_count = R92C_TXPKTBUF_COUNT;
1646 
1647 	/* Reserve pages [0; page_count]. */
1648 	for (i = 0; i < page_count; i++) {
1649 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1650 			return (error);
1651 	}
1652 	/* NB: 0xff indicates end-of-list. */
1653 	if ((error = rtwn_llt_write(sc, i, 0xff)) != 0)
1654 		return (error);
1655 	/*
1656 	 * Use pages [page_count + 1; pktbuf_count - 1]
1657 	 * as ring buffer.
1658 	 */
1659 	for (++i; i < pktbuf_count - 1; i++) {
1660 		if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1661 			return (error);
1662 	}
1663 	/* Make the last page point to the beginning of the ring buffer. */
1664 	error = rtwn_llt_write(sc, i, page_count + 1);
1665 	return (error);
1666 }
1667 
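/*
 * Power-on sequence for RTL8188CE/RTL8192CE: wait for autoload done,
 * unlock the power control registers, let the hardware power up the
 * MAC (APFM_ONMAC), honour the RF-kill switch and finally enable the
 * MAC DMA/WMAC/SCHEDULE/SEC function blocks.
 */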
1668 int
1669 rtwn_92c_power_on(struct rtwn_pci_softc *sc)
1670 {
1671 	uint32_t reg;
1672 	int ntries;
1673 
1674 	/* Wait for autoload done bit. */
1675 	for (ntries = 0; ntries < 1000; ntries++) {
1676 		if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) &
1677 		    R92C_APS_FSMCO_PFM_ALDN)
1678 			break;
1679 		DELAY(5);
1680 	}
1681 	if (ntries == 1000) {
1682 		printf("%s: timeout waiting for chip autoload\n",
1683 		    sc->sc_dev.dv_xname);
1684 		return (ETIMEDOUT);
1685 	}
1686 
1687 	/* Unlock ISO/CLK/Power control register. */
1688 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1689 
1690 	/* TODO: check if we need this for 8188CE */
1691 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1692 		/* bt coex */
1693 		reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO);
1694 		reg |= (R92C_APS_FSMCO_SOP_ABG |
1695 			R92C_APS_FSMCO_SOP_AMB |
1696 			R92C_APS_FSMCO_XOP_BTCK);
1697 		rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg);
1698 	}
1699 
1700 	/* Move SPS into PWM mode. */
1701 	rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b);
1702 	DELAY(100);
1703 
1704 	/* Set low byte to 0x0f, leave others unchanged. */
1705 	rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL,
1706 	    (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f);
1707 
1708 	/* TODO: check if we need this for 8188CE */
1709 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1710 		/* bt coex */
1711 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL);
1712 		reg &= (~0x00024800); /* XXX magic from linux */
1713 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg);
1714 	}
1715 
1716 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1717 	  (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) |
1718 	  R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR);
1719 	DELAY(200);
1720 
1721 	/* TODO: linux does additional btcoex stuff here */
1722 
1723 	/* Auto enable WLAN. */
1724 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1725 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1726 	for (ntries = 0; ntries < 1000; ntries++) {
1727 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1728 		    R92C_APS_FSMCO_APFM_ONMAC))
1729 			break;
1730 		DELAY(5);
1731 	}
1732 	if (ntries == 1000) {
1733 		printf("%s: timeout waiting for MAC auto ON\n",
1734 		    sc->sc_dev.dv_xname);
1735 		return (ETIMEDOUT);
1736 	}
1737 
1738 	/* Enable radio, GPIO and LED functions. */
1739 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1740 	    R92C_APS_FSMCO_AFSM_PCIE |
1741 	    R92C_APS_FSMCO_PDN_EN |
1742 	    R92C_APS_FSMCO_PFM_ALDN);
1743 	/* Release RF digital isolation. */
1744 	rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1745 	    rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);
1746 
1747 	if (sc->sc_sc.chip & RTWN_CHIP_92C)
1748 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77);
1749 	else
1750 		rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22);
1751 
1752 	rtwn_pci_write_4(sc, R92C_INT_MIG, 0);
1753 
1754 	if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1755 		/* bt coex */
1756 		reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2);
1757 		reg &= 0xfd; /* XXX magic from linux */
1758 		rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg);
1759 	}
1760 
1761 	rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG,
1762 	    rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL);
1763 
1764 	reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL);
1765 	if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) {
1766 		printf("%s: radio is disabled by hardware switch\n",
1767 		    sc->sc_dev.dv_xname);
1768 		return (EPERM);	/* :-) */
1769 	}
1770 
1771 	/* Initialize MAC. */
1772 	rtwn_pci_write_1(sc, R92C_APSD_CTRL,
1773 	    rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
1774 	for (ntries = 0; ntries < 200; ntries++) {
1775 		if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) &
1776 		    R92C_APSD_CTRL_OFF_STATUS))
1777 			break;
1778 		DELAY(500);
1779 	}
1780 	if (ntries == 200) {
1781 		printf("%s: timeout waiting for MAC initialization\n",
1782 		    sc->sc_dev.dv_xname);
1783 		return (ETIMEDOUT);
1784 	}
1785 
1786 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1787 	reg = rtwn_pci_read_2(sc, R92C_CR);
1788 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1789 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1790 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1791 	    R92C_CR_ENSEC;
1792 	rtwn_pci_write_2(sc, R92C_CR, reg);
1793 
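	/* XXX undocumented magic register write. */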
1794 	rtwn_pci_write_1(sc, 0xfe10, 0x19);
1795 
1796 	return (0);
1797 }
1798 
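/*
 * Power-on sequence for the 88E: release power-down and WL suspend,
 * wait for the power-ready bit, auto-enable the WLAN MAC, switch the
 * LDO to normal mode and bring up the MAC DMA/WMAC/SCHEDULE/SEC blocks.
 */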
1799 int
1800 rtwn_88e_power_on(struct rtwn_pci_softc *sc)
1801 {
1802 	uint32_t reg;
1803 	int ntries;
1804 
1805 	/* Disable XTAL output for power saving. */
1806 	rtwn_pci_write_1(sc, R88E_XCK_OUT_CTRL,
1807 	    rtwn_pci_read_1(sc, R88E_XCK_OUT_CTRL) & ~R88E_XCK_OUT_CTRL_EN);
1808 
1809 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1810 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & (~R92C_APS_FSMCO_APDM_HPDN));
1811 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1812 
1813 	/* Wait for power ready bit. */
1814 	for (ntries = 0; ntries < 5000; ntries++) {
1815 		if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST)
1816 			break;
1817 		DELAY(10);
1818 	}
1819 	if (ntries == 5000) {
1820 		printf("%s: timeout waiting for chip power up\n",
1821 		    sc->sc_dev.dv_xname);
1822 		return (ETIMEDOUT);
1823 	}
1824 
1825 	/* Reset BB. */
1826 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1827 	    rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN) & ~(R92C_SYS_FUNC_EN_BBRSTB |
1828 	    R92C_SYS_FUNC_EN_BB_GLB_RST));
1829 
1830 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 2,
1831 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80);
1832 
1833 	/* Disable HWPDN. */
1834 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1835 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN);
1836 	/* Disable WL suspend. */
1837 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1838 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1839 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1840 
1841 	/* Auto enable WLAN. */
1842 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1843 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1844 	for (ntries = 0; ntries < 5000; ntries++) {
1845 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1846 		    R92C_APS_FSMCO_APFM_ONMAC))
1847 			break;
1848 		DELAY(10);
1849 	}
1850 	if (ntries == 5000) {
1851 		printf("%s: timeout waiting for MAC auto ON\n",
1852 		    sc->sc_dev.dv_xname);
1853 		return (ETIMEDOUT);
1854 	}
1855 
1856 	/* Enable LDO normal mode. */
1857 	rtwn_pci_write_1(sc, R92C_LPLDO_CTRL,
1858 	    rtwn_pci_read_1(sc, R92C_LPLDO_CTRL) & ~0x10);
1859 
1860 	rtwn_pci_write_1(sc, R92C_APS_FSMCO,
1861 	    rtwn_pci_read_1(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_PDN_EN);
1862 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2,
1863 	    rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04);
1864 
1865 	rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1,
1866 	    rtwn_pci_read_1(sc, R92C_AFE_XTAL_CTRL_EXT + 1) | 0x02);
1867 
1868 	rtwn_pci_write_1(sc, R92C_SYS_CLKR,
1869 	    rtwn_pci_read_1(sc, R92C_SYS_CLKR) | 0x08);
1870 
1871 	rtwn_pci_write_2(sc, R92C_GPIO_MUXCFG,
1872 	    rtwn_pci_read_2(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENSIC);
1873 
1874 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1875 	rtwn_pci_write_2(sc, R92C_CR, 0);
1876 	reg = rtwn_pci_read_2(sc, R92C_CR);
1877 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1878 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1879 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1880 	    R92C_CR_ENSEC | R92C_CR_CALTMR_EN;
1881 	rtwn_pci_write_2(sc, R92C_CR, reg);
1882 
1883 	rtwn_pci_write_1(sc, R92C_MSR, 0);
1884 	return (0);
1885 }
1886 
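/*
 * Power-on sequence for the 23A: release reset, power-down and WL
 * suspend, wait for the power-ready bit, auto-enable the WLAN MAC,
 * configure the PCIe ePHY and bring up the MAC DMA/WMAC/SCHEDULE/SEC
 * blocks.
 */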
1887 int
1888 rtwn_23a_power_on(struct rtwn_pci_softc *sc)
1889 {
1890 	uint32_t reg;
1891 	int ntries;
1892 
1893 	rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x00);
1894 
1895 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1896 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1897 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1898 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 1, 0x00);
1899 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1900 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APFM_RSM);
1901 
1902 	/* Wait for power ready bit. */
1903 	for (ntries = 0; ntries < 5000; ntries++) {
1904 		if (rtwn_pci_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST)
1905 			break;
1906 		DELAY(10);
1907 	}
1908 	if (ntries == 5000) {
1909 		printf("%s: timeout waiting for chip power up\n",
1910 		    sc->sc_dev.dv_xname);
1911 		return (ETIMEDOUT);
1912 	}
1913 
1914 	/* Release WLON reset. */
1915 	rtwn_pci_write_4(sc, R92C_APS_FSMCO, rtwn_pci_read_4(sc, R92C_APS_FSMCO) |
1916 	    R92C_APS_FSMCO_RDY_MACON);
1917 	/* Disable HWPDN. */
1918 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1919 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN);
1920 	/* Disable WL suspend. */
1921 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1922 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1923 	    ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
1924 
1925 	/* Auto enable WLAN. */
1926 	rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1927 	    rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1928 	for (ntries = 0; ntries < 5000; ntries++) {
1929 		if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1930 		    R92C_APS_FSMCO_APFM_ONMAC))
1931 			break;
1932 		DELAY(10);
1933 	}
1934 	if (ntries == 5000) {
1935 		printf("%s: timeout waiting for MAC auto ON (%x)\n",
1936 		    sc->sc_dev.dv_xname, rtwn_pci_read_2(sc, R92C_APS_FSMCO));
1937 		return (ETIMEDOUT);
1938 	}
1939 
1940 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 2,
1941 	    rtwn_pci_read_1(sc, R92C_PCIE_CTRL_REG + 2) | 0x04);
1942 
1943 	/* EMAC timeout. */
1944 	rtwn_pci_write_1(sc, 0x369, rtwn_pci_read_1(sc, 0x369) | 0x80);
1945 
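	/*
	 * Configure the PCIe ePHY through the MDIO registers, retrying
	 * until the expected value reads back.  The values written here
	 * are undocumented magic.
	 */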
1946 	for (ntries = 0; ntries < 100; ntries++) {
1947 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e);
1948 		DELAY(100);
1949 		rtwn_pci_write_2(sc, R92C_MDIO + 2, 0xc280);
1950 		rtwn_pci_write_2(sc, R92C_MDIO, 0xc290);
1951 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x3e);
1952 		DELAY(100);
1953 		rtwn_pci_write_2(sc, R92C_MDIO + 4, 0x5e);
1954 		DELAY(100);
1955 		if (rtwn_pci_read_2(sc, R92C_MDIO + 2) == 0xc290)
1956 			break;
1957 	}
1958 	if (ntries == 100) {
1959 		printf("%s: timeout configuring ePHY\n", sc->sc_dev.dv_xname);
1960 		return (ETIMEDOUT);
1961 	}
1962 
1963 	/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1964 	rtwn_pci_write_2(sc, R92C_CR, 0);
1965 	reg = rtwn_pci_read_2(sc, R92C_CR);
1966 	reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1967 	    R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1968 	    R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1969 	    R92C_CR_ENSEC | R92C_CR_CALTMR_EN;
1970 	rtwn_pci_write_2(sc, R92C_CR, reg);
1971 
1972 	return (0);
1973 }
1974 
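/* Dispatch to the chip-specific power-on routine. */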
1975 int
1976 rtwn_power_on(void *cookie)
1977 {
1978 	struct rtwn_pci_softc *sc = cookie;
1979 
1980 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
1981 		return (rtwn_88e_power_on(sc));
1982 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
1983 		return (rtwn_23a_power_on(sc));
1984 	else
1985 		return (rtwn_92c_power_on(sc));
1986 }
1987 
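/*
 * Set up TX/RX DMA: initialize the LLT, distribute TX buffer pages among
 * the priority queues (RQPN), program the queue boundaries and DMA queue
 * mapping, and point the chip at the host TX and RX descriptor rings.
 */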
1988 int
1989 rtwn_dma_init(void *cookie)
1990 {
1991 	struct rtwn_pci_softc *sc = cookie;
1992 	uint32_t reg;
1993 	uint16_t dmasize;
1994 	int hqpages, lqpages, nqpages, pagecnt, boundary, trxdma, tcr;
1995 	int error;
1996 
1997 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
1998 		nqpages = R88E_NPQ_NPAGES;
1999 		hqpages = R88E_HPQ_NPAGES;
2000 		lqpages = R88E_LPQ_NPAGES;
2001 		pagecnt = R88E_TX_PAGE_COUNT;
2002 		boundary = R88E_TX_PAGE_BOUNDARY;
2003 		dmasize = R88E_MAX_RX_DMA_SIZE;
2004 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN3;
2005 		trxdma = 0xe771;
2006 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2007 		nqpages = R23A_NPQ_NPAGES;
2008 		hqpages = R23A_HPQ_NPAGES;
2009 		lqpages = R23A_LPQ_NPAGES;
2010 		pagecnt = R23A_TX_PAGE_COUNT;
2011 		boundary = R23A_TX_PAGE_BOUNDARY;
2012 		dmasize = R23A_MAX_RX_DMA_SIZE;
2013 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 |
2014 		    R92C_TCR_ERRSTEN1;
2015 		trxdma = 0xf771;
2016 	} else {
2017 		nqpages = R92C_NPQ_NPAGES;
2018 		hqpages = R92C_HPQ_NPAGES;
2019 		lqpages = R92C_LPQ_NPAGES;
2020 		pagecnt = R92C_TX_PAGE_COUNT;
2021 		boundary = R92C_TX_PAGE_BOUNDARY;
2022 		dmasize = R92C_MAX_RX_DMA_SIZE;
2023 		tcr = R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 |
2024 		    R92C_TCR_ERRSTEN1;
2025 		trxdma = 0xf771;
2026 	}
2027 
2028 	/* Initialize LLT table. */
2029 	error = rtwn_llt_init(sc, pagecnt);
2030 	if (error != 0)
2031 		return error;
2032 
2033 	/* Set number of pages for normal priority queue. */
2034 	rtwn_pci_write_2(sc, R92C_RQPN_NPQ, nqpages);
2035 	rtwn_pci_write_4(sc, R92C_RQPN,
2036 	    /* Set number of pages for public queue. */
2037 	    SM(R92C_RQPN_PUBQ, pagecnt) |
2038 	    /* Set number of pages for high priority queue. */
2039 	    SM(R92C_RQPN_HPQ, hqpages) |
2040 	    /* Set number of pages for low priority queue. */
2041 	    SM(R92C_RQPN_LPQ, lqpages) |
2042 	    /* Load values. */
2043 	    R92C_RQPN_LD);
2044 
2045 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, boundary);
2046 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, boundary);
2047 	rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD,
2048 	    boundary);
2049 	rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, boundary);
2050 	rtwn_pci_write_1(sc, R92C_TDECTRL + 1, boundary);
2051 
2052 	reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL);
2053 	reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
2054 	reg |= trxdma;
2055 	rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg);
2056 
2057 	rtwn_pci_write_4(sc, R92C_TCR, tcr);
2058 
2059 	/* Configure Tx DMA. */
2060 	rtwn_pci_write_4(sc, R92C_BKQ_DESA,
2061 		sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
2062 	rtwn_pci_write_4(sc, R92C_BEQ_DESA,
2063 		sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
2064 	rtwn_pci_write_4(sc, R92C_VIQ_DESA,
2065 		sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
2066 	rtwn_pci_write_4(sc, R92C_VOQ_DESA,
2067 		sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
2068 	rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
2069 		sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
2070 	rtwn_pci_write_4(sc, R92C_MGQ_DESA,
2071 		sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
2072 	rtwn_pci_write_4(sc, R92C_HQ_DESA,
2073 		sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);
2074 
2075 	/* Configure Rx DMA. */
2076 	rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr);
2077 	rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG+1, 0);
2078 
2079 	/* Set Tx/Rx transfer page boundary. */
2080 	rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, dmasize - 1);
2081 
2082 	/* Set Tx/Rx transfer page size. */
2083 	rtwn_pci_write_1(sc, R92C_PBP,
2084 	    SM(R92C_PBP_PSRX, R92C_PBP_128) |
2085 	    SM(R92C_PBP_PSTX, R92C_PBP_128));
2086 
2087 	return (0);
2088 }
2089 
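/*
 * Load one page of firmware: select the page in MCUFWDL and copy the
 * payload, one byte at a time, into the download window starting at
 * R92C_FW_START_ADDR.
 */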
2090 int
2091 rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len)
2092 {
2093 	struct rtwn_pci_softc *sc = cookie;
2094 	uint32_t reg;
2095 	int off, mlen, error = 0, i;
2096 
2097 	reg = rtwn_pci_read_4(sc, R92C_MCUFWDL);
2098 	reg = RW(reg, R92C_MCUFWDL_PAGE, page);
2099 	rtwn_pci_write_4(sc, R92C_MCUFWDL, reg);
2100 
2101 	DELAY(5);
2102 
2103 	off = R92C_FW_START_ADDR;
2104 	while (len > 0) {
2105 		if (len > 196)
2106 			mlen = 196;
2107 		else if (len > 4)
2108 			mlen = 4;
2109 		else
2110 			mlen = 1;
2111 		for (i = 0; i < mlen; i++)
2112 			rtwn_pci_write_1(sc, off++, buf[i]);
2113 		buf += mlen;
2114 		len -= mlen;
2115 	}
2116 
2117 	return (error);
2118 }
2119 
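/*
 * Select the firmware image matching the chip variant and read it in
 * with loadfirmware(9).
 */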
2120 int
2121 rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len)
2122 {
2123 	struct rtwn_pci_softc *sc = cookie;
2124 	const char *name;
2125 	int error;
2126 
2127 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
2128 		name = "rtwn-rtl8188e";
2129 	else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2130 		if (sc->sc_sc.chip & RTWN_CHIP_UMC_A_CUT)
2131 			name = "rtwn-rtl8723";
2132 		else
2133 			name = "rtwn-rtl8723_B";
2134 	} else if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | RTWN_CHIP_92C)) ==
2135 	    RTWN_CHIP_UMC_A_CUT)
2136 		name = "rtwn-rtl8192cU";
2137 	else
2138 		name = "rtwn-rtl8192cU_B";
2139 
2140 	error = loadfirmware(name, fw, len);
2141 	if (error)
2142 		printf("%s: could not read firmware %s (error %d)\n",
2143 		    sc->sc_dev.dv_xname, name, error);
2144 	return (error);
2145 }
2146 
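/* Write the chip-specific MAC register initialization table. */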
2147 void
2148 rtwn_mac_init(void *cookie)
2149 {
2150 	struct rtwn_pci_softc *sc = cookie;
2151 	int i;
2152 
2153 	/* Write MAC initialization values. */
2154 	if (sc->sc_sc.chip & RTWN_CHIP_88E) {
2155 		for (i = 0; i < nitems(rtl8188eu_mac); i++) {
2156 			if (rtl8188eu_mac[i].reg == R92C_GPIO_MUXCFG)
2157 				continue;
2158 			rtwn_pci_write_1(sc, rtl8188eu_mac[i].reg,
2159 			    rtl8188eu_mac[i].val);
2160 		}
2161 		rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x07);
2162 	} else if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2163 		for (i = 0; i < nitems(rtl8192cu_mac); i++) {
2164 			rtwn_pci_write_1(sc, rtl8192cu_mac[i].reg,
2165 			    rtl8192cu_mac[i].val);
2166 		}
2167 		rtwn_pci_write_1(sc, R92C_MAX_AGGR_NUM, 0x0a);
2168 	} else {
2169 		for (i = 0; i < nitems(rtl8192ce_mac); i++)
2170 			rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg,
2171 			    rtl8192ce_mac[i].val);
2172 	}
2173 }
2174 
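/*
 * Enable the baseband and RF blocks, write the chip-specific BB and AGC
 * initialization tables, and apply 1T-only overrides on 1T2R parts.
 */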
2175 void
2176 rtwn_bb_init(void *cookie)
2177 {
2178 	struct rtwn_pci_softc *sc = cookie;
2179 	const struct r92c_bb_prog *prog;
2180 	uint32_t reg;
2181 	int i;
2182 
2183 	/* Enable BB and RF. */
2184 	rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN,
2185 	    rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) |
2186 	    R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
2187 	    R92C_SYS_FUNC_EN_DIO_RF);
2188 
2189 	if (!(sc->sc_sc.chip & RTWN_CHIP_88E))
2190 		rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);
2191 
2192 	rtwn_pci_write_1(sc, R92C_RF_CTRL,
2193 	    R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);
2194 
2195 	rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
2196 	    R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA |
2197 	    R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST |
2198 	    R92C_SYS_FUNC_EN_BBRSTB);
2199 
2200 	if (!(sc->sc_sc.chip & RTWN_CHIP_88E)) {
2201 		rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);
2202 	}
2203 
2204 	rtwn_pci_write_4(sc, R92C_LEDCFG0,
2205 	    rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000);
2206 
2207 	/* Select BB programming. */
2208 	if (sc->sc_sc.chip & RTWN_CHIP_88E)
2209 		prog = &rtl8188eu_bb_prog;
2210 	else if (sc->sc_sc.chip & RTWN_CHIP_23A)
2211 		prog = &rtl8723a_bb_prog;
2212 	else if (!(sc->sc_sc.chip & RTWN_CHIP_92C))
2213 		prog = &rtl8192ce_bb_prog_1t;
2214 	else
2215 		prog = &rtl8192ce_bb_prog_2t;
2216 
2217 	/* Write BB initialization values. */
2218 	for (i = 0; i < prog->count; i++) {
2219 		rtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
2220 		DELAY(1);
2221 	}
2222 
2223 	if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) {
2224 		/* 8192C 1T only configuration. */
2225 		reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO);
2226 		reg = (reg & ~0x00000003) | 0x2;
2227 		rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);
2228 
2229 		reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO);
2230 		reg = (reg & ~0x00300033) | 0x00200022;
2231 		rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);
2232 
2233 		reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING);
2234 		reg = (reg & ~0xff000000) | 0x45 << 24;
2235 		rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);
2236 
2237 		reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
2238 		reg = (reg & ~0x000000ff) | 0x23;
2239 		rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);
2240 
2241 		reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
2242 		reg = (reg & ~0x00000030) | 1 << 4;
2243 		rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);
2244 
2245 		reg = rtwn_bb_read(sc, 0xe74);
2246 		reg = (reg & ~0x0c000000) | 2 << 26;
2247 		rtwn_bb_write(sc, 0xe74, reg);
2248 		reg = rtwn_bb_read(sc, 0xe78);
2249 		reg = (reg & ~0x0c000000) | 2 << 26;
2250 		rtwn_bb_write(sc, 0xe78, reg);
2251 		reg = rtwn_bb_read(sc, 0xe7c);
2252 		reg = (reg & ~0x0c000000) | 2 << 26;
2253 		rtwn_bb_write(sc, 0xe7c, reg);
2254 		reg = rtwn_bb_read(sc, 0xe80);
2255 		reg = (reg & ~0x0c000000) | 2 << 26;
2256 		rtwn_bb_write(sc, 0xe80, reg);
2257 		reg = rtwn_bb_read(sc, 0xe88);
2258 		reg = (reg & ~0x0c000000) | 2 << 26;
2259 		rtwn_bb_write(sc, 0xe88, reg);
2260 	}
2261 
2262 	/* Write AGC values. */
2263 	for (i = 0; i < prog->agccount; i++) {
2264 		rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
2265 		    prog->agcvals[i]);
2266 		DELAY(1);
2267 	}
2268 
2269 	if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR)
2270 		sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR;
2271 }
2272 
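/*
 * Periodic calibration timeout: update AMRR rate selection and hand off
 * to the common calibration code.
 */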
2273 void
2274 rtwn_calib_to(void *arg)
2275 {
2276 	struct rtwn_pci_softc *sc = arg;
2277 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
2278 	int s;
2279 
2280 	s = splnet();
2281 	ieee80211_amrr_choose(&sc->amrr, ic->ic_bss, &sc->amn);
2282 	splx(s);
2283 
2284 	rtwn_calib(&sc->sc_sc);
2285 }
2286 
2287 void
2288 rtwn_next_calib(void *cookie)
2289 {
2290 	struct rtwn_pci_softc *sc = cookie;
2291 
2292 	timeout_add_sec(&sc->calib_to, 2);
2293 }
2294 
2295 void
2296 rtwn_cancel_calib(void *cookie)
2297 {
2298 	struct rtwn_pci_softc *sc = cookie;
2299 
2300 	if (timeout_initialized(&sc->calib_to))
2301 		timeout_del(&sc->calib_to);
2302 }
2303 
2304 void
2305 rtwn_scan_to(void *arg)
2306 {
2307 	struct rtwn_pci_softc *sc = arg;
2308 
2309 	rtwn_next_scan(&sc->sc_sc);
2310 }
2311 
2312 void
2313 rtwn_pci_next_scan(void *cookie)
2314 {
2315 	struct rtwn_pci_softc *sc = cookie;
2316 
2317 	timeout_add_msec(&sc->scan_to, 200);
2318 }
2319 
2320 void
2321 rtwn_cancel_scan(void *cookie)
2322 {
2323 	struct rtwn_pci_softc *sc = cookie;
2324 
2325 	if (timeout_initialized(&sc->scan_to))
2326 		timeout_del(&sc->scan_to);
2327 }
2328 
2329 void
2330 rtwn_wait_async(void *cookie)
2331 {
2332 	/* nothing to do */
2333 }
2334 
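/*
 * Parse a firmware TX report and feed packet and retry counts into AMRR.
 */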
2335 void
2336 rtwn_tx_report(struct rtwn_pci_softc *sc, uint8_t *buf, int len)
2337 {
2338 	struct r92c_c2h_tx_rpt *rpt = (struct r92c_c2h_tx_rpt *)buf;
2339 	int packets, tries, tx_ok, drop, expire, over;
2340 
2341 	if (len != sizeof(*rpt))
2342 		return;
2343 
2344 	if (sc->sc_sc.chip & RTWN_CHIP_23A) {
2345 		struct r88e_tx_rpt_ccx *rxstat = (struct r88e_tx_rpt_ccx *)buf;
2346 
2347 		/*
2348 		 * We seem to get some garbage reports, so check that the
2349 		 * macid makes sense.
2350 		 */
2351 		if (MS(rxstat->rptb1, R88E_RPTB1_MACID) != R92C_MACID_BSS) {
2352 			return;
2353 		}
2354 
2355 		packets = 1;
2356 		tx_ok = (rxstat->rptb1 & R88E_RPTB1_PKT_OK) ? 1 : 0;
2357 		tries = MS(rxstat->rptb2, R88E_RPTB2_RETRY_CNT);
2358 		expire = (rxstat->rptb2 & R88E_RPTB2_LIFE_EXPIRE);
2359 		over = (rxstat->rptb2 & R88E_RPTB2_RETRY_OVER);
2360 		drop = 0;
2361 	} else {
2362 		packets = MS(rpt->rptb6, R92C_RPTB6_RPT_PKT_NUM);
2363 		tries = MS(rpt->rptb0, R92C_RPTB0_RETRY_CNT);
2364 		tx_ok = (rpt->rptb7 & R92C_RPTB7_PKT_OK);
2365 		drop = (rpt->rptb6 & R92C_RPTB6_PKT_DROP);
2366 		expire = (rpt->rptb6 & R92C_RPTB6_LIFE_EXPIRE);
2367 		over = (rpt->rptb6 & R92C_RPTB6_RETRY_OVER);
2368 	}
2369 
2370 	if (packets > 0) {
2371 		sc->amn.amn_txcnt += packets;
2372 		if (!tx_ok || tries > 1 || drop || expire || over)
2373 			sc->amn.amn_retrycnt++;
2374 	}
2375 }
2376 
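/*
 * Poll the firmware-to-host (C2H) event mailbox.  Only TX report events
 * are handled; anything else is just logged when debugging is enabled.
 */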
2377 void
2378 rtwn_poll_c2h_events(struct rtwn_pci_softc *sc)
2379 {
2380 	const uint16_t off = R92C_C2HEVT_MSG + sizeof(struct r92c_c2h_evt);
2381 	uint8_t buf[R92C_C2H_MSG_MAX_LEN];
2382 	uint8_t id, len, status;
2383 	int i;
2384 
2385 	/* Read current status. */
2386 	status = rtwn_pci_read_1(sc, R92C_C2HEVT_CLEAR);
2387 	if (status == R92C_C2HEVT_HOST_CLOSE)
2388 		return;	/* nothing to do */
2389 
2390 	if (status == R92C_C2HEVT_FW_CLOSE) {
2391 		len = rtwn_pci_read_1(sc, R92C_C2HEVT_MSG);
2392 		id = MS(len, R92C_C2H_EVTB0_ID);
2393 		len = MS(len, R92C_C2H_EVTB0_LEN);
2394 
2395 		if (id == R92C_C2HEVT_TX_REPORT && len <= sizeof(buf)) {
2396 			memset(buf, 0, sizeof(buf));
2397 			for (i = 0; i < len; i++)
2398 				buf[i] = rtwn_pci_read_1(sc, off + i);
2399 			rtwn_tx_report(sc, buf, len);
2400 		} else
2401 			DPRINTF(("unhandled C2H event %d (%d bytes)\n",
2402 			    id, len));
2403 	}
2404 
2405 	/* Prepare for next event. */
2406 	rtwn_pci_write_1(sc, R92C_C2HEVT_CLEAR, R92C_C2HEVT_HOST_CLOSE);
2407 }
2408