xref: /openbsd-src/sys/dev/pci/if_bwfm_pci.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 /*	$OpenBSD: if_bwfm_pci.c,v 1.71 2022/03/21 19:46:56 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2010-2016 Broadcom Corporation
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/queue.h>
28 #include <sys/socket.h>
29 
30 #if defined(__HAVE_FDT)
31 #include <machine/fdt.h>
32 #include <dev/ofw/openfirm.h>
33 #endif
34 
35 #if NBPFILTER > 0
36 #include <net/bpf.h>
37 #endif
38 #include <net/if.h>
39 #include <net/if_dl.h>
40 #include <net/if_media.h>
41 
42 #include <netinet/in.h>
43 #include <netinet/if_ether.h>
44 
45 #include <net80211/ieee80211_var.h>
46 
47 #include <machine/bus.h>
48 
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcidevs.h>
52 
53 #include <dev/ic/bwfmvar.h>
54 #include <dev/ic/bwfmreg.h>
55 #include <dev/pci/if_bwfm_pci.h>
56 
57 #define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
58 #define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
59 #define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN
60 
61 #define BWFM_NUM_TX_MSGRINGS			2
62 #define BWFM_NUM_RX_MSGRINGS			3
63 
64 #define BWFM_NUM_IOCTL_PKTIDS			8
65 #define BWFM_NUM_TX_PKTIDS			2048
66 #define BWFM_NUM_RX_PKTIDS			1024
67 
68 #define BWFM_NUM_IOCTL_DESCS			1
69 #define BWFM_NUM_TX_DESCS			1
70 #define BWFM_NUM_RX_DESCS			1
71 
72 #ifdef BWFM_DEBUG
73 #define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
74 #define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
75 static int bwfm_debug = 2;
76 #else
77 #define DPRINTF(x)	do { ; } while (0)
78 #define DPRINTFN(n, x)	do { ; } while (0)
79 #endif
80 
81 #define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)
82 
/*
 * Lifecycle of a message ring.  Numeric order matters: cleanup code
 * frees a flowring's DMA memory when status >= RING_OPEN, which covers
 * both RING_OPEN and RING_OPENING.
 */
enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};
89 
/*
 * One host<->device message ring.  The read/write indices either live in
 * device TCM (addressed by {w,r}_idx_addr) or in a host DMA buffer,
 * depending on whether DMA indices were negotiated (sc_dma_idx_sz).
 */
struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;	/* offset of write index */
	uint32_t		 r_idx_addr;	/* offset of read index */
	uint32_t		 w_ptr;		/* cached write index */
	uint32_t		 r_ptr;		/* cached read index */
	int			 nitem;		/* number of ring slots */
	int			 itemsz;	/* size of one slot, bytes */
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;		/* ring backing storage */
	struct mbuf		*m;

	/* Used by TX flowrings only. */
	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};
104 
/*
 * A completed ioctl response, queued on sc_ioctlq until the matching
 * transaction id (transid) is consumed by the dcmd path.
 */
struct bwfm_pci_ioctl {
	uint16_t		 transid;	/* matches request's transid */
	uint16_t		 retlen;	/* response payload length */
	int16_t			 status;	/* firmware status code */
	struct mbuf		*m;		/* response payload */
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};
112 
/* A DMA-mapped mbuf tracked by packet id. */
struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};
117 
/*
 * Fixed-size table mapping packet ids to in-flight buffers; `last`
 * remembers the most recently used slot to speed up the next search.
 */
struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};
123 
/*
 * Per-device PCI state.  sc_sc must remain the first member: the code
 * freely casts between struct bwfm_softc * and struct bwfm_pci_softc *.
 * Most of the shared-area addresses below are read out of device TCM in
 * bwfm_pci_preinit() after the microcode has come up.
 */
struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;		/* generic bwfm state, first */
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void 			*sc_ih;		/* interrupt handle */

	int			 sc_initialized;	/* preinit completed */

	/* BAR0: register space */
	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	/* BAR1: device TCM aperture */
	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	/* Shared info area published by firmware in the last word of RAM. */
	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	/* Host-resident ring index buffer (0 = indices live in TCM). */
	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;	/* mailbox, host->dev */
	uint32_t		 sc_dtoh_mb_data_addr;	/* mailbox, dev->host */
	uint32_t		 sc_ring_info_addr;

	/* Firmware's console ("dmesg") buffer in TCM. */
	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	/* Fixed control/data rings plus dynamic per-flow TX rings. */
	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;		/* pending responses */
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	/* Packet-id maps for each traffic class. */
	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;

	uint8_t			 sc_mbdata_done;	/* D3 ack received */
	uint8_t			 sc_pcireg64;
};
191 
/* A single-segment DMA allocation with both bus and kernel mappings. */
struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;	/* kernel virtual address */
};
198 
199 #define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
200 #define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
201 #define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
202 #define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
203 
204 int		 bwfm_pci_match(struct device *, void *, void *);
205 void		 bwfm_pci_attach(struct device *, struct device *, void *);
206 int		 bwfm_pci_detach(struct device *, int);
207 int		 bwfm_pci_activate(struct device *, int);
208 void		 bwfm_pci_cleanup(struct bwfm_pci_softc *);
209 
210 #if defined(__HAVE_FDT)
211 int		 bwfm_pci_read_otp(struct bwfm_pci_softc *);
212 void		 bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *, uint8_t,
213 		    uint8_t, uint8_t *);
214 #endif
215 
216 int		 bwfm_pci_intr(void *);
217 void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
218 void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
219 uint32_t	 bwfm_pci_intr_status(struct bwfm_pci_softc *);
220 void		 bwfm_pci_intr_ack(struct bwfm_pci_softc *, uint32_t);
221 uint32_t	 bwfm_pci_intmask(struct bwfm_pci_softc *);
222 void		 bwfm_pci_hostready(struct bwfm_pci_softc *);
223 int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
224 		    size_t, const u_char *, size_t);
225 void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int );
226 
227 struct bwfm_pci_dmamem *
228 		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
229 		    bus_size_t);
230 void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
231 int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
232 		    struct bwfm_pci_pkts *);
233 int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
234 		    struct bwfm_pci_pkts *, struct mbuf *,
235 		    uint32_t *, paddr_t *);
236 struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
237 		    struct bwfm_pci_pkts *, uint32_t);
238 void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
239 		    struct if_rxring *, uint32_t);
240 void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
241 void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
242 int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
243 		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
244 int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
245 		    int, size_t);
246 
247 void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
248 		    struct bwfm_pci_msgring *);
249 void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
250 		    struct bwfm_pci_msgring *);
251 void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
252 		    struct bwfm_pci_msgring *);
253 void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
254 		    struct bwfm_pci_msgring *);
255 void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
256 		    struct bwfm_pci_msgring *);
257 void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
258 		    struct bwfm_pci_msgring *);
259 void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
260 		    struct bwfm_pci_msgring *, int, int *);
261 void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
262 		    struct bwfm_pci_msgring *, int *);
263 void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
264 		    struct bwfm_pci_msgring *, int);
265 void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
266 		    struct bwfm_pci_msgring *);
267 void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
268 		    struct bwfm_pci_msgring *, int);
269 
270 void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
271 		    struct bwfm_pci_msgring *, struct mbuf_list *);
272 void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
273 		    struct mbuf_list *);
274 
275 uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
276 void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
277 		    uint32_t);
278 int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
279 int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
280 void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);
281 
282 int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
283 		     struct mbuf *);
284 void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
285 		     struct mbuf *);
286 void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
287 void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
288 void		 bwfm_pci_flowring_delete_cb(struct bwfm_softc *, void *);
289 
290 int		 bwfm_pci_preinit(struct bwfm_softc *);
291 void		 bwfm_pci_stop(struct bwfm_softc *);
292 int		 bwfm_pci_txcheck(struct bwfm_softc *);
293 int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
294 
295 int		 bwfm_pci_send_mb_data(struct bwfm_pci_softc *, uint32_t);
296 void		 bwfm_pci_handle_mb_data(struct bwfm_pci_softc *);
297 
298 #ifdef BWFM_DEBUG
299 void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
300 #endif
301 
302 int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
303 		    int, char *, size_t *);
304 int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
305 		    int, char *, size_t);
306 void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
307 		    struct msgbuf_ioctl_resp_hdr *);
308 
/* Chip/buscore access callbacks used by the generic bwfm chip layer. */
struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};
317 
/* Bus transport callbacks; control traffic goes via msgbuf, not txctl. */
struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
};
325 
/* msgbuf protocol callbacks; RX is handled directly in the ring code. */
struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};
332 
/* Autoconf attachment glue. */
const struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
	bwfm_pci_activate,
};
340 
/* PCI ids of supported Broadcom FullMAC chips. */
static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4378 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4387 },
};
349 
350 int
351 bwfm_pci_match(struct device *parent, void *match, void *aux)
352 {
353 	return (pci_matchbyid(aux, bwfm_pci_devices,
354 	    nitems(bwfm_pci_devices)));
355 }
356 
357 void
358 bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
359 {
360 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
361 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
362 	const char *intrstr;
363 	pci_intr_handle_t ih;
364 
365 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
366 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
367 	    NULL, &sc->sc_tcm_ios, 0)) {
368 		printf(": can't map bar1\n");
369 		return;
370 	}
371 
372 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
373 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
374 	    NULL, &sc->sc_reg_ios, 0)) {
375 		printf(": can't map bar0\n");
376 		goto bar1;
377 	}
378 
379 	sc->sc_pc = pa->pa_pc;
380 	sc->sc_tag = pa->pa_tag;
381 	sc->sc_id = pa->pa_id;
382 	sc->sc_dmat = pa->pa_dmat;
383 
384 	/* Map and establish the interrupt. */
385 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
386 		printf(": couldn't map interrupt\n");
387 		goto bar0;
388 	}
389 	intrstr = pci_intr_string(pa->pa_pc, ih);
390 
391 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
392 	    bwfm_pci_intr, sc, DEVNAME(sc));
393 	if (sc->sc_ih == NULL) {
394 		printf(": couldn't establish interrupt");
395 		if (intrstr != NULL)
396 			printf(" at %s", intrstr);
397 		printf("\n");
398 		goto bar1;
399 	}
400 	printf(": %s\n", intrstr);
401 
402 #if defined(__HAVE_FDT)
403 	sc->sc_sc.sc_node = PCITAG_NODE(pa->pa_tag);
404 	if (sc->sc_sc.sc_node) {
405 		if (OF_getproplen(sc->sc_sc.sc_node, "brcm,cal-blob") > 0) {
406 			sc->sc_sc.sc_calsize = OF_getproplen(sc->sc_sc.sc_node,
407 			    "brcm,cal-blob");
408 			sc->sc_sc.sc_cal = malloc(sc->sc_sc.sc_calsize,
409 			    M_DEVBUF, M_WAITOK);
410 			OF_getprop(sc->sc_sc.sc_node, "brcm,cal-blob",
411 			    sc->sc_sc.sc_cal, sc->sc_sc.sc_calsize);
412 		}
413 	}
414 #endif
415 
416 	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
417 	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
418 	bwfm_attach(&sc->sc_sc);
419 	config_mountroot(self, bwfm_attachhook);
420 	return;
421 
422 bar0:
423 	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
424 bar1:
425 	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
426 }
427 
/*
 * One-time bring-up called before the first interface init: attach the
 * chip, load firmware + NVRAM into device RAM, parse the shared info
 * area the firmware publishes, set up all message rings and packet-id
 * maps, and prime the RX rings.  Returns 0 on success, 1 on failure.
 */
int
bwfm_pci_preinit(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_ringinfo ringinfo;
	const char *chip = NULL;
	u_char *ucode, *nvram;
	size_t size, nvsize, nvlen;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	if (sc->sc_initialized)
		return 0;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return 1;
	}

#if defined(__HAVE_FDT)
	if (bwfm_pci_read_otp(sc)) {
		printf("%s: cannot read OTP\n", DEVNAME(sc));
		return 1;
	}
#endif

	/*
	 * Read-modify-write of PCIe config register 0x4e0 through the
	 * indirect CONFIGADDR/CONFIGDATA window; presumably a hardware
	 * workaround where the write-back re-latches the value.
	 */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	/* Select the firmware image name for this chip/revision. */
	switch (bwfm->sc_chip.ch_chip) {
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev <= 7)
			chip = "4350c2";
		else
			chip = "4350";
		break;
	case BRCM_CC_4355_CHIP_ID:
		chip = "4355c1";
		break;
	case BRCM_CC_4356_CHIP_ID:
		chip = "4356";
		break;
	case BRCM_CC_4364_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev <= 3)
			chip = "4364b2";
		else
			chip = "4364b3";
		break;
	case BRCM_CC_43602_CHIP_ID:
		chip = "43602";
		break;
	case BRCM_CC_4371_CHIP_ID:
		chip = "4371";
		break;
	case BRCM_CC_4377_CHIP_ID:
		chip = "4377b3";
		break;
	case BRCM_CC_4378_CHIP_ID:
		chip = "4378b1";
		break;
	case BRCM_CC_4387_CHIP_ID:
		chip = "4387c2";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return 1;
	}

	if (bwfm_loadfirmware(bwfm, chip, "-pcie", &ucode, &size,
	    &nvram, &nvsize, &nvlen) != 0)
		return 1;

	/*
	 * Retrieve RAM size from firmware: a magic/value pair embedded
	 * at offset BWFM_RAMSIZE in the image.
	 */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		free(ucode, M_DEVBUF, size);
		free(nvram, M_DEVBUF, nvsize);
		return 1;
	}
	free(ucode, M_DEVBUF, size);
	free(nvram, M_DEVBUF, nvsize);

	/*
	 * Parse the shared info area whose address the firmware left in
	 * the last word of RAM (sc_shared_address, set by load_microcode).
	 * The low byte of the flags word is the protocol version.
	 */
	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return 1;
	}

	/* Firmware may keep ring indices in host memory (2 or 4 bytes). */
	sc->sc_dma_idx_sz = 0;
	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	/* Version 6+ reports ring counts explicitly; older implies them. */
	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		/* Indices live in TCM at firmware-provided offsets. */
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return 1;
		}

		/*
		 * Carve the host buffer into four consecutive index
		 * arrays (h2d write, h2d read, d2h write, d2h read) and
		 * tell the firmware their DMA addresses.
		 */
		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 1024, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* Completion descriptors grew in shared version 7. */
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024,
	    sc->sc_shared_version >= 7 ? 24 : 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 1024,
	    sc->sc_shared_version >= 7 ? 40 : 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		/* Flowring indices follow the two fixed submission rings. */
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);
	bwfm_pci_hostready(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_CTL_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_CTL_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);
	sc->sc_tx_pkts_full = 0;

	/* Maps IOCTL mbufs to a packet id and back. */
	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_ioctl_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason, could also be a bug somewhere in this
	 * driver, the firmware needs a bunch of RX buffers otherwise
	 * it won't send any RX complete messages.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, min(256, sc->sc_max_rxbufpost),
	    sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

	TAILQ_INIT(&sc->sc_ioctlq);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_initialized = 1;
	return 0;

cleanup:
	/*
	 * NOTE(review): sc_flowrings is allocated before the scratch and
	 * ringupd buffers but is not freed here if those allocations
	 * fail -- small leak on an unlikely path; verify intent.
	 */
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 1;
}
779 
/*
 * Copy the firmware image (and optional NVRAM blob plus a random-seed
 * footer) into device RAM through the TCM window, start the ARM core at
 * the image's reset vector, and wait for the firmware to overwrite the
 * last word of RAM with the address of its shared info area.
 * Returns 0 on success (sc_shared_address valid), 1 on failure.
 */
int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
    const u_char *nvram, size_t nvlen)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	struct bwfm_pci_random_seed_footer footer;
	uint32_t addr, shared, written;
	uint8_t *rndbuf;
	int i;

	/* 43602-specific: clear two ARM CR4 bank PDA registers first. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	/* Byte-copy the firmware image to the start of device RAM. */
	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	if (nvram) {
		/* NVRAM goes at the very top of RAM... */
		addr = bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize -
		    nvlen;
		for (i = 0; i < nvlen; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    addr + i, nvram[i]);

		/* ...preceded by a footer describing the random seed... */
		footer.length = htole32(BWFM_RANDOM_SEED_LENGTH);
		footer.magic = htole32(BWFM_RANDOM_SEED_MAGIC);
		addr -= sizeof(footer);
		for (i = 0; i < sizeof(footer); i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    addr + i, ((uint8_t *)&footer)[i]);

		/* ...and the random seed itself, below the footer. */
		rndbuf = malloc(BWFM_RANDOM_SEED_LENGTH, M_TEMP, M_WAITOK);
		arc4random_buf(rndbuf, BWFM_RANDOM_SEED_LENGTH);
		addr -= BWFM_RANDOM_SEED_LENGTH;
		for (i = 0; i < BWFM_RANDOM_SEED_LENGTH; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    addr + i, rndbuf[i]);
		free(rndbuf, M_TEMP, BWFM_RANDOM_SEED_LENGTH);
	}

	/* Remember the sentinel so we can detect the firmware's update. */
	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);

	/* Poll up to 100 * 50ms for the shared-area pointer to appear. */
	for (i = 0; i < 100; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared != written)
			break;
	}
	if (shared == written) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}
	/* Sanity-check that the pointer falls within device RAM. */
	if (shared < bwfm->sc_chip.ch_rambase ||
	    shared >= bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize) {
		printf("%s: invalid shared RAM address 0x%08x\n", DEVNAME(sc),
		    shared);
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}
865 
866 int
867 bwfm_pci_detach(struct device *self, int flags)
868 {
869 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
870 
871 	bwfm_detach(&sc->sc_sc, flags);
872 	bwfm_pci_cleanup(sc);
873 
874 	return 0;
875 }
876 
/*
 * Release everything bwfm_pci_preinit() set up: packet-id maps and
 * their DMA maps/mbufs, opened flowrings, the fixed message rings and
 * the scratch/ringupd/index buffers.  Assumes preinit completed, since
 * the pkts arrays and fixed rings are freed unconditionally.
 */
void
bwfm_pci_cleanup(struct bwfm_pci_softc *sc)
{
	int i;

	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_pkts.pkts[i].bb_map);
		if (sc->sc_rx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_rx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_rx_pkts.pkts, M_DEVBUF, BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_pkts.pkts[i].bb_map);
		if (sc->sc_tx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_tx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_tx_pkts.pkts, M_DEVBUF, BWFM_NUM_TX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_ioctl_pkts.pkts[i].bb_map);
		if (sc->sc_ioctl_pkts.pkts[i].bb_m)
			m_freem(sc->sc_ioctl_pkts.pkts[i].bb_m);
	}
	free(sc->sc_ioctl_pkts.pkts, M_DEVBUF, BWFM_NUM_IOCTL_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	/* Only rings that made it to RING_OPEN/RING_OPENING own memory. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (sc->sc_flowrings[i].status >= RING_OPEN)
			bwfm_pci_dmamem_free(sc, sc->sc_flowrings[i].ring);
	}
	free(sc->sc_flowrings, M_DEVBUF, sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring));

	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	/* Index buffer only exists when DMA indices were negotiated. */
	if (sc->sc_dma_idx_buf) {
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
		sc->sc_dma_idx_buf = NULL;
	}

	/* Allow a later bwfm_pci_preinit() to run again. */
	sc->sc_initialized = 0;
}
927 
/*
 * autoconf(9) activation hook for suspend/resume.
 *
 * On quiesce, tell the firmware to enter D3 and wait (up to 2s) for the
 * acknowledgement; sc_mbdata_done is set elsewhere, presumably by the
 * mailbox interrupt handler — confirm against the D2H mailbox path.
 * On wakeup, try to bring the device back with D0; if the interrupt
 * mask reads back as zero or the mailbox write fails, the device lost
 * its state, so tear everything down and let it re-initialize.
 */
int
bwfm_pci_activate(struct device *self, int act)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
	struct bwfm_softc *bwfm = (void *)sc;
	int error = 0;

	switch (act) {
	case DVACT_QUIESCE:
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		if (sc->sc_initialized) {
			sc->sc_mbdata_done = 0;
			error = bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D3_INFORM);
			if (error)
				return error;
			tsleep_nsec(&sc->sc_mbdata_done, PCATCH,
			    DEVNAME(sc), SEC_TO_NSEC(2));
			if (!sc->sc_mbdata_done)
				return ETIMEDOUT;
		}
		break;
	case DVACT_WAKEUP:
		if (sc->sc_initialized) {
			/* If device can't be resumed, re-init. */
			if (bwfm_pci_intmask(sc) == 0 ||
			    bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D0_INFORM) != 0) {
				bwfm_cleanup(bwfm);
				bwfm_pci_cleanup(sc);
			}
		}
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		break;
	default:
		break;
	}

	return 0;
}
972 
973 #if defined(__HAVE_FDT)
/*
 * Read the chip's OTP (one-time programmable) memory through a shadow
 * window and hand each TLV tuple found in it to
 * bwfm_pci_process_otp_tuple().  Chips access OTP either through the
 * ChipCommon SROM shadow (needs explicit OTP select) or through the
 * GCI core at a chip-specific offset.
 *
 * Returns 0 on success or when the chip has no OTP to read, 1 if the
 * required core cannot be found.
 */
int
bwfm_pci_read_otp(struct bwfm_pci_softc *sc)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t coreid, base, words;	/* words: OTP size in 16-bit words */
	uint32_t page, offset, sromctl;
	uint8_t *otp;
	int i;

	/* Per-chip location and size of the OTP shadow area. */
	switch (bwfm->sc_chip.ch_chip) {
	case BRCM_CC_4355_CHIP_ID:
		coreid = BWFM_AGENT_CORE_CHIPCOMMON;
		base = 0x8c0;
		words = 0xb2;
		break;
	case BRCM_CC_4364_CHIP_ID:
		coreid = BWFM_AGENT_CORE_CHIPCOMMON;
		base = 0x8c0;
		words = 0x1a0;
		break;
	case BRCM_CC_4377_CHIP_ID:
	case BRCM_CC_4378_CHIP_ID:
		coreid = BWFM_AGENT_CORE_GCI;
		base = 0x1120;
		words = 0x170;
		break;
	case BRCM_CC_4387_CHIP_ID:
		coreid = BWFM_AGENT_CORE_GCI;
		base = 0x113c;
		words = 0x170;
		break;
	default:
		/* Chip without known OTP layout: nothing to do. */
		return 0;
	}

	core = bwfm_chip_get_core(bwfm, coreid);
	if (core == NULL)
		return 1;

	/* Map OTP to shadow area */
	if (coreid == BWFM_AGENT_CORE_CHIPCOMMON) {
		bwfm_pci_select_core(sc, coreid);
		sromctl = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_CHIP_REG_SROMCONTROL);

		if (!(sromctl & BWFM_CHIP_REG_SROMCONTROL_OTP_PRESENT))
			return 0;

		/* Switch the SROM shadow from SROM to OTP contents. */
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_CHIP_REG_SROMCONTROL, sromctl |
		    BWFM_CHIP_REG_SROMCONTROL_OTPSEL);
	}

	/* Map bus window to SROM/OTP shadow area */
	page = (core->co_base + base) & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = (core->co_base + base) & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);

	/* Copy the whole OTP shadow into a local buffer, 16 bits at a time. */
	otp = mallocarray(words, sizeof(uint16_t), M_TEMP, M_WAITOK);
	for (i = 0; i < words; i++)
		((uint16_t *)otp)[i] = bus_space_read_2(sc->sc_reg_iot,
		    sc->sc_reg_ioh, offset + i * sizeof(uint16_t));

	/* Unmap OTP */
	if (coreid == BWFM_AGENT_CORE_CHIPCOMMON) {
		bwfm_pci_select_core(sc, coreid);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_CHIP_REG_SROMCONTROL, sromctl);
	}

	/*
	 * Walk the TLV tuples: otp[i] is the type, otp[i+1] the payload
	 * length, payload starts at otp[i+2].  Type 0 terminates.
	 * NOTE(review): the loop advances by otp[i+1] only, not by
	 * otp[i+1] + 2 (header included) as brcmfmac does — verify the
	 * intended tuple stride against real OTP dumps.
	 */
	for (i = 0; i < (words * sizeof(uint16_t)) - 1; i += otp[i + 1]) {
		if (otp[i + 0] == 0)
			break;
		if (i + otp[i + 1] > words * sizeof(uint16_t))
			break;
		bwfm_pci_process_otp_tuple(sc, otp[i + 0], otp[i + 1],
		    &otp[i + 2]);
	}

	free(otp, M_TEMP, words * sizeof(uint16_t));
	return 0;
}
1057 
/*
 * Interpret a single OTP tuple.  Tuple 0x15 ("system vendor OTP")
 * carries a list of "X=value" strings (separated by space/NUL/0xff)
 * describing chip revision, module, module revision and vendor; these
 * are stored in the softc so the firmware file name can be derived.
 * On Apple systems (FDT "brcm,board-type" starting with "apple,") the
 * firmware directory is switched to apple-bwfm/.
 */
void
bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *sc, uint8_t type, uint8_t size,
    uint8_t *data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	char chiprev[8] = "", module[8] = "", modrev[8] = "", vendor[8] = "", chip[8] = "";
	char board_type[128] = "";
	int len;

	switch (type) {
	case 0x15: /* system vendor OTP */
		DPRINTF(("%s: system vendor OTP\n", DEVNAME(sc)));
		/* Payload must start with a little-endian 0x00000008 magic. */
		if (size < sizeof(uint32_t))
			return;
		if (data[0] != 0x08 || data[1] != 0x00 ||
		    data[2] != 0x00 || data[3] != 0x00)
			return;
		size -= sizeof(uint32_t);
		data += sizeof(uint32_t);
		while (size) {
			/* reached end */
			if (data[0] == 0xff)
				break;
			/* Find the end of the current "X=value" token. */
			for (len = 0; len < size; len++)
				if (data[len] == 0x00 || data[len] == ' ' ||
				    data[len] == 0xff)
					break;
			if (len < 3 || len > 9) /* X=abcdef */
				goto next;
			if (data[1] != '=')
				goto next;
			/* NULL-terminate string */
			if (data[len] == ' ')
				data[len] = '\0';
			/* The single character before '=' selects the field. */
			switch (data[0]) {
			case 's':
				strlcpy(chiprev, &data[2], sizeof(chiprev));
				break;
			case 'M':
				strlcpy(module, &data[2], sizeof(module));
				break;
			case 'm':
				strlcpy(modrev, &data[2], sizeof(modrev));
				break;
			case 'V':
				strlcpy(vendor, &data[2], sizeof(vendor));
				break;
			}
next:
			/* skip content */
			data += len;
			size -= len;
			/* skip spacer tag */
			if (size) {
				data++;
				size--;
			}
		}
		/* Chip ids above 40000 print decimal, older ones hex. */
		snprintf(chip, sizeof(chip),
		    bwfm->sc_chip.ch_chip > 40000 ? "%05d" : "%04x",
		    bwfm->sc_chip.ch_chip);
		if (sc->sc_sc.sc_node) {
			OF_getprop(sc->sc_sc.sc_node, "brcm,board-type",
			    board_type, sizeof(board_type));
			if (strncmp(board_type, "apple,", 6) == 0) {
				strlcpy(sc->sc_sc.sc_fwdir, "apple-bwfm/",
				    sizeof(sc->sc_sc.sc_fwdir));
			}
		}
		strlcpy(sc->sc_sc.sc_board_type, board_type,
		    sizeof(sc->sc_sc.sc_board_type));
		strlcpy(sc->sc_sc.sc_module, module,
		    sizeof(sc->sc_sc.sc_module));
		strlcpy(sc->sc_sc.sc_vendor, vendor,
		    sizeof(sc->sc_sc.sc_vendor));
		strlcpy(sc->sc_sc.sc_modrev, modrev,
		    sizeof(sc->sc_sc.sc_modrev));
		break;
	case 0x80: /* Broadcom CIS */
		DPRINTF(("%s: Broadcom CIS\n", DEVNAME(sc)));
		break;
	default:
		DPRINTF(("%s: unknown OTP tuple\n", DEVNAME(sc)));
		break;
	}
}
1144 #endif
1145 
1146 /* DMA code */
1147 struct bwfm_pci_dmamem *
1148 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
1149 {
1150 	struct bwfm_pci_dmamem *bdm;
1151 	int nsegs;
1152 
1153 	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
1154 	bdm->bdm_size = size;
1155 
1156 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1157 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
1158 		goto bdmfree;
1159 
1160 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
1161 	    &nsegs, BUS_DMA_WAITOK) != 0)
1162 		goto destroy;
1163 
1164 	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
1165 	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
1166 		goto free;
1167 
1168 	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
1169 	    NULL, BUS_DMA_WAITOK) != 0)
1170 		goto unmap;
1171 
1172 	bzero(bdm->bdm_kva, size);
1173 
1174 	return (bdm);
1175 
1176 unmap:
1177 	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
1178 free:
1179 	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
1180 destroy:
1181 	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
1182 bdmfree:
1183 	free(bdm, M_DEVBUF, sizeof(*bdm));
1184 
1185 	return (NULL);
1186 }
1187 
/*
 * Release a buffer obtained from bwfm_pci_dmamem_alloc(), undoing the
 * map/alloc/create steps in reverse order.
 */
void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}
1196 
1197 /*
1198  * We need a simple mapping from a packet ID to mbufs, because when
1199  * a transfer completed, we only know the ID so we have to look up
1200  * the memory for the ID.  This simply looks for an empty slot.
1201  */
1202 int
1203 bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
1204 {
1205 	int i, idx;
1206 
1207 	idx = pkts->last + 1;
1208 	for (i = 0; i < pkts->npkt; i++) {
1209 		if (idx == pkts->npkt)
1210 			idx = 0;
1211 		if (pkts->pkts[idx].bb_m == NULL)
1212 			return 0;
1213 		idx++;
1214 	}
1215 	return ENOBUFS;
1216 }
1217 
/*
 * Assign a free packet id to mbuf m and DMA-load it so the firmware
 * can address it.  On success *pktid receives the slot index and
 * *paddr the DMA address of the first (only) segment; returns 0.
 * Returns EFBIG if the mbuf cannot be loaded even after defrag, or
 * ENOBUFS if no slot is free.  On error the caller keeps ownership
 * of the mbuf.
 */
int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	/* Round-robin search starting after the last slot we used. */
	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			/* Chained mbufs won't fit one map; defrag and retry. */
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
				if (m_defrag(m, M_DONTWAIT))
					return EFBIG;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = m;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}
1250 
1251 struct mbuf *
1252 bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
1253     uint32_t pktid)
1254 {
1255 	struct mbuf *m;
1256 
1257 	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
1258 		return NULL;
1259 	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
1260 	    pkts->pkts[pktid].bb_map->dm_mapsize,
1261 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1262 	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
1263 	m = pkts->pkts[pktid].bb_m;
1264 	pkts->pkts[pktid].bb_m = NULL;
1265 	return m;
1266 }
1267 
/*
 * Top up all receive-side buffer pools: the data rx buffers plus the
 * buffers the firmware uses for ioctl responses and events.
 */
void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}
1277 
/*
 * Post up to 8 buffers of msgtype (ioctl-response or event) on the
 * control submit ring so the firmware has somewhere to put replies.
 * Each step can fail (no pktid, no ring slot, no mbuf); on failure the
 * reserved ring slot is rolled back and unused rxring slots returned.
 */
void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_CTL_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_CTL_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		/* Hand the buffer's DMA address and pktid to the firmware. */
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_CTL_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}
1317 
/*
 * Post receive buffers for regular data packets on the rxpost submit
 * ring, up to the firmware-announced sc_max_rxbufpost limit.  Same
 * reserve/cancel rollback pattern as bwfm_pci_fill_rx_ioctl_ring().
 */
void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		/* Hand the buffer's DMA address and pktid to the firmware. */
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}
1357 
/*
 * Initialize one of the fixed message rings: record where the firmware
 * keeps its read/write indices (idx'th slot at idx_off stride), reset
 * both pointers, allocate the ring's DMA memory and describe it in the
 * device's ring-mem area.  *ring_mem is advanced past the descriptor
 * so consecutive calls lay out consecutive rings.  Returns ENOMEM if
 * the DMA allocation fails (indices have already been written then).
 */
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	/* Publish base address, item count and item size to the device. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}
1388 
1389 int
1390 bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1391     int nitem, size_t itemsz)
1392 {
1393 	ring->w_ptr = 0;
1394 	ring->r_ptr = 0;
1395 	ring->nitem = nitem;
1396 	ring->itemsz = itemsz;
1397 	bwfm_pci_ring_write_rptr(sc, ring);
1398 	bwfm_pci_ring_write_wptr(sc, ring);
1399 
1400 	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
1401 	if (ring->ring == NULL)
1402 		return ENOMEM;
1403 	return 0;
1404 }
1405 
1406 /* Ring helpers */
1407 void
1408 bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1409     struct bwfm_pci_msgring *ring)
1410 {
1411 	if (sc->sc_pcireg64)
1412 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1413 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_0, 1);
1414 	else
1415 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1416 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_0, 1);
1417 }
1418 
/*
 * Refresh our cached copy of the firmware's read pointer for a ring.
 * Depending on negotiation the index lives either in device TCM or in
 * a host DMA buffer (sc_dma_idx_sz != 0), which must be synced first.
 */
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}
1434 
/*
 * Refresh our cached copy of the firmware's write pointer for a ring.
 * Same TCM-vs-DMA-buffer split as bwfm_pci_ring_update_rptr().
 */
void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}
1450 
/*
 * Publish our read pointer to the firmware, either directly into
 * device TCM or into the shared DMA index buffer (synced afterwards).
 */
void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1466 
/*
 * Publish our write pointer to the firmware, either directly into
 * device TCM or into the shared DMA index buffer (synced afterwards).
 */
void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1482 
1483 /*
1484  * Retrieve a free descriptor to put new stuff in, but don't commit
1485  * to it yet so we can rollback later if any error occurs.
1486  */
1487 void *
1488 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1489     struct bwfm_pci_msgring *ring)
1490 {
1491 	int available;
1492 	char *ret;
1493 
1494 	bwfm_pci_ring_update_rptr(sc, ring);
1495 
1496 	if (ring->r_ptr > ring->w_ptr)
1497 		available = ring->r_ptr - ring->w_ptr;
1498 	else
1499 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1500 
1501 	if (available <= 1)
1502 		return NULL;
1503 
1504 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1505 	ring->w_ptr += 1;
1506 	if (ring->w_ptr == ring->nitem)
1507 		ring->w_ptr = 0;
1508 	return ret;
1509 }
1510 
1511 void *
1512 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
1513     struct bwfm_pci_msgring *ring, int count, int *avail)
1514 {
1515 	int available;
1516 	char *ret;
1517 
1518 	bwfm_pci_ring_update_rptr(sc, ring);
1519 
1520 	if (ring->r_ptr > ring->w_ptr)
1521 		available = ring->r_ptr - ring->w_ptr;
1522 	else
1523 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1524 
1525 	if (available <= 1)
1526 		return NULL;
1527 
1528 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1529 	*avail = min(count, available - 1);
1530 	if (*avail + ring->w_ptr > ring->nitem)
1531 		*avail = ring->nitem - ring->w_ptr;
1532 	ring->w_ptr += *avail;
1533 	if (ring->w_ptr == ring->nitem)
1534 		ring->w_ptr = 0;
1535 	return ret;
1536 }
1537 
1538 /*
1539  * Read number of descriptors available (submitted by the firmware)
1540  * and retrieve pointer to first descriptor.
1541  */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	/*
	 * Only report the contiguous run up to the end of the ring;
	 * the wrapped-around remainder is picked up by the caller's
	 * next invocation (see bwfm_pci_ring_rx()).
	 */
	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;

	/* Make the firmware's writes visible before we parse them. */
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}
1561 
1562 /*
1563  * Let firmware know we read N descriptors.
1564  */
/*
 * Advance the read pointer past nitem consumed descriptors (wrapping
 * at the end of the ring) and publish it to the firmware.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}
1574 
1575 /*
1576  * Let firmware know that we submitted some descriptors.
1577  */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/* Flush the whole ring before the device is told to look at it. */
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}
1588 
1589 /*
1590  * Rollback N descriptors in case we don't actually want
1591  * to commit to it.
1592  */
/*
 * Take back nitem descriptors reserved but not yet committed.
 * NOTE(review): the wrap handling assumes the reservation did not
 * straddle the ring end; all current callers cancel exactly one
 * descriptor, for which this is correct.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}
1602 
1603 /*
1604  * Foreach written descriptor on the ring, pass the descriptor to
1605  * a message handler and let the firmware know we handled it.
1606  */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    struct mbuf_list *ml)
{
	void *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
		buf += ring->itemsz;
		processed++;
		/* Return descriptors in batches so the firmware can reuse
		 * them while we keep processing. */
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	/* read_avail() only covers up to the ring end; if we wrapped,
	 * go back for the remainder at the start of the ring. */
	if (ring->r_ptr == 0)
		goto again;
}
1635 
1636 void
1637 bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
1638 {
1639 	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1640 	struct msgbuf_ioctl_resp_hdr *resp;
1641 	struct msgbuf_tx_status *tx;
1642 	struct msgbuf_rx_complete *rx;
1643 	struct msgbuf_rx_event *event;
1644 	struct msgbuf_common_hdr *msg;
1645 	struct msgbuf_flowring_create_resp *fcr;
1646 	struct msgbuf_flowring_delete_resp *fdr;
1647 	struct bwfm_cmd_flowring_create fdcmd;
1648 	struct bwfm_pci_msgring *ring;
1649 	struct mbuf *m;
1650 	int flowid;
1651 
1652 	msg = (struct msgbuf_common_hdr *)buf;
1653 	switch (msg->msgtype)
1654 	{
1655 	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1656 		fcr = (struct msgbuf_flowring_create_resp *)buf;
1657 		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1658 		if (flowid < 2)
1659 			break;
1660 		flowid -= 2;
1661 		if (flowid >= sc->sc_max_flowrings)
1662 			break;
1663 		ring = &sc->sc_flowrings[flowid];
1664 		if (ring->status != RING_OPENING)
1665 			break;
1666 		if (fcr->compl_hdr.status) {
1667 			printf("%s: failed to open flowring %d\n",
1668 			    DEVNAME(sc), flowid);
1669 			ring->status = RING_CLOSED;
1670 			if (ring->m) {
1671 				m_freem(ring->m);
1672 				ring->m = NULL;
1673 			}
1674 			ifq_restart(&ifp->if_snd);
1675 			break;
1676 		}
1677 		ring->status = RING_OPEN;
1678 		if (ring->m != NULL) {
1679 			m = ring->m;
1680 			ring->m = NULL;
1681 			if (bwfm_pci_txdata(&sc->sc_sc, m))
1682 				m_freem(ring->m);
1683 		}
1684 		ifq_restart(&ifp->if_snd);
1685 		break;
1686 	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1687 		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1688 		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1689 		if (flowid < 2)
1690 			break;
1691 		flowid -= 2;
1692 		if (flowid >= sc->sc_max_flowrings)
1693 			break;
1694 		ring = &sc->sc_flowrings[flowid];
1695 		if (ring->status != RING_CLOSING)
1696 			break;
1697 		if (fdr->compl_hdr.status) {
1698 			printf("%s: failed to delete flowring %d\n",
1699 			    DEVNAME(sc), flowid);
1700 			break;
1701 		}
1702 		fdcmd.flowid = flowid;
1703 		bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_delete_cb,
1704 		    &fdcmd, sizeof(fdcmd));
1705 		break;
1706 	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1707 		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1708 		    letoh32(msg->request_id));
1709 		if (m == NULL)
1710 			break;
1711 		m_freem(m);
1712 		break;
1713 	case MSGBUF_TYPE_IOCTL_CMPLT:
1714 		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1715 		bwfm_pci_msgbuf_rxioctl(sc, resp);
1716 		if_rxr_put(&sc->sc_ioctl_ring, 1);
1717 		bwfm_pci_fill_rx_rings(sc);
1718 		break;
1719 	case MSGBUF_TYPE_WL_EVENT:
1720 		event = (struct msgbuf_rx_event *)buf;
1721 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1722 		    letoh32(event->msg.request_id));
1723 		if (m == NULL)
1724 			break;
1725 		m_adj(m, sc->sc_rx_dataoffset);
1726 		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1727 		bwfm_rx(&sc->sc_sc, m, ml);
1728 		if_rxr_put(&sc->sc_event_ring, 1);
1729 		bwfm_pci_fill_rx_rings(sc);
1730 		break;
1731 	case MSGBUF_TYPE_TX_STATUS:
1732 		tx = (struct msgbuf_tx_status *)buf;
1733 		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1734 		    letoh32(tx->msg.request_id) - 1);
1735 		if (m == NULL)
1736 			break;
1737 		m_freem(m);
1738 		if (sc->sc_tx_pkts_full) {
1739 			sc->sc_tx_pkts_full = 0;
1740 			ifq_restart(&ifp->if_snd);
1741 		}
1742 		break;
1743 	case MSGBUF_TYPE_RX_CMPLT:
1744 		rx = (struct msgbuf_rx_complete *)buf;
1745 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1746 		    letoh32(rx->msg.request_id));
1747 		if (m == NULL)
1748 			break;
1749 		if (letoh16(rx->data_offset))
1750 			m_adj(m, letoh16(rx->data_offset));
1751 		else if (sc->sc_rx_dataoffset)
1752 			m_adj(m, sc->sc_rx_dataoffset);
1753 		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1754 		bwfm_rx(&sc->sc_sc, m, ml);
1755 		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1756 		bwfm_pci_fill_rx_rings(sc);
1757 		break;
1758 	default:
1759 		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1760 		break;
1761 	}
1762 }
1763 
1764 /* Bus core helpers */
1765 void
1766 bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1767 {
1768 	struct bwfm_softc *bwfm = (void *)sc;
1769 	struct bwfm_core *core;
1770 
1771 	core = bwfm_chip_get_core(bwfm, id);
1772 	if (core == NULL) {
1773 		printf("%s: could not find core to select", DEVNAME(sc));
1774 		return;
1775 	}
1776 
1777 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1778 	    BWFM_PCI_BAR0_WINDOW, core->co_base);
1779 	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1780 	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
1781 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1782 		    BWFM_PCI_BAR0_WINDOW, core->co_base);
1783 }
1784 
1785 uint32_t
1786 bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1787 {
1788 	struct bwfm_pci_softc *sc = (void *)bwfm;
1789 	uint32_t page, offset;
1790 
1791 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1792 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1793 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1794 	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1795 }
1796 
1797 void
1798 bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1799 {
1800 	struct bwfm_pci_softc *sc = (void *)bwfm;
1801 	uint32_t page, offset;
1802 
1803 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1804 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1805 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1806 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1807 }
1808 
/* Nothing to prepare before reset on the PCI bus; always succeeds. */
int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}
1814 
/*
 * Reset the chip via the ChipCommon watchdog while ASPM is disabled,
 * then restore PCIe config state.  On PCIe core revs <= 13 a set of
 * config registers must be rewritten through the indirect
 * CONFIGADDR/CONFIGDATA pair after the reset (write-back workaround).
 * Also records whether the new 64-style PCIe register layout is in
 * use and clears any pending interrupt state.
 */
int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Disable ASPM around the watchdog reset. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	/* Kick the watchdog and give the chip time to come back. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	/* Restore the saved link control (ASPM) setting. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		/* Read-modify-write each register through the indirect
		 * window to re-latch its value after the reset. */
		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}
	if (core->co_rev >= 64)
		sc->sc_pcireg64 = 1;

	/* Acknowledge any interrupt left pending across the reset. */
	reg = bwfm_pci_intr_status(sc);
	if (reg != 0xffffffff)
		bwfm_pci_intr_ack(sc, reg);

	return 0;
}
1874 
/* Store the firmware's reset vector at the start of TCM (address 0). */
void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}
1881 
/*
 * Map an 802.1D priority (0-7, as returned by ieee80211_classify())
 * to one of the four TX FIFOs used for flow ring selection.
 */
static int bwfm_pci_prio2fifo[8] = {
	0, /* best effort */
	1, /* IPTOS_PREC_IMMEDIATE */
	1, /* IPTOS_PREC_PRIORITY */
	0, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
1892 
/*
 * Find the flow ring an outgoing mbuf belongs to.  The preferred slot
 * is derived from the traffic priority (STA: one ring per FIFO; AP:
 * hashed on the destination's last address byte and FIFO), then the
 * flowring array is probed linearly for an open ring matching the
 * FIFO (and, for AP mode, the destination MAC).  Returns the flowring
 * index, -1 if no matching ring exists yet, or ENOBUFS for an
 * unsupported operating mode.
 */
int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		/* All multicast shares the broadcast ring. */
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}
1949 
1950 void
1951 bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
1952 {
1953 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1954 	struct bwfm_cmd_flowring_create cmd;
1955 #ifndef IEEE80211_STA_ONLY
1956 	uint8_t *da = mtod(m, uint8_t *);
1957 #endif
1958 	struct bwfm_pci_msgring *ring;
1959 	int flowid, prio, fifo;
1960 	int i, found;
1961 
1962 	prio = ieee80211_classify(ic, m);
1963 	fifo = bwfm_pci_prio2fifo[prio];
1964 
1965 	switch (ic->ic_opmode)
1966 	{
1967 	case IEEE80211_M_STA:
1968 		flowid = fifo;
1969 		break;
1970 #ifndef IEEE80211_STA_ONLY
1971 	case IEEE80211_M_HOSTAP:
1972 		if (ETHER_IS_MULTICAST(da))
1973 			da = etherbroadcastaddr;
1974 		flowid = da[5] * 2 + fifo;
1975 		break;
1976 #endif
1977 	default:
1978 		printf("%s: state not supported\n", DEVNAME(sc));
1979 		return;
1980 	}
1981 
1982 	found = 0;
1983 	flowid = flowid % sc->sc_max_flowrings;
1984 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1985 		ring = &sc->sc_flowrings[flowid];
1986 		if (ring->status == RING_CLOSED) {
1987 			ring->status = RING_OPENING;
1988 			found = 1;
1989 			break;
1990 		}
1991 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1992 	}
1993 
1994 	/*
1995 	 * We cannot recover from that so far.  Only a stop/init
1996 	 * cycle can revive this if it ever happens at all.
1997 	 */
1998 	if (!found) {
1999 		printf("%s: no flowring available\n", DEVNAME(sc));
2000 		return;
2001 	}
2002 
2003 	cmd.m = m;
2004 	cmd.prio = prio;
2005 	cmd.flowid = flowid;
2006 	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
2007 }
2008 
/*
 * Async callback (scheduled via bwfm_do_async() from
 * bwfm_pci_flowring_create()): allocate the ring's DMA memory and
 * post a FLOW_RING_CREATE request to the firmware on the control
 * submit ring.
 */
void
bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
#ifndef IEEE80211_STA_ONLY
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#endif
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;
	int s;

	/* Destination and source MAC from the Ethernet header of the
	 * frame that triggered the flowring creation. */
	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		return;
	}

	/* 512 items of 48 bytes; must match max_items/len_item below. */
	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		/* NOTE(review): the ring is left in RING_OPENING here,
		 * which keeps bwfm_pci_txcheck() returning ENOBUFS
		 * forever -- confirm whether this is recoverable. */
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	/* Status is already RING_OPENING (checked above). */
	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	/* Multicast in AP mode goes through the broadcast ring. */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	/* Firmware flowring ids start at 2; presumably the first two
	 * ids are reserved for other rings -- TODO confirm. */
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
2070 
/*
 * Ask the firmware to tear down an open flowring.  The ring is
 * marked RING_CLOSING here; its DMA memory is freed and the slot
 * reused once the firmware answers (presumably via
 * bwfm_pci_flowring_delete_cb() -- the dispatch is elsewhere).
 */
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;
	int s;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_CLOSING;

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	/* Same +2 offset used when the flowring was created. */
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
2103 
/*
 * Completion handler for a flowring delete: release the ring's DMA
 * memory and mark the slot reusable.  The argument reuses struct
 * bwfm_cmd_flowring_create, but only its flowid field is read here.
 */
void
bwfm_pci_flowring_delete_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct bwfm_pci_msgring *ring;

	ring = &sc->sc_flowrings[cmd->flowid];
	bwfm_pci_dmamem_free(sc, ring->ring);
	ring->status = RING_CLOSED;
}
2115 
2116 void
2117 bwfm_pci_stop(struct bwfm_softc *bwfm)
2118 {
2119 	struct bwfm_pci_softc *sc = (void *)bwfm;
2120 	struct bwfm_pci_msgring *ring;
2121 	int i;
2122 
2123 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2124 		ring = &sc->sc_flowrings[i];
2125 		if (ring->status == RING_OPEN)
2126 			bwfm_pci_flowring_delete(sc, i);
2127 	}
2128 }
2129 
2130 int
2131 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
2132 {
2133 	struct bwfm_pci_softc *sc = (void *)bwfm;
2134 	struct bwfm_pci_msgring *ring;
2135 	int i;
2136 
2137 	/* If we are transitioning, we cannot send. */
2138 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2139 		ring = &sc->sc_flowrings[i];
2140 		if (ring->status == RING_OPENING)
2141 			return ENOBUFS;
2142 	}
2143 
2144 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
2145 		sc->sc_tx_pkts_full = 1;
2146 		return ENOBUFS;
2147 	}
2148 
2149 	return 0;
2150 }
2151 
/*
 * Transmit one Ethernet frame via the msgbuf protocol: look up (or
 * lazily create) the flowring for its destination/priority, reserve
 * a TX_POST descriptor, attach a packet id and DMA address, and
 * commit.  Returns 0 on success or when creation was deferred,
 * otherwise an errno.
 */
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	int flowid, ret;

	flowid = bwfm_pci_flowring_lookup(sc, m);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet.  The flowring will be created
		 * asynchronously.  While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now.  When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, m);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	/* Ethernet header travels inline in the descriptor ... */
	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		/* Give back the descriptor we reserved above. */
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	/* ... so the DMA'd payload starts after it. */
	paddr += ETHER_HDR_LEN;

	/* pktid + 1: id 0 appears to be reserved -- TODO confirm
	 * against the completion path. */
	tx->msg.request_id = htole32(pktid + 1);
	/* NOTE(review): uses m_len, not m_pkthdr.len -- assumes the
	 * frame sits in a single mbuf; confirm against how
	 * bwfm_pci_pktid_new() maps the chain. */
	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}
2219 
2220 int
2221 bwfm_pci_send_mb_data(struct bwfm_pci_softc *sc, uint32_t htod_mb_data)
2222 {
2223 	struct bwfm_softc *bwfm = (void *)sc;
2224 	struct bwfm_core *core;
2225 	uint32_t reg;
2226 	int i;
2227 
2228 	for (i = 0; i < 100; i++) {
2229 		reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2230 		    sc->sc_htod_mb_data_addr);
2231 		if (reg == 0)
2232 			break;
2233 		delay(10 * 1000);
2234 	}
2235 	if (i == 100) {
2236 		DPRINTF(("%s: MB transaction already pending\n", DEVNAME(sc)));
2237 		return EIO;
2238 	}
2239 
2240 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2241 	    sc->sc_htod_mb_data_addr, htod_mb_data);
2242 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);
2243 
2244 	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
2245 	if (core->co_rev <= 13)
2246 		pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);
2247 
2248 	return 0;
2249 }
2250 
/*
 * Process a device-to-host mailbox data word.  A zero read means
 * nothing is pending; otherwise the word is cleared before its
 * event bits are handled.
 */
void
bwfm_pci_handle_mb_data(struct bwfm_pci_softc *sc)
{
	uint32_t reg;

	reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_dtoh_mb_data_addr);
	if (reg == 0)
		return;

	/* Acknowledge by clearing the mailbox word. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_dtoh_mb_data_addr, 0);

	/* D3 entry acknowledged: wake any waiter on sc_mbdata_done. */
	if (reg & BWFM_PCI_D2H_DEV_D3_ACK) {
		sc->sc_mbdata_done = 1;
		wakeup(&sc->sc_mbdata_done);
	}

	/* TODO: support more events */
	if (reg & ~BWFM_PCI_D2H_DEV_D3_ACK)
		printf("%s: handle MB data 0x%08x\n", DEVNAME(sc), reg);
}
2273 
2274 #ifdef BWFM_DEBUG
/*
 * Drain the firmware's in-memory console ring and echo each
 * character via DPRINTFN(3).  The read index wraps at
 * sc_console_buf_size; carriage returns are skipped.
 */
void
bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
{
	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);

	if (newidx != sc->sc_console_readidx)
		DPRINTFN(3, ("BWFM CONSOLE: "));
	while (newidx != sc->sc_console_readidx) {
		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_console_buf_addr + sc->sc_console_readidx);
		sc->sc_console_readidx++;
		if (sc->sc_console_readidx == sc->sc_console_buf_size)
			sc->sc_console_readidx = 0;
		if (ch == '\r')
			continue;
		DPRINTFN(3, ("%c", ch));
	}
}
2294 #endif
2295 
/*
 * PCI interrupt handler: mask and acknowledge the mailbox
 * interrupt, handle D2H mailbox data (pre-rev64 layout only),
 * drain the three device-to-host completion rings and re-enable
 * interrupts.  Returns 1 when the interrupt was ours, 0 otherwise.
 */
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint32_t status, mask;

	if (!sc->sc_initialized)
		return 0;

	status = bwfm_pci_intr_status(sc);
	/* FIXME: interrupt status seems to be zero? */
	if (status == 0 && sc->sc_pcireg64)
		status |= BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (status == 0)
		return 0;

	/* Mask further interrupts while we process this one. */
	bwfm_pci_intr_disable(sc);
	bwfm_pci_intr_ack(sc, status);

	/* FN0 mailbox bits only exist on the pre-rev64 register set. */
	if (!sc->sc_pcireg64 &&
	    (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1)))
		bwfm_pci_handle_mb_data(sc);

	mask = BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (sc->sc_pcireg64)
		mask = BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;

	if (status & mask) {
		/* Doorbell: drain RX, TX and control completions. */
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);

		if (ifiq_input(&ifp->if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rxbuf_ring);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	bwfm_pci_intr_enable(sc);
	return 1;
}
2342 
2343 void
2344 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2345 {
2346 	if (sc->sc_pcireg64)
2347 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2348 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK,
2349 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2350 	else
2351 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2352 		    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2353 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2354 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2355 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2356 }
2357 
2358 void
2359 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2360 {
2361 	if (sc->sc_pcireg64)
2362 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2363 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK, 0);
2364 	else
2365 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2366 		    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2367 }
2368 
2369 uint32_t
2370 bwfm_pci_intr_status(struct bwfm_pci_softc *sc)
2371 {
2372 	if (sc->sc_pcireg64)
2373 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2374 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT);
2375 	else
2376 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2377 		    BWFM_PCI_PCIE2REG_MAILBOXINT);
2378 }
2379 
2380 void
2381 bwfm_pci_intr_ack(struct bwfm_pci_softc *sc, uint32_t status)
2382 {
2383 	if (sc->sc_pcireg64)
2384 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2385 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT, status);
2386 	else
2387 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2388 		    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2389 }
2390 
2391 uint32_t
2392 bwfm_pci_intmask(struct bwfm_pci_softc *sc)
2393 {
2394 	if (sc->sc_pcireg64)
2395 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2396 		    BWFM_PCI_64_PCIE2REG_INTMASK);
2397 	else
2398 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2399 		    BWFM_PCI_PCIE2REG_INTMASK);
2400 }
2401 
2402 void
2403 bwfm_pci_hostready(struct bwfm_pci_softc *sc)
2404 {
2405 	if ((sc->sc_shared_flags & BWFM_SHARED_INFO_HOSTRDY_DB1) == 0)
2406 		return;
2407 
2408 	if (sc->sc_pcireg64)
2409 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2410 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_1, 1);
2411 	else
2412 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2413 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_1, 1);
2414 }
2415 
2416 /* Msgbuf protocol implementation */
/*
 * Issue a firmware ioctl over the msgbuf control ring and sleep
 * (up to one second) for the response.  On entry *len is the
 * request/response buffer size; on success it is updated to the
 * actual response length copied into buf.  Returns 0 on success,
 * 1 on any failure.
 */
int
bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t *len)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct msgbuf_ioctl_req_hdr *req;
	struct bwfm_pci_ioctl *ctl;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	size_t buflen;
	int s;

	/* The DMA request buffer caps how much we can send at once. */
	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
	m = MCLGETL(NULL, M_DONTWAIT, buflen);
	if (m == NULL)
		return 1;
	m->m_len = m->m_pkthdr.len = buflen;

	if (buf)
		memcpy(mtod(m, char *), buf, buflen);
	else
		memset(mtod(m, char *), 0, buflen);

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		splx(s);
		m_freem(m);
		return 1;
	}

	/* Map the request mbuf for DMA and get a packet id for it. */
	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
		splx(s);
		m_freem(m);
		return 1;
	}

	/* Queue a waiter keyed by transaction id; the response side
	 * (bwfm_pci_msgbuf_rxioctl) fills it in and wakes us. */
	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
	ctl->transid = sc->sc_ioctl_transid++;
	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);

	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	/* NOTE(review): the ifidx parameter is ignored; requests
	 * always go to interface 0 -- confirm this is intended. */
	req->msg.ifidx = 0;
	req->msg.flags = 0;
	req->msg.request_id = htole32(pktid);
	req->cmd = htole32(cmd);
	req->output_buf_len = htole16(*len);
	req->trans_id = htole16(ctl->transid);

	req->input_buf_len = htole16(m->m_len);
	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);

	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(1));
	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);

	/* Timeout (or response without payload): no response mbuf.
	 * NOTE(review): on timeout the request mbuf m stays in the
	 * ioctl pktid table -- confirm it is reclaimed elsewhere. */
	if (ctl->m == NULL) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	/* Clamp the copy to both the response and our buffer size. */
	*len = min(ctl->retlen, m->m_len);
	*len = min(*len, buflen);
	if (buf)
		m_copydata(ctl->m, 0, *len, buf);
	m_freem(ctl->m);

	if (ctl->status < 0) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	free(ctl, M_TEMP, sizeof(*ctl));
	return 0;
}
2497 
/*
 * "Set" ioctls share the query path; len is passed by value, so the
 * response length reported by the firmware is simply discarded.
 */
int
bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t len)
{
	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
}
2504 
2505 void
2506 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2507     struct msgbuf_ioctl_resp_hdr *resp)
2508 {
2509 	struct bwfm_pci_ioctl *ctl, *tmp;
2510 	struct mbuf *m;
2511 
2512 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2513 	    letoh32(resp->msg.request_id));
2514 
2515 	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2516 		if (ctl->transid != letoh16(resp->trans_id))
2517 			continue;
2518 		ctl->m = m;
2519 		ctl->retlen = letoh16(resp->resp_len);
2520 		ctl->status = letoh16(resp->compl_hdr.status);
2521 		wakeup(ctl);
2522 		return;
2523 	}
2524 
2525 	m_freem(m);
2526 }
2527