/*	$OpenBSD: if_bwfm_pci.c,v 1.34 2020/02/25 14:24:58 patrick Exp $	*/
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/socket.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmvar.h>
#include <dev/ic/bwfmreg.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

#define BWFM_NUM_IOCTL_PKTIDS			8
#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

#define BWFM_NUM_IOCTL_DESCS			1
#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;
	uint32_t		 r_ptr;
	int			 nitem;
	int			 itemsz;
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_ioctl {
	uint16_t		 transid;
	uint16_t		 retlen;
	int16_t			 status;
	struct mbuf		*m;
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};

struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};

struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void			*sc_ih;

	int			 sc_initialized;

	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)

int		 bwfm_pci_match(struct device *, void *, void *);
void		 bwfm_pci_attach(struct device *, struct device *, void *);
int		 bwfm_pci_detach(struct device *, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t, const u_char *, size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf *,
		    uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, struct mbuf_list *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
		    struct mbuf_list *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

int		 bwfm_pci_preinit(struct bwfm_softc *);
void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);
void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
		    struct msgbuf_ioctl_resp_hdr *);

struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
};

struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};

struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
};

static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
};

int
bwfm_pci_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, bwfm_pci_devices,
	    nitems(bwfm_pci_devices)));
}

void
bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	pci_intr_handle_t ih;

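	/*
	 * BAR0 maps the PCIe core registers, BAR1 the chip's tightly
	 * coupled memory (TCM); TCM is mapped first, matching the
	 * unwind order of the error labels below.
	 */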
	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios, 0)) {
		printf(": can't map bar1\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios, 0)) {
		printf(": can't map bar0\n");
		goto bar1;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;
	sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar0;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    bwfm_pci_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar0;
	}
	printf(": %s\n", intrstr);

	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	config_mountroot(self, bwfm_attachhook);
	return;

bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
}

int
bwfm_pci_preinit(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_ringinfo ringinfo;
	const char *name, *nvname;
	u_char *ucode, *nvram = NULL;
	size_t size, nvlen = 0;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	if (sc->sc_initialized)
		return 0;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return 1;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	switch (bwfm->sc_chip.ch_chip)
	{
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev > 7) {
			name = "brcmfmac4350-pcie.bin";
			nvname = "brcmfmac4350-pcie.nvram";
		} else {
			name = "brcmfmac4350c2-pcie.bin";
			nvname = "brcmfmac4350c2-pcie.nvram";
		}
		break;
	case BRCM_CC_4356_CHIP_ID:
		name = "brcmfmac4356-pcie.bin";
		nvname = "brcmfmac4356-pcie.nvram";
		break;
	case BRCM_CC_43602_CHIP_ID:
		name = "brcmfmac43602-pcie.bin";
		nvname = "brcmfmac43602-pcie.nvram";
		break;
	case BRCM_CC_4371_CHIP_ID:
		name = "brcmfmac4371-pcie.bin";
		nvname = "brcmfmac4371-pcie.nvram";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return 1;
	}

	if (loadfirmware(name, &ucode, &size) != 0) {
		printf("%s: failed loadfirmware of file %s\n",
		    DEVNAME(sc), name);
		return 1;
	}

	/* NVRAM is optional. */
	loadfirmware(nvname, &nvram, &nvlen);

	/* Retrieve RAM size from firmware. */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		free(ucode, M_DEVBUF, size);
		free(nvram, M_DEVBUF, nvlen);
		return 1;
	}
	free(ucode, M_DEVBUF, size);
	free(nvram, M_DEVBUF, nvlen);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
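	/* The assignment truncates to the low byte, where the version lives. */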
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return 1;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return 1;
		}

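		/*
		 * The index buffer holds, in this order, the H2D write
		 * indices, H2D read indices, D2H write indices and D2H
		 * read indices; the firmware is handed the host address
		 * of each block below.
		 */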
		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
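	/*
	 * Flowrings occupy submission ring indices 2 and up; indices 0
	 * and 1 belong to the control and rxpost submit rings set up
	 * above, hence the "+ 2" here and the "- 2" on completions.
	 */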
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/* Maps IOCTL mbufs to a packet id and back. */
	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_ioctl_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason (it could also be a bug somewhere in
	 * this driver) the firmware needs a bunch of RX buffers,
	 * otherwise it won't send any RX complete messages.  64
	 * buffers don't suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

	TAILQ_INIT(&sc->sc_ioctlq);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_initialized = 1;
	return 0;

cleanup:
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 1;
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
    const u_char *nvram, size_t nvlen)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared, written;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	if (nvram) {
		for (i = 0; i < nvlen; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize
			    - nvlen + i, nvram[i]);
	}

	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);

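	/*
	 * Poll for up to 2 seconds (40 * 50ms) until the firmware
	 * replaces the last word of RAM with the address of its
	 * shared info area.
	 */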
	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared != written)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(struct device *self, int flags)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
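/*
 * All shared buffers are allocated as one contiguous segment, which
 * BWFM_PCI_DMA_DVA() relies on when it hands dm_segs[0] to the
 * firmware.  A minimal usage sketch (error handling omitted):
 *
 *	bdm = bwfm_pci_dmamem_alloc(sc, size, 8);
 *	... use BWFM_PCI_DMA_KVA(bdm) and BWFM_PCI_DMA_DVA(bdm) ...
 *	bwfm_pci_dmamem_free(sc, bdm);
 */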
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	free(bdm, M_DEVBUF, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}

/*
 * We need a simple mapping from packet ID to mbuf, because when a
 * transfer has completed we only get the ID back, so we have to look
 * up the mbuf it belongs to.  This simply searches for an empty slot.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
				if (m_defrag(m, M_DONTWAIT))
					return EFBIG;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = m;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can roll back later if an error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

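	/*
	 * Keep one slot unused so a full ring (w_ptr one behind
	 * r_ptr) can be told apart from an empty one (w_ptr == r_ptr).
	 */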
	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = min(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

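	/*
	 * On wrap-around only the chunk up to the end of the ring is
	 * returned; the caller comes back for the rest (see the
	 * "again" loop in bwfm_pci_ring_rx()).
	 */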
	if (*avail == 0)
		return NULL;

	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Roll back N descriptors in case we don't actually want
 * to commit to them.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * For each written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    struct mbuf_list *ml)
{
	void *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
		buf += ring->itemsz;
		processed++;
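		/*
		 * Commit in batches of 48 so the firmware can reuse
		 * the slots before the whole chunk is processed.
		 */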
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
{
	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
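		/* Ring ids 0 and 1 are the control and rxpost submit rings. */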
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifq_restart(&ifp->if_snd);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, m))
				m_freem(m);
		}
		ifq_restart(&ifp->if_snd);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
		    letoh32(msg->request_id));
		if (m == NULL)
			break;
		m_freem(m);
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		bwfm_pci_msgbuf_rxioctl(sc, resp);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m, ml);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id) - 1);
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifq_restart(&ifp->if_snd);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m, ml);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
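/*
 * Chip backplane registers are reached through BAR0, a sliding
 * window of BWFM_PCI_BAR0_REG_SIZE bytes: BWFM_PCI_BAR0_WINDOW in
 * PCI config space selects the page, the low bits address into it.
 */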
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

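/*
 * Map the priority returned by ieee80211_classify() to one of the
 * four firmware TX fifos.
 */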
static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};

int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

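	/*
	 * Start at a per-destination hash (last byte of the MAC and
	 * the fifo) and probe linearly for a matching open ring.
	 */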
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}

void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from this so far; only a stop/init
	 * cycle can revive it, if it ever happens at all.
	 */
1649 	if (!found) {
1650 		printf("%s: no flowring available\n", DEVNAME(sc));
1651 		return;
1652 	}
1653 
1654 	cmd.m = m;
1655 	cmd.prio = prio;
1656 	cmd.flowid = flowid;
1657 	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
1658 }
1659 
1660 void
1661 bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
1662 {
1663 	struct bwfm_pci_softc *sc = (void *)bwfm;
1664 #ifndef IEEE80211_STA_ONLY
1665 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1666 #endif
1667 	struct bwfm_cmd_flowring_create *cmd = arg;
1668 	struct msgbuf_tx_flowring_create_req *req;
1669 	struct bwfm_pci_msgring *ring;
1670 	uint8_t *da, *sa;
1671 	int s;
1672 
1673 	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1674 	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1675 
1676 	ring = &sc->sc_flowrings[cmd->flowid];
1677 	if (ring->status != RING_OPENING) {
1678 		printf("%s: flowring not opening\n", DEVNAME(sc));
1679 		return;
1680 	}
1681 
1682 	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
1683 		printf("%s: cannot setup flowring\n", DEVNAME(sc));
1684 		return;
1685 	}
1686 
1687 	s = splnet();
1688 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1689 	if (req == NULL) {
1690 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1691 		splx(s);
1692 		return;
1693 	}
1694 
1695 	ring->status = RING_OPENING;
1696 	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1697 	ring->m = cmd->m;
1698 	memcpy(ring->mac, da, ETHER_ADDR_LEN);
1699 #ifndef IEEE80211_STA_ONLY
1700 	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1701 		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1702 #endif
1703 
1704 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1705 	req->msg.ifidx = 0;
1706 	req->msg.request_id = 0;
1707 	req->tid = bwfm_pci_prio2fifo[cmd->prio];
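	/* Flowring IDs start after the two H2D common rings (0 and 1). */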
1708 	req->flow_ring_id = htole16(cmd->flowid + 2);
1709 	memcpy(req->da, da, ETHER_ADDR_LEN);
1710 	memcpy(req->sa, sa, ETHER_ADDR_LEN);
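	/* 64-bit DMA address of the ring, split into two LE 32-bit halves. */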
1711 	req->flow_ring_addr.high_addr =
1712 	    htole32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1713 	req->flow_ring_addr.low_addr =
1714 	    htole32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1715 	req->max_items = htole16(512);
1716 	req->len_item = htole16(48);
1717 
1718 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1719 	splx(s);
1720 }
1721 
1722 void
1723 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1724 {
1725 	struct msgbuf_tx_flowring_delete_req *req;
1726 	struct bwfm_pci_msgring *ring;
1727 	int s;
1728 
1729 	ring = &sc->sc_flowrings[flowid];
1730 	if (ring->status != RING_OPEN) {
1731 		printf("%s: flowring not open\n", DEVNAME(sc));
1732 		return;
1733 	}
1734 
1735 	s = splnet();
1736 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1737 	if (req == NULL) {
1738 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1739 		splx(s);
1740 		return;
1741 	}
1742 
1743 	ring->status = RING_CLOSING;
1744 
1745 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1746 	req->msg.ifidx = 0;
1747 	req->msg.request_id = 0;
1748 	req->flow_ring_id = htole16(flowid + 2);
1749 	req->reason = 0;
1750 
1751 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1752 	splx(s);
1753 }
1754 
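/*
 * Tear down all open flowrings.  The firmware acknowledges each
 * delete asynchronously on the control completion ring.
 */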
1755 void
1756 bwfm_pci_stop(struct bwfm_softc *bwfm)
1757 {
1758 	struct bwfm_pci_softc *sc = (void *)bwfm;
1759 	struct bwfm_pci_msgring *ring;
1760 	int i;
1761 
1762 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1763 		ring = &sc->sc_flowrings[i];
1764 		if (ring->status == RING_OPEN)
1765 			bwfm_pci_flowring_delete(sc, i);
1766 	}
1767 }
1768 
1769 int
1770 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1771 {
1772 	struct bwfm_pci_softc *sc = (void *)bwfm;
1773 	struct bwfm_pci_msgring *ring;
1774 	int i;
1775 
1776 	/* If any flowring is still transitioning, we cannot send. */
1777 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1778 		ring = &sc->sc_flowrings[i];
1779 		if (ring->status == RING_OPENING)
1780 			return ENOBUFS;
1781 	}
1782 
1783 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1784 		sc->sc_tx_pkts_full = 1;
1785 		return ENOBUFS;
1786 	}
1787 
1788 	return 0;
1789 }
1790 
1791 int
1792 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
1793 {
1794 	struct bwfm_pci_softc *sc = (void *)bwfm;
1795 	struct bwfm_pci_msgring *ring;
1796 	struct msgbuf_tx_msghdr *tx;
1797 	uint32_t pktid;
1798 	paddr_t paddr;
1799 	int flowid, ret;
1800 
1801 	flowid = bwfm_pci_flowring_lookup(sc, m);
1802 	if (flowid < 0) {
1803 		/*
1804 		 * We cannot send the packet right now as there is
1805 		 * no flowring yet.  The flowring will be created
1806 		 * asynchronously.  While the ring is transitioning,
1807 		 * the TX check will tell the upper layers that we
1808 		 * cannot send packets right now.  When the flowring
1809 		 * is created the queue will be restarted and this
1810 		 * mbuf will be transmitted.
1811 		 */
1812 		bwfm_pci_flowring_create(sc, m);
1813 		return 0;
1814 	}
1815 
1816 	ring = &sc->sc_flowrings[flowid];
1817 	if (ring->status == RING_OPENING ||
1818 	    ring->status == RING_CLOSING) {
1819 		printf("%s: tried to use a flowring that is "
1820 		    "transitioning (status %d)\n",
1821 		    DEVNAME(sc), ring->status);
1822 		return ENOBUFS;
1823 	}
1824 
1825 	tx = bwfm_pci_ring_write_reserve(sc, ring);
1826 	if (tx == NULL)
1827 		return ENOBUFS;
1828 
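	/* Build the TX post descriptor; the 802.3 header travels inline. */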
1829 	memset(tx, 0, sizeof(*tx));
1830 	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1831 	tx->msg.ifidx = 0;
1832 	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1833 	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
1834 	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1835 	tx->seg_cnt = 1;
1836 	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);
1837 
1838 	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
1839 	if (ret) {
1840 		if (ret == ENOBUFS) {
1841 			printf("%s: no pktid available for TX\n",
1842 			    DEVNAME(sc));
1843 			sc->sc_tx_pkts_full = 1;
1844 		}
1845 		bwfm_pci_ring_write_cancel(sc, ring, 1);
1846 		return ret;
1847 	}
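	/* The 802.3 header was copied into txhdr; DMA only the payload. */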
1848 	paddr += ETHER_HDR_LEN;
1849 
1850 	tx->msg.request_id = htole32(pktid + 1);
1851 	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
1852 	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1853 	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1854 
1855 	bwfm_pci_ring_write_commit(sc, ring);
1856 	return 0;
1857 }
1858 
1859 #ifdef BWFM_DEBUG
1860 void
1861 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1862 {
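	/* Drain the firmware's console ring buffer that lives in TCM. */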
1863 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1864 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1865 
1866 	if (newidx != sc->sc_console_readidx)
1867 		DPRINTFN(3, ("BWFM CONSOLE: "));
1868 	while (newidx != sc->sc_console_readidx) {
1869 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1870 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
1871 		sc->sc_console_readidx++;
1872 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
1873 			sc->sc_console_readidx = 0;
1874 		if (ch == '\r')
1875 			continue;
1876 		DPRINTFN(3, ("%c", ch));
1877 	}
1878 }
1879 #endif
1880 
1881 int
1882 bwfm_pci_intr(void *v)
1883 {
1884 	struct bwfm_pci_softc *sc = (void *)v;
1885 	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1886 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1887 	uint32_t status;
1888 
1889 	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1890 	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
1891 		return 0;
1892 
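	/* Mask further interrupts and acknowledge the pending causes. */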
1893 	bwfm_pci_intr_disable(sc);
1894 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1895 	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
1896 
1897 	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1898 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
1899 		printf("%s: handle MB data\n", __func__);
1900 
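	/* A D2H doorbell means one of the completion rings has work. */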
1901 	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
1902 		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
1903 		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
1904 		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);
1905 		if_input(ifp, &ml);
1906 	}
1907 
1908 #ifdef BWFM_DEBUG
1909 	bwfm_pci_debug_console(sc);
1910 #endif
1911 
1912 	bwfm_pci_intr_enable(sc);
1913 	return 1;
1914 }
1915 
1916 void
1917 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
1918 {
1919 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1920 	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
1921 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1922 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
1923 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
1924 }
1925 
1926 void
1927 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
1928 {
1929 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1930 	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
1931 }
1932 
1933 /* Msgbuf protocol implementation */
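/*
 * Submit an ioctl as an IOCTLPTR request on the control submit ring
 * and sleep until bwfm_pci_msgbuf_rxioctl() matches the response by
 * transaction id, or until the one second timeout expires.
 */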
1934 int
1935 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
1936     int cmd, char *buf, size_t *len)
1937 {
1938 	struct bwfm_pci_softc *sc = (void *)bwfm;
1939 	struct msgbuf_ioctl_req_hdr *req;
1940 	struct bwfm_pci_ioctl *ctl;
1941 	struct mbuf *m;
1942 	uint32_t pktid;
1943 	paddr_t paddr;
1944 	size_t buflen;
1945 	int s;
1946 
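	/* Stage the ioctl payload in a single DMA-able cluster mbuf. */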
1947 	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
1948 	m = MCLGETI(NULL, M_DONTWAIT, NULL, buflen);
1949 	if (m == NULL)
1950 		return 1;
1951 	m->m_len = m->m_pkthdr.len = buflen;
1952 
1953 	if (buf)
1954 		memcpy(mtod(m, char *), buf, buflen);
1955 	else
1956 		memset(mtod(m, char *), 0, buflen);
1957 
1958 	s = splnet();
1959 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1960 	if (req == NULL) {
1961 		splx(s);
1962 		m_freem(m);
1963 		return 1;
1964 	}
1965 
1966 	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
1967 		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1968 		splx(s);
1969 		m_freem(m);
1970 		return 1;
1971 	}
1972 
1973 	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
1974 	ctl->transid = sc->sc_ioctl_transid++;
1975 	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);
1976 
1977 	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
1978 	req->msg.ifidx = 0;
1979 	req->msg.flags = 0;
1980 	req->msg.request_id = htole32(pktid);
1981 	req->cmd = htole32(cmd);
1982 	req->output_buf_len = htole16(*len);
1983 	req->trans_id = htole16(ctl->transid);
1984 
1985 	req->input_buf_len = htole16(m->m_len);
1986 	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1987 	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1988 
1989 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1990 	splx(s);
1991 
1992 	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(1));
1993 	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);
1994 
1995 	if (ctl->m == NULL) {
1996 		free(ctl, M_TEMP, sizeof(*ctl));
1997 		return 1;
1998 	}
1999 
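	/* The request cluster was buflen bytes; clamp the reply to it. */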
2000 	*len = min(ctl->retlen, buflen);
2002 	if (buf)
2003 		m_copydata(ctl->m, 0, *len, (caddr_t)buf);
2004 	m_freem(ctl->m);
2005 
2006 	if (ctl->status < 0) {
2007 		free(ctl, M_TEMP, sizeof(*ctl));
2008 		return 1;
2009 	}
2010 
2011 	free(ctl, M_TEMP, sizeof(*ctl));
2012 	return 0;
2013 }
2014 
2015 int
2016 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2017     int cmd, char *buf, size_t len)
2018 {
2019 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2020 }
2021 
2022 void
2023 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2024     struct msgbuf_ioctl_resp_hdr *resp)
2025 {
2026 	struct bwfm_pci_ioctl *ctl, *tmp;
2027 	struct mbuf *m;
2028 
2029 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2030 	    letoh32(resp->msg.request_id));
2031 
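	/* Find the sleeping ioctl by transaction id and wake it up. */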
2032 	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2033 		if (ctl->transid != letoh16(resp->trans_id))
2034 			continue;
2035 		ctl->m = m;
2036 		ctl->retlen = letoh16(resp->resp_len);
2037 		ctl->status = letoh16(resp->compl_hdr.status);
2038 		wakeup(ctl);
2039 		return;
2040 	}
2041 
2042 	m_freem(m);
2043 }
2044