/*	$OpenBSD: if_bwfm_pci.c,v 1.38 2020/12/12 11:48:53 jan Exp $	*/
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/socket.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmvar.h>
#include <dev/ic/bwfmreg.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

#define BWFM_NUM_IOCTL_PKTIDS			8
#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

#define BWFM_NUM_IOCTL_DESCS			1
#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};
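
/*
 * Flowring life cycle, as driven by the code below: rings start out
 * CLOSED, bwfm_pci_flowring_create() moves them to OPENING, the
 * firmware's create completion makes them OPEN (or CLOSED again on
 * error), and bwfm_pci_flowring_delete() takes them through CLOSING
 * back to CLOSED once the delete completion arrives.
 */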

struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;
	uint32_t		 r_ptr;
	int			 nitem;
	int			 itemsz;
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_ioctl {
	uint16_t		 transid;
	uint16_t		 retlen;
	int16_t			 status;
	struct mbuf		*m;
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};

struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};

struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void			*sc_ih;

	int			 sc_initialized;

	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
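
/*
 * These accessors assume a single DMA segment, which holds because
 * bwfm_pci_dmamem_alloc() creates the map with exactly one segment.
 */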

int		 bwfm_pci_match(struct device *, void *, void *);
void		 bwfm_pci_attach(struct device *, struct device *, void *);
int		 bwfm_pci_detach(struct device *, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t, const u_char *, size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf *,
		    uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, struct mbuf_list *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
		    struct mbuf_list *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		    struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		    struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

int		 bwfm_pci_preinit(struct bwfm_softc *);
void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);
void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
		    struct msgbuf_ioctl_resp_hdr *);

struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
};

struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};

struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
};

static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
};

int
bwfm_pci_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, bwfm_pci_devices,
	    nitems(bwfm_pci_devices)));
}

void
bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	pci_intr_handle_t ih;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios, 0)) {
		printf(": can't map bar1\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios, 0)) {
		printf(": can't map bar0\n");
		goto bar1;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;
	sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar0;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    bwfm_pci_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar0;
	}
	printf(": %s\n", intrstr);

	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	config_mountroot(self, bwfm_attachhook);
	return;

bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
}

int
bwfm_pci_preinit(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_ringinfo ringinfo;
	const char *chip = NULL;
	char name[128];
	u_char *ucode, *nvram = NULL;
	size_t size, nvsize, nvlen = 0;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	if (sc->sc_initialized)
		return 0;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return 1;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	switch (bwfm->sc_chip.ch_chip)
	{
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev > 7)
			chip = "4350";
		else
			chip = "4350c2";
		break;
	case BRCM_CC_4356_CHIP_ID:
		chip = "4356";
		break;
	case BRCM_CC_43602_CHIP_ID:
		chip = "43602";
		break;
	case BRCM_CC_4371_CHIP_ID:
		chip = "4371";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return 1;
	}

	snprintf(name, sizeof(name), "brcmfmac%s-pcie.bin", chip);
	if (loadfirmware(name, &ucode, &size) != 0) {
		printf("%s: failed loadfirmware of file %s\n",
		    DEVNAME(sc), name);
		return 1;
	}

	/* The .txt file needs to be processed first. */
	snprintf(name, sizeof(name), "brcmfmac%s-pcie.txt", chip);
	if (loadfirmware(name, &nvram, &nvsize) == 0) {
		if (bwfm_nvram_convert(nvram, nvsize, &nvlen) != 0) {
			printf("%s: failed to process file %s\n",
			    DEVNAME(sc), name);
			free(ucode, M_DEVBUF, size);
			free(nvram, M_DEVBUF, nvsize);
			return 1;
		}
	}

	/* The .nvram file is the pre-processed version. */
	if (nvlen == 0) {
		snprintf(name, sizeof(name), "brcmfmac%s-pcie.nvram", chip);
		if (loadfirmware(name, &nvram, &nvsize) == 0)
			nvlen = nvsize;
	}

	/* Retrieve RAM size from firmware. */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		free(ucode, M_DEVBUF, size);
		free(nvram, M_DEVBUF, nvsize);
		return 1;
	}
	free(ucode, M_DEVBUF, size);
	free(nvram, M_DEVBUF, nvsize);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return 1;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet. */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For power management. */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information. */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* The firmware's "dmesg". */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX ring has a read and a write pointer. */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return 1;
		}

		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/* Maps IOCTL mbufs to a packet id and back. */
	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_ioctl_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason (it could also be a bug somewhere in this
	 * driver), the firmware needs a bunch of RX buffers, otherwise
	 * it won't send any RX complete messages.  64 buffers don't
	 * suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

	TAILQ_INIT(&sc->sc_ioctlq);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_initialized = 1;
	return 0;

cleanup:
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 1;
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
    const u_char *nvram, size_t nvlen)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared, written;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	if (nvram) {
		for (i = 0; i < nvlen; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize
			    - nvlen + i, nvram[i]);
	}

	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);

	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared != written)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(struct device *self, int flags)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	free(bdm, M_DEVBUF, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}

/*
 * We need a simple mapping from a packet ID to mbufs, because when
 * a transfer completes, we only know the ID, so we have to look up
 * the mbuf for that ID.  This simply looks for an empty slot.
 */
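/*
 * Life cycle, as used by the submit and completion paths below: a
 * sender calls bwfm_pci_pktid_new() to DMA-map an mbuf and obtain an
 * id, the id travels to the firmware as msg.request_id, and the
 * matching completion handler passes the echoed id back to
 * bwfm_pci_pktid_free() to unmap and reclaim the mbuf.
 */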
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
				if (m_defrag(m, M_DONTWAIT))
					return EFBIG;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = m;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

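/*
 * The ring setup below publishes each ring's host DMA base address
 * and item geometry into the device's ring memory block in TCM at
 * *ring_mem, then advances *ring_mem by BWFM_RING_MEM_SZ so the next
 * ring uses the following block.
 */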
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

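/*
 * The read/write index accessors below come in two flavors, chosen at
 * preinit time: if the firmware supports DMA'd indices
 * (sc_dma_idx_sz != 0), the pointers live in the host-memory index
 * buffer; otherwise they are read from and written to device TCM.
 */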
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can roll back later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
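
/*
 * A worked example of the free-space math above (numbers invented):
 * with nitem = 64, w_ptr = 60 and r_ptr = 10, available becomes
 * 10 + (64 - 60) = 14.  One slot always stays unused so a full ring
 * can be told apart from an empty one, hence the "available <= 1"
 * check; at most available - 1 descriptors may be reserved.
 */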

void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = min(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read the number of descriptors available (submitted by the firmware)
 * and retrieve a pointer to the first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;

	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}
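
/*
 * Note that on a wraparound this returns only the descriptors up to
 * the end of the ring buffer; bwfm_pci_ring_rx() below loops via its
 * "again" label to pick up the remainder from the start of the ring.
 */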

/*
 * Let the firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let the firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Roll back N descriptors in case we don't actually want
 * to commit to them.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * For each written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    struct mbuf_list *ml)
{
	void *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
{
	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifq_restart(&ifp->if_snd);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, m))
				m_freem(m);
		}
		ifq_restart(&ifp->if_snd);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
		    letoh32(msg->request_id));
		if (m == NULL)
			break;
		m_freem(m);
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		bwfm_pci_msgbuf_rxioctl(sc, resp);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m, ml);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id) - 1);
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifq_restart(&ifp->if_snd);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m, ml);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
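
/*
 * Flowring selection, as implemented by the lookup/create functions
 * below: in STA mode the flowid is simply the fifo; in HOSTAP mode it
 * is derived from the destination address, e.g. da[5] = 7 with
 * fifo = 1 yields 7 * 2 + 1 = 15.  The result is then probed linearly
 * modulo sc_max_flowrings until a matching or free ring is found.
 */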

int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}

void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far.  Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		return;
	}

	cmd.m = m;
	cmd.prio = prio;
	cmd.flowid = flowid;
	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
}
1671 
1672 void
1673 bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
1674 {
1675 	struct bwfm_pci_softc *sc = (void *)bwfm;
1676 #ifndef IEEE80211_STA_ONLY
1677 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1678 #endif
1679 	struct bwfm_cmd_flowring_create *cmd = arg;
1680 	struct msgbuf_tx_flowring_create_req *req;
1681 	struct bwfm_pci_msgring *ring;
1682 	uint8_t *da, *sa;
1683 	int s;
1684 
1685 	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1686 	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1687 
1688 	ring = &sc->sc_flowrings[cmd->flowid];
1689 	if (ring->status != RING_OPENING) {
1690 		printf("%s: flowring not opening\n", DEVNAME(sc));
1691 		return;
1692 	}
1693 
1694 	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
1695 		printf("%s: cannot setup flowring\n", DEVNAME(sc));
1696 		return;
1697 	}
1698 
1699 	s = splnet();
1700 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1701 	if (req == NULL) {
1702 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1703 		splx(s);
1704 		return;
1705 	}
1706 
1707 	ring->status = RING_OPENING;
1708 	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1709 	ring->m = cmd->m;
1710 	memcpy(ring->mac, da, ETHER_ADDR_LEN);
1711 #ifndef IEEE80211_STA_ONLY
1712 	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1713 		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1714 #endif
1715 
1716 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1717 	req->msg.ifidx = 0;
1718 	req->msg.request_id = 0;
1719 	req->tid = bwfm_pci_prio2fifo[cmd->prio];
1720 	req->flow_ring_id = htole16(cmd->flowid + 2);
1721 	memcpy(req->da, da, ETHER_ADDR_LEN);
1722 	memcpy(req->sa, sa, ETHER_ADDR_LEN);
1723 	req->flow_ring_addr.high_addr =
1724 	    htole32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1725 	req->flow_ring_addr.low_addr =
1726 	    htole32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1727 	req->max_items = htole16(512);
1728 	req->len_item = htole16(48);
1729 
1730 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1731 	splx(s);
1732 }
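
/*
 * The geometry sent to the firmware (512 items of 48 bytes) must match
 * what bwfm_pci_setup_flowring() allocated above, and the ring's
 * 64-bit DMA base travels as two little-endian 32-bit halves.  The
 * split generalizes as follows (sketch, hypothetical type):
 */
#if 0
struct dma_addr64 {
	uint32_t	low_addr;
	uint32_t	high_addr;
};

static struct dma_addr64
dma_addr_split(uint64_t dva)
{
	struct dma_addr64 a;

	a.high_addr = htole32(dva >> 32);		/* upper 32 bits */
	a.low_addr = htole32(dva & 0xffffffff);		/* lower 32 bits */
	return a;
}
#endif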
1733 
1734 void
1735 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1736 {
1737 	struct msgbuf_tx_flowring_delete_req *req;
1738 	struct bwfm_pci_msgring *ring;
1739 	int s;
1740 
1741 	ring = &sc->sc_flowrings[flowid];
1742 	if (ring->status != RING_OPEN) {
1743 		printf("%s: flowring not open\n", DEVNAME(sc));
1744 		return;
1745 	}
1746 
1747 	s = splnet();
1748 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1749 	if (req == NULL) {
1750 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1751 		splx(s);
1752 		return;
1753 	}
1754 
1755 	ring->status = RING_CLOSING;
1756 
1757 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1758 	req->msg.ifidx = 0;
1759 	req->msg.request_id = 0;
1760 	req->flow_ring_id = htole16(flowid + 2);
1761 	req->reason = 0;
1762 
1763 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1764 	splx(s);
1765 }
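
/*
 * In both the create and the delete request the on-wire ring ID is the
 * flowring index plus 2: the msgbuf protocol numbers the H2D control
 * and RX-post submit rings 0 and 1, so TX flowrings start at ID 2.
 */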
1766 
1767 void
1768 bwfm_pci_stop(struct bwfm_softc *bwfm)
1769 {
1770 	struct bwfm_pci_softc *sc = (void *)bwfm;
1771 	struct bwfm_pci_msgring *ring;
1772 	int i;
1773 
1774 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1775 		ring = &sc->sc_flowrings[i];
1776 		if (ring->status == RING_OPEN)
1777 			bwfm_pci_flowring_delete(sc, i);
1778 	}
1779 }
1780 
1781 int
1782 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1783 {
1784 	struct bwfm_pci_softc *sc = (void *)bwfm;
1785 	struct bwfm_pci_msgring *ring;
1786 	int i;
1787 
1788 	/* If we are transitioning, we cannot send. */
1789 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1790 		ring = &sc->sc_flowrings[i];
1791 		if (ring->status == RING_OPENING)
1792 			return ENOBUFS;
1793 	}
1794 
1795 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1796 		sc->sc_tx_pkts_full = 1;
1797 		return ENOBUFS;
1798 	}
1799 
1800 	return 0;
1801 }
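
/*
 * Returning ENOBUFS here is pure backpressure: the common bwfm(4)
 * start routine is expected to consult bs_txcheck before dequeueing
 * and to leave packets queued while a ring transitions or the pktid
 * table is full.  A sketch of such a caller (hypothetical helper,
 * assuming the bs_txcheck/bs_txdata bus ops; illustration only):
 */
#if 0
static void
start_sketch(struct bwfm_softc *sc, struct ifqueue *ifq)
{
	struct mbuf *m;

	for (;;) {
		if (sc->sc_bus_ops->bs_txcheck(sc)) {
			ifq_set_oactive(ifq);	/* wait for a restart */
			break;
		}
		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;
		if (sc->sc_bus_ops->bs_txdata(sc, m) != 0)
			m_freem(m);
	}
}
#endif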
1802 
1803 int
1804 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
1805 {
1806 	struct bwfm_pci_softc *sc = (void *)bwfm;
1807 	struct bwfm_pci_msgring *ring;
1808 	struct msgbuf_tx_msghdr *tx;
1809 	uint32_t pktid;
1810 	paddr_t paddr;
1811 	int flowid, ret;
1812 
1813 	flowid = bwfm_pci_flowring_lookup(sc, m);
1814 	if (flowid < 0) {
1815 		/*
1816 		 * We cannot send the packet right now as there is
1817 		 * no flowring yet.  The flowring will be created
1818 		 * asynchronously.  While the ring is transitioning
1819 		 * the TX check tells the upper layers that we
1820 		 * cannot send packets.  Once the flowring has been
1821 		 * created the queue is restarted and this mbuf
1822 		 * will be transmitted.
1823 		 */
1824 		bwfm_pci_flowring_create(sc, m);
1825 		return 0;
1826 	}
1827 
1828 	ring = &sc->sc_flowrings[flowid];
1829 	if (ring->status == RING_OPENING ||
1830 	    ring->status == RING_CLOSING) {
1831 		printf("%s: tried to use a flowring that is "
1832 		    "transitioning (status %d)\n",
1833 		    DEVNAME(sc), ring->status);
1834 		return ENOBUFS;
1835 	}
1836 
1837 	tx = bwfm_pci_ring_write_reserve(sc, ring);
1838 	if (tx == NULL)
1839 		return ENOBUFS;
1840 
1841 	memset(tx, 0, sizeof(*tx));
1842 	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1843 	tx->msg.ifidx = 0;
1844 	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1845 	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
1846 	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1847 	tx->seg_cnt = 1;
1848 	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);
1849 
1850 	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
1851 	if (ret) {
1852 		if (ret == ENOBUFS) {
1853 			printf("%s: no pktid available for TX\n",
1854 			    DEVNAME(sc));
1855 			sc->sc_tx_pkts_full = 1;
1856 		}
1857 		bwfm_pci_ring_write_cancel(sc, ring, 1);
1858 		return ret;
1859 	}
1860 	paddr += ETHER_HDR_LEN;
1861 
1862 	tx->msg.request_id = htole32(pktid + 1);
1863 	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
1864 	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1865 	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1866 
1867 	bwfm_pci_ring_write_commit(sc, ring);
1868 	return 0;
1869 }
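
/*
 * Two details of the TX post above are easy to miss: the 802.3 header
 * is carried inline in the message (tx->txhdr) while the DMA address
 * and data_len are advanced past ETHER_HDR_LEN, so the firmware gets
 * header and payload separately; and request_id is pktid + 1,
 * apparently reserving 0 as an invalid ID, which the completion path
 * undoes by subtracting 1 before freeing the pktid.
 */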
1870 
1871 #ifdef BWFM_DEBUG
1872 void
1873 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1874 {
1875 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1876 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1877 
1878 	if (newidx != sc->sc_console_readidx)
1879 		DPRINTFN(3, ("BWFM CONSOLE: "));
1880 	while (newidx != sc->sc_console_readidx) {
1881 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1882 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
1883 		sc->sc_console_readidx++;
1884 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
1885 			sc->sc_console_readidx = 0;
1886 		if (ch == '\r')
1887 			continue;
1888 		DPRINTFN(3, ("%c", ch));
1889 	}
1890 }
1891 #endif
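
/*
 * The firmware console is a plain producer/consumer byte ring in
 * device memory: the firmware advances the write index, and the host
 * keeps a private read index that wraps at the buffer size, as
 * modeled below (sketch, illustration only):
 */
#if 0
static void
console_drain(const uint8_t *buf, uint32_t bufsz, uint32_t *rd,
    uint32_t wr)
{
	while (*rd != wr) {
		if (buf[*rd] != '\r')		/* drop carriage returns */
			printf("%c", buf[*rd]);
		if (++(*rd) == bufsz)		/* wrap around */
			*rd = 0;
	}
}
#endif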
1892 
1893 int
1894 bwfm_pci_intr(void *v)
1895 {
1896 	struct bwfm_pci_softc *sc = v;
1897 	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1898 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1899 	uint32_t status;
1900 
1901 	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1902 	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
1903 		return 0;
1904 
1905 	bwfm_pci_intr_disable(sc);
1906 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1907 	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
1908 
1909 	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1910 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
1911 		printf("%s: handle MB data\n", __func__);
1912 
1913 	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
1914 		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
1915 		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
1916 		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);
1917 
1918 		if (ifiq_input(&ifp->if_rcv, &ml))
1919 			if_rxr_livelocked(&sc->sc_rxbuf_ring);
1920 	}
1921 
1922 #ifdef BWFM_DEBUG
1923 	bwfm_pci_debug_console(sc);
1924 #endif
1925 
1926 	bwfm_pci_intr_enable(sc);
1927 	return 1;
1928 }
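
/*
 * This is the usual shared-interrupt pattern: read the mailbox status,
 * return 0 (not ours) if it is clear, mask further interrupts,
 * acknowledge the pending bits by writing the status back, drain the
 * three completion rings, and unmask again before claiming the
 * interrupt by returning 1.  Note that FN0 mailbox data is only
 * logged above, not acted upon.
 */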
1929 
1930 void
1931 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
1932 {
1933 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1934 	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
1935 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1936 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
1937 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
1938 }
1939 
1940 void
1941 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
1942 {
1943 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1944 	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
1945 }
1946 
1947 /* Msgbuf protocol implementation */
1948 int
1949 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
1950     int cmd, char *buf, size_t *len)
1951 {
1952 	struct bwfm_pci_softc *sc = (void *)bwfm;
1953 	struct msgbuf_ioctl_req_hdr *req;
1954 	struct bwfm_pci_ioctl *ctl;
1955 	struct mbuf *m;
1956 	uint32_t pktid;
1957 	paddr_t paddr;
1958 	size_t buflen;
1959 	int s;
1960 
1961 	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
1962 	m = MCLGETL(NULL, M_DONTWAIT, buflen);
1963 	if (m == NULL)
1964 		return 1;
1965 	m->m_len = m->m_pkthdr.len = buflen;
1966 
1967 	if (buf)
1968 		memcpy(mtod(m, char *), buf, buflen);
1969 	else
1970 		memset(mtod(m, char *), 0, buflen);
1971 
1972 	s = splnet();
1973 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1974 	if (req == NULL) {
1975 		splx(s);
1976 		m_freem(m);
1977 		return 1;
1978 	}
1979 
1980 	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
1981 		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1982 		splx(s);
1983 		m_freem(m);
1984 		return 1;
1985 	}
1986 
1987 	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
1988 	ctl->transid = sc->sc_ioctl_transid++;
1989 	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);
1990 
1991 	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
1992 	req->msg.ifidx = 0;
1993 	req->msg.flags = 0;
1994 	req->msg.request_id = htole32(pktid);
1995 	req->cmd = htole32(cmd);
1996 	req->output_buf_len = htole16(*len);
1997 	req->trans_id = htole16(ctl->transid);
1998 
1999 	req->input_buf_len = htole16(m->m_len);
2000 	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
2001 	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);
2002 
2003 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2004 	splx(s);
2005 
2006 	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(1));
2007 	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);
2008 
2009 	if (ctl->m == NULL) {
2010 		free(ctl, M_TEMP, sizeof(*ctl));
2011 		return 1;
2012 	}
2013 
2014 	*len = min(ctl->retlen, m->m_len);
2015 	*len = min(*len, buflen);
2016 	if (buf)
2017 		m_copydata(ctl->m, 0, *len, (caddr_t)buf);
2018 	m_freem(ctl->m);
2019 
2020 	if (ctl->status < 0) {
2021 		free(ctl, M_TEMP, sizeof(*ctl));
2022 		return 1;
2023 	}
2024 
2025 	free(ctl, M_TEMP, sizeof(*ctl));
2026 	return 0;
2027 }
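
/*
 * Ioctls are synchronous for the caller but asynchronous on the wire:
 * the request is posted with a transaction ID, the caller sleeps for
 * at most a second, and the interrupt path matches the response to a
 * waiter by that ID and wakes it up.  A minimal model of the
 * rendezvous (hypothetical names, illustration only):
 */
#if 0
struct waiter {
	uint16_t		 transid;
	struct mbuf		*reply;		/* NULL until matched */
	TAILQ_ENTRY(waiter)	 next;
};

TAILQ_HEAD(, waiter) waitq = TAILQ_HEAD_INITIALIZER(waitq);

static struct mbuf *
wait_for_reply(struct waiter *w)
{
	w->reply = NULL;
	TAILQ_INSERT_TAIL(&waitq, w, next);
	/* ... post the request tagged with w->transid ... */
	tsleep_nsec(w, PWAIT, "reply", SEC_TO_NSEC(1));	/* 1s timeout */
	TAILQ_REMOVE(&waitq, w, next);
	return w->reply;			/* NULL on timeout */
}

static void
reply_received(uint16_t transid, struct mbuf *m)
{
	struct waiter *w;

	TAILQ_FOREACH(w, &waitq, next) {
		if (w->transid != transid)
			continue;
		w->reply = m;
		wakeup(w);		/* unblock wait_for_reply() */
		return;
	}
	m_freem(m);			/* reply arrived after timeout */
}
#endif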
2028 
2029 int
2030 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2031     int cmd, char *buf, size_t len)
2032 {
2033 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2034 }
2035 
2036 void
2037 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2038     struct msgbuf_ioctl_resp_hdr *resp)
2039 {
2040 	struct bwfm_pci_ioctl *ctl, *tmp;
2041 	struct mbuf *m;
2042 
2043 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2044 	    letoh32(resp->msg.request_id));
2045 
2046 	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2047 		if (ctl->transid != letoh16(resp->trans_id))
2048 			continue;
2049 		ctl->m = m;
2050 		ctl->retlen = letoh16(resp->resp_len);
2051 		ctl->status = letoh16(resp->compl_hdr.status);
2052 		wakeup(ctl);
2053 		return;
2054 	}
2055 
2056 	m_freem(m);
2057 }
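
/*
 * If the waiting thread already timed out and removed its queue entry,
 * no transid matches here and the response mbuf is dropped by the
 * final m_freem() above.
 */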
2058