1 /*	$NetBSD: if_bwfm_pci.c,v 1.1 2018/05/11 07:42:22 maya Exp $	*/
2 /*	$OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $	*/
3 /*
4  * Copyright (c) 2010-2016 Broadcom Corporation
5  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/buf.h>
23 #include <sys/kernel.h>
24 #include <sys/kmem.h>
25 #include <sys/device.h>
26 #include <sys/pool.h>
27 #include <sys/workqueue.h>
28 #include <sys/socket.h>
29 
30 #include <net/bpf.h>
31 #include <net/if.h>
32 #include <net/if_dl.h>
33 #include <net/if_ether.h>
34 #include <net/if_media.h>
35 
36 #include <netinet/in.h>
37 
38 #include <net80211/ieee80211_var.h>
39 
40 #include <dev/firmload.h>
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45 
46 #include <dev/ic/bwfmvar.h>
47 #include <dev/ic/bwfmreg.h>
48 #include <dev/pci/if_bwfm_pci.h>
49 
50 #define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
51 #define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
52 #define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN
53 
54 #define BWFM_NUM_TX_MSGRINGS			2
55 #define BWFM_NUM_RX_MSGRINGS			3
56 
57 #define BWFM_NUM_TX_PKTIDS			2048
58 #define BWFM_NUM_RX_PKTIDS			1024
59 
60 #define BWFM_NUM_TX_DESCS			1
61 #define BWFM_NUM_RX_DESCS			1
62 
63 #ifdef BWFM_DEBUG
64 #define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
65 #define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
66 static int bwfm_debug = 2;
67 #else
68 #define DPRINTF(x)	do { ; } while (0)
69 #define DPRINTFN(n, x)	do { ; } while (0)
70 #endif
71 
72 #define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
73 #define letoh16		htole16
74 #define letoh32		htole32
75 #define nitems(x)	__arraycount(x)
76 
77 enum ring_status {
78 	RING_CLOSED,
79 	RING_CLOSING,
80 	RING_OPEN,
81 	RING_OPENING,
82 };
83 
84 struct bwfm_pci_msgring {
85 	uint32_t		 w_idx_addr;
86 	uint32_t		 r_idx_addr;
87 	uint32_t		 w_ptr;
88 	uint32_t		 r_ptr;
89 	int			 nitem;
90 	int			 itemsz;
91 	enum ring_status	 status;
92 	struct bwfm_pci_dmamem	*ring;
93 	struct mbuf		*m;
94 
95 	int			 fifo;
96 	uint8_t			 mac[ETHER_ADDR_LEN];
97 };
98 
99 struct bwfm_pci_buf {
100 	bus_dmamap_t	 bb_map;
101 	struct mbuf	*bb_m;
102 };
103 
104 struct bwfm_pci_pkts {
105 	struct bwfm_pci_buf	*pkts;
106 	uint32_t		 npkt;
107 	int			 last;
108 };
109 
110 struct if_rxring {
111 	u_int	rxr_total;
112 	u_int	rxr_inuse;
113 };
114 
115 struct bwfm_cmd_flowring_create {
116 	struct work		 wq_cookie;
117 	struct bwfm_pci_softc	*sc;
118 	struct mbuf		*m;
119 	int			 flowid;
120 	int			 prio;
121 };
122 
123 struct bwfm_pci_softc {
124 	struct bwfm_softc	 sc_sc;
125 	pci_chipset_tag_t	 sc_pc;
126 	pcitag_t		 sc_tag;
127 	pcireg_t		 sc_id;
128 	void			*sc_ih;
129 	pci_intr_handle_t	*sc_pihp;
130 
131 	bus_space_tag_t		 sc_reg_iot;
132 	bus_space_handle_t	 sc_reg_ioh;
133 	bus_size_t		 sc_reg_ios;
134 
135 	bus_space_tag_t		 sc_tcm_iot;
136 	bus_space_handle_t	 sc_tcm_ioh;
137 	bus_size_t		 sc_tcm_ios;
138 
139 	bus_dma_tag_t		 sc_dmat;
140 
141 	uint32_t		 sc_shared_address;
142 	uint32_t		 sc_shared_flags;
143 	uint8_t			 sc_shared_version;
144 
145 	uint8_t			 sc_dma_idx_sz;
146 	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
147 	size_t			 sc_dma_idx_bufsz;
148 
149 	uint16_t		 sc_max_rxbufpost;
150 	uint32_t		 sc_rx_dataoffset;
151 	uint32_t		 sc_htod_mb_data_addr;
152 	uint32_t		 sc_dtoh_mb_data_addr;
153 	uint32_t		 sc_ring_info_addr;
154 
155 	uint32_t		 sc_console_base_addr;
156 	uint32_t		 sc_console_buf_addr;
157 	uint32_t		 sc_console_buf_size;
158 	uint32_t		 sc_console_readidx;
159 
160 	struct pool		 sc_flowring_pool;
161 	struct workqueue	*flowring_wq;
162 
163 	uint16_t		 sc_max_flowrings;
164 	uint16_t		 sc_max_submissionrings;
165 	uint16_t		 sc_max_completionrings;
166 
167 	struct bwfm_pci_msgring	 sc_ctrl_submit;
168 	struct bwfm_pci_msgring	 sc_rxpost_submit;
169 	struct bwfm_pci_msgring	 sc_ctrl_complete;
170 	struct bwfm_pci_msgring	 sc_tx_complete;
171 	struct bwfm_pci_msgring	 sc_rx_complete;
172 	struct bwfm_pci_msgring	*sc_flowrings;
173 
174 	struct bwfm_pci_dmamem	*sc_scratch_buf;
175 	struct bwfm_pci_dmamem	*sc_ringupd_buf;
176 
177 	struct bwfm_pci_dmamem	*sc_ioctl_buf;
178 	int			 sc_ioctl_reqid;
179 	uint32_t		 sc_ioctl_resp_pktid;
180 	uint32_t		 sc_ioctl_resp_ret_len;
181 	uint32_t		 sc_ioctl_resp_status;
182 	int			 sc_ioctl_poll;
183 
184 	struct if_rxring	 sc_ioctl_ring;
185 	struct if_rxring	 sc_event_ring;
186 	struct if_rxring	 sc_rxbuf_ring;
187 
188 	struct bwfm_pci_pkts	 sc_rx_pkts;
189 	struct bwfm_pci_pkts	 sc_tx_pkts;
190 	int			 sc_tx_pkts_full;
191 };
192 
193 struct bwfm_pci_dmamem {
194 	bus_dmamap_t		bdm_map;
195 	bus_dma_segment_t	bdm_seg;
196 	size_t			bdm_size;
197 	char *			bdm_kva;
198 };
199 
200 #define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
201 #define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
202 #define BWFM_PCI_DMA_DVA(_bdm)	((_bdm)->bdm_map->dm_segs[0].ds_addr)
203 #define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)
204 
205 static u_int	 if_rxr_get(struct if_rxring *rxr, unsigned int max);
206 static void	 if_rxr_put(struct if_rxring *rxr, unsigned int n);
207 static void	 if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);
208 
209 int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
210 void		 bwfm_pci_attachhook(device_t);
211 void		 bwfm_pci_attach(device_t, device_t, void *);
212 int		 bwfm_pci_detach(device_t, int);
213 
214 int		 bwfm_pci_intr(void *);
215 void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
216 void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
217 int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
218 		    size_t);
219 void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);
220 
221 struct bwfm_pci_dmamem *
222 		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
223 		    bus_size_t);
224 void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
225 int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
226 		    struct bwfm_pci_pkts *);
227 int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
228 		    struct bwfm_pci_pkts *, struct mbuf *,
229 		    uint32_t *, paddr_t *);
230 struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
231 		    struct bwfm_pci_pkts *, uint32_t);
232 void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
233 		    struct if_rxring *, uint32_t);
234 void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
235 void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
236 int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
237 		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
238 int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
239 		    int, size_t);
240 
241 void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
242 		    struct bwfm_pci_msgring *);
243 void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
244 		    struct bwfm_pci_msgring *);
245 void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
246 		    struct bwfm_pci_msgring *);
247 void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
248 		    struct bwfm_pci_msgring *);
249 void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
250 		    struct bwfm_pci_msgring *);
251 void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
252 		    struct bwfm_pci_msgring *);
253 void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
254 		    struct bwfm_pci_msgring *, int, int *);
255 void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
256 		    struct bwfm_pci_msgring *, int *);
257 void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
258 		    struct bwfm_pci_msgring *, int);
259 void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
260 		    struct bwfm_pci_msgring *);
261 void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
262 		    struct bwfm_pci_msgring *, int);
263 
264 void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
265 		    struct bwfm_pci_msgring *);
266 void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);
267 
268 uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
269 void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
270 		    uint32_t);
271 int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
272 int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
273 void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);
274 
275 int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
276 		     struct mbuf *);
277 void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
278 		     struct mbuf *);
279 void		 bwfm_pci_flowring_create_cb(struct work *, void *);
280 void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
281 
282 void		 bwfm_pci_stop(struct bwfm_softc *);
283 int		 bwfm_pci_txcheck(struct bwfm_softc *);
284 int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
285 
286 #ifdef BWFM_DEBUG
287 void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
288 #endif
289 
290 int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
291 		    int, char *, size_t *);
292 int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
293 		    int, char *, size_t);
294 
295 struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
296 	.bc_read = bwfm_pci_buscore_read,
297 	.bc_write = bwfm_pci_buscore_write,
298 	.bc_prepare = bwfm_pci_buscore_prepare,
299 	.bc_reset = bwfm_pci_buscore_reset,
300 	.bc_setup = NULL,
301 	.bc_activate = bwfm_pci_buscore_activate,
302 };
303 
304 struct bwfm_bus_ops bwfm_pci_bus_ops = {
305 	.bs_init = NULL,
306 	.bs_stop = bwfm_pci_stop,
307 	.bs_txcheck = bwfm_pci_txcheck,
308 	.bs_txdata = bwfm_pci_txdata,
309 	.bs_txctl = NULL,
310 	.bs_rxctl = NULL,
311 };
312 
313 struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
314 	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
315 	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
316 };
317 
318 
319 CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
320     bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);
321 
322 static const struct bwfm_pci_matchid {
323 	pci_vendor_id_t		bwfm_vendor;
324 	pci_product_id_t	bwfm_product;
325 } bwfm_pci_devices[] = {
326 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
327 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
328 };
329 
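/*
 * Compatibility shim for OpenBSD's MCLGETI(), from which this driver
 * was ported: allocate a packet header mbuf with an external buffer
 * of the requested size, built here from MGETHDR() and MEXTMALLOC().
 * The sc and ifp arguments are unused and only kept to match the
 * OpenBSD call sites.
 */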
330 static struct mbuf *
331 MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
332     struct ifnet *ifp __unused, u_int size)
333 {
334 	struct mbuf *m;
335 
336 	MGETHDR(m, how, MT_DATA);
337 	if (m == NULL)
338 		return NULL;
339 
340 	MEXTMALLOC(m, size, how);
341 	if ((m->m_flags & M_EXT) == 0) {
342 		m_freem(m);
343 		return NULL;
344 	}
345 	return m;
346 }
347 
348 int
349 bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
350 {
351 	struct pci_attach_args *pa = aux;
352 
353 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BROADCOM)
354 		return 0;
355 
356 	for (size_t i = 0; i < __arraycount(bwfm_pci_devices); i++)
357 		if (PCI_PRODUCT(pa->pa_id) == bwfm_pci_devices[i].bwfm_product)
358 			return 1;
359 
360 	return 0;
361 }
362 
363 void
364 bwfm_pci_attach(device_t parent, device_t self, void *aux)
365 {
366 	struct bwfm_pci_softc *sc = device_private(self);
367 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
368 	const char *intrstr;
369 	char intrbuf[PCI_INTRSTR_LEN];
370 
371 	sc->sc_sc.sc_dev = self;
372 
373 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
374 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
375 	    NULL, &sc->sc_reg_ios)) {
376 		printf(": can't map bar0\n");
377 		return;
378 	}
379 
380 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
381 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
382 	    NULL, &sc->sc_tcm_ios)) {
383 		printf(": can't map bar1\n");
384 		goto bar0;
385 	}
386 
387 	sc->sc_pc = pa->pa_pc;
388 	sc->sc_tag = pa->pa_tag;
389 	sc->sc_id = pa->pa_id;
390 
391 	if (pci_dma64_available(pa))
392 		sc->sc_dmat = pa->pa_dmat64;
393 	else
394 		sc->sc_dmat = pa->pa_dmat;
395 
396 	/* Map and establish the interrupt. */
397 	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
398 		printf(": couldn't map interrupt\n");
399 		goto bar1;
400 	}
401 	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf, sizeof(intrbuf));
402 
403 	sc->sc_ih = pci_intr_establish(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
404 	    bwfm_pci_intr, sc);
405 	if (sc->sc_ih == NULL) {
406 		printf(": couldn't establish interrupt");
407 		if (intrstr != NULL)
408 			printf(" at %s", intrstr);
409 		printf("\n");
410 		goto bar1;
411 	}
412 	printf(": %s\n", intrstr);
413 
414 	config_mountroot(self, bwfm_pci_attachhook);
415 	return;
416 
417 bar1:
418 	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
419 bar0:
420 	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
421 }
422 
423 void
424 bwfm_pci_attachhook(device_t self)
425 {
426 	struct bwfm_pci_softc *sc = device_private(self);
427 	struct bwfm_softc *bwfm = (void *)sc;
428 	struct bwfm_pci_ringinfo ringinfo;
429 	const char *name = NULL;
430 	firmware_handle_t fwh;
431 	u_char *ucode; size_t size;
432 	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
433 	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
434 	uint32_t idx_offset, reg;
435 	int i;
436 	int error;
437 
438 	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
439 	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
440 		printf("%s: cannot attach chip\n", DEVNAME(sc));
441 		return;
442 	}
443 
444 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
445 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
446 	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
447 	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
448 	    BWFM_PCI_PCIE2REG_CONFIGDATA);
449 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
450 	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
451 
452 	switch (bwfm->sc_chip.ch_chip)
453 	{
454 	case BRCM_CC_4350_CHIP_ID:
455 		if (bwfm->sc_chip.ch_chiprev > 7)
456 			name = "brcmfmac4350-pcie.bin";
457 		else
458 			name = "brcmfmac4350c2-pcie.bin";
459 		break;
460 	case BRCM_CC_43602_CHIP_ID:
461 		name = "brcmfmac43602-pcie.bin";
462 		break;
463 	default:
464 		printf("%s: unknown firmware for chip %s\n",
465 		    DEVNAME(sc), bwfm->sc_chip.ch_name);
466 		return;
467 	}
468 
469 	if (firmware_open("if_bwfm", name, &fwh) != 0) {
470 		printf("%s: failed to open firmware file %s\n",
471 		    DEVNAME(sc), name);
472 		return;
473 	}
474 	size = firmware_get_size(fwh);
475 	ucode = firmware_malloc(size);
476 	if (ucode == NULL) {
477 		printf("%s: failed to allocate firmware memory\n",
478 		    DEVNAME(sc));
479 		firmware_close(fwh);
480 		return;
481 	}
482 	error = firmware_read(fwh, 0, ucode, size);
483 	firmware_close(fwh);
484 	if (error != 0) {
485 		printf("%s: failed to read firmware (error %d)\n",
486 		    DEVNAME(sc), error);
487 		firmware_free(ucode, size);
488 		return;
489 	}
490 
491 	/* Retrieve RAM size from firmware. */
492 	if (size >= BWFM_RAMSIZE + 8) {
493 		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
494 		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
495 			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
496 	}
497 
498 	if (bwfm_pci_load_microcode(sc, ucode, size) != 0) {
499 		printf("%s: could not load microcode\n",
500 		    DEVNAME(sc));
501 		firmware_free(ucode, size);
502 		return;
503 	}
504 
505 	firmware_free(ucode, size);
506 
507 	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
508 	    sc->sc_shared_address + BWFM_SHARED_INFO);
509 	sc->sc_shared_version = sc->sc_shared_flags;
510 	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
511 	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
512 		printf("%s: PCIe version %d unsupported\n",
513 		    DEVNAME(sc), sc->sc_shared_version);
514 		return;
515 	}
516 
517 	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
518 		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
519 			sc->sc_dma_idx_sz = sizeof(uint16_t);
520 		else
521 			sc->sc_dma_idx_sz = sizeof(uint32_t);
522 	}
523 
524 	/* Maximum RX data buffers in the ring. */
525 	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
526 	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
527 	if (sc->sc_max_rxbufpost == 0)
528 		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;
529 
530 	/* Alternative offset of data in a packet */
531 	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
532 	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);
533 
534 	/* For Power Management */
535 	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
536 	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
537 	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
538 	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);
539 
540 	/* Ring information */
541 	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
542 	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);
543 
544 	/* Firmware's "dmesg" */
545 	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
546 	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
547 	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
548 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
549 	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
550 	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);
551 
552 	/* Read ring information. */
553 	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
554 	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
555 
556 	if (sc->sc_shared_version >= 6) {
557 		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
558 		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
559 		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
560 	} else {
561 		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
562 		sc->sc_max_flowrings = sc->sc_max_submissionrings -
563 		    BWFM_NUM_TX_MSGRINGS;
564 		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
565 	}
566 
567 	if (sc->sc_dma_idx_sz == 0) {
568 		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
569 		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
570 		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
571 		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
572 		idx_offset = sizeof(uint32_t);
573 	} else {
574 		uint64_t address;
575 
576 		/* Each TX/RX Ring has a Read and Write Ptr */
577 		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
578 		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
579 		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
580 		    sc->sc_dma_idx_bufsz, 8);
581 		if (sc->sc_dma_idx_buf == NULL) {
582 			/* XXX: Fallback to TCM? */
583 			printf("%s: cannot allocate idx buf\n",
584 			    DEVNAME(sc));
585 			return;
586 		}
587 
588 		idx_offset = sc->sc_dma_idx_sz;
589 		h2d_w_idx_ptr = 0;
590 		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
591 		ringinfo.h2d_w_idx_hostaddr_low =
592 		    htole32(address & 0xffffffff);
593 		ringinfo.h2d_w_idx_hostaddr_high =
594 		    htole32(address >> 32);
595 
596 		h2d_r_idx_ptr = h2d_w_idx_ptr +
597 		    sc->sc_max_submissionrings * idx_offset;
598 		address += sc->sc_max_submissionrings * idx_offset;
599 		ringinfo.h2d_r_idx_hostaddr_low =
600 		    htole32(address & 0xffffffff);
601 		ringinfo.h2d_r_idx_hostaddr_high =
602 		    htole32(address >> 32);
603 
604 		d2h_w_idx_ptr = h2d_r_idx_ptr +
605 		    sc->sc_max_submissionrings * idx_offset;
606 		address += sc->sc_max_submissionrings * idx_offset;
607 		ringinfo.d2h_w_idx_hostaddr_low =
608 		    htole32(address & 0xffffffff);
609 		ringinfo.d2h_w_idx_hostaddr_high =
610 		    htole32(address >> 32);
611 
612 		d2h_r_idx_ptr = d2h_w_idx_ptr +
613 		    sc->sc_max_completionrings * idx_offset;
614 		address += sc->sc_max_completionrings * idx_offset;
615 		ringinfo.d2h_r_idx_hostaddr_low =
616 		    htole32(address & 0xffffffff);
617 		ringinfo.d2h_r_idx_hostaddr_high =
618 		    htole32(address >> 32);
619 
620 		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
621 		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
622 	}
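
	/*
	 * Sketch of the host-side DMA index buffer laid out above, with
	 * one read and one write index per ring (idx_offset bytes each):
	 *
	 *	h2d_w_idx_ptr = 0
	 *	h2d_r_idx_ptr = h2d_w_idx_ptr + max_submissionrings * idx_offset
	 *	d2h_w_idx_ptr = h2d_r_idx_ptr + max_submissionrings * idx_offset
	 *	d2h_r_idx_ptr = d2h_w_idx_ptr + max_completionrings * idx_offset
	 *
	 * The firmware learns the host address of each region through the
	 * *_hostaddr fields in ringinfo; bwfm_pci_setup_ring() later adds
	 * a per-ring offset to these base values.
	 */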
623 
624 	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
625 	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
626 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
627 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
628 	    &ring_mem_ptr))
629 		goto cleanup;
630 	/* TX rxpost ring: Send clean data mbufs for RX */
631 	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
632 	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
633 	    &ring_mem_ptr))
634 		goto cleanup;
635 	/* RX completion rings: recv our filled buffers back */
636 	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
637 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
638 	    &ring_mem_ptr))
639 		goto cleanup;
640 	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
641 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
642 	    &ring_mem_ptr))
643 		goto cleanup;
644 	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
645 	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
646 	    &ring_mem_ptr))
647 		goto cleanup;
648 
649 	/* Dynamic TX rings for actual data */
650 	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
651 	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
652 	for (i = 0; i < sc->sc_max_flowrings; i++) {
653 		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
654 		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
655 		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
656 	}
657 
658 	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
659 	    0, 0, 0, "bwfmpl", NULL, IPL_NET);
660 
661 	/* Scratch and ring update buffers for firmware */
662 	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
663 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
664 		goto cleanup;
665 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
666 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
667 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
668 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
669 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
670 	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
671 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
672 	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
673 	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);
674 
675 	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
676 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
677 		goto cleanup;
678 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
679 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
680 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
681 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
682 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
683 	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
684 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
685 	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
686 	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);
687 
688 	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
689 	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
690 		goto cleanup;
691 
692 	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
693 	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
694 		goto cleanup;
695 
696 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
697 	bwfm_pci_intr_enable(sc);
698 
699 	/* Maps RX mbufs to a packet id and back. */
700 	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
701 	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
702 	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
703 	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
704 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
705 		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
706 		    &sc->sc_rx_pkts.pkts[i].bb_map);
707 
708 	/* Maps TX mbufs to a packet id and back. */
709 	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
710 	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
711 	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
712 	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
713 		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
714 		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
715 		    &sc->sc_tx_pkts.pkts[i].bb_map);
716 
717 	/*
718 	 * The firmware needs a good number of RX buffers posted before
719 	 * it sends any RX complete messages; whether this is by design
720 	 * or due to a bug somewhere in this driver is unknown.  64
721 	 * buffers don't suffice, but 128 buffers are enough.
722 	 */
723 	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
724 	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
725 	if_rxr_init(&sc->sc_event_ring, 8, 8);
726 	bwfm_pci_fill_rx_rings(sc);
727 
728 
729 #ifdef BWFM_DEBUG
730 	sc->sc_console_readidx = 0;
731 	bwfm_pci_debug_console(sc);
732 #endif
733 
734 	sc->sc_ioctl_poll = 1;
735 	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
736 	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
737 	bwfm_attach(&sc->sc_sc);
738 	sc->sc_ioctl_poll = 0;
739 	return;
740 
741 cleanup:
742 	if (sc->flowring_wq != NULL)
743 		workqueue_destroy(sc->flowring_wq);
744 	if (sc->sc_ih != NULL) {
745 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
746 		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
747 	}
748 	if (sc->sc_ioctl_buf)
749 		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
750 	if (sc->sc_ringupd_buf)
751 		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
752 	if (sc->sc_scratch_buf)
753 		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
754 	if (sc->sc_rx_complete.ring)
755 		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
756 	if (sc->sc_tx_complete.ring)
757 		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
758 	if (sc->sc_ctrl_complete.ring)
759 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
760 	if (sc->sc_rxpost_submit.ring)
761 		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
762 	if (sc->sc_ctrl_submit.ring)
763 		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
764 	if (sc->sc_dma_idx_buf)
765 		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
766 }
767 
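/*
 * Copy the firmware image into device RAM and kickstart the core.
 * The handshake works as follows: the last word of RAM is cleared
 * before the core is restarted, and once the firmware is up it
 * replaces that word with the address of its shared info structure,
 * which the loop at the end polls for (40 * 50ms, i.e. two seconds)
 * before giving up.
 */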
768 int
769 bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size)
770 {
771 	struct bwfm_softc *bwfm = (void *)sc;
772 	struct bwfm_core *core;
773 	uint32_t shared;
774 	int i;
775 
776 	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
777 		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
778 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
779 		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
780 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
781 		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
782 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
783 		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
784 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
785 		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
786 	}
787 
788 	for (i = 0; i < size; i++)
789 		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
790 		    bwfm->sc_chip.ch_rambase + i, ucode[i]);
791 
792 	/* Firmware replaces this with a pointer once up. */
793 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
794 	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);
795 
796 	/* TODO: restore NVRAM */
797 
798 	/* Load reset vector from firmware and kickstart core. */
799 	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
800 		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
801 		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
802 	}
803 	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);
804 
805 	for (i = 0; i < 40; i++) {
806 		delay(50 * 1000);
807 		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
808 		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
809 		if (shared)
810 			break;
811 	}
812 	if (!shared) {
813 		printf("%s: firmware did not come up\n", DEVNAME(sc));
814 		return 1;
815 	}
816 
817 	sc->sc_shared_address = shared;
818 	return 0;
819 }
820 
821 int
822 bwfm_pci_detach(device_t self, int flags)
823 {
824 	struct bwfm_pci_softc *sc = device_private(self);
825 
826 	bwfm_detach(&sc->sc_sc, flags);
827 
828 	/* FIXME: free RX buffers */
829 	/* FIXME: free TX buffers */
830 	/* FIXME: free more memory */
831 
832 	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
833 	    * sizeof(struct bwfm_pci_msgring));
834 	pool_destroy(&sc->sc_flowring_pool);
835 
836 	workqueue_destroy(sc->flowring_wq);
837 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
838 	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
839 	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
840 	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
841 	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
842 	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
843 	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
844 	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
845 	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
846 	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
847 	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
848 	return 0;
849 }
850 
851 /* DMA code */
852 struct bwfm_pci_dmamem *
853 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
854 {
855 	struct bwfm_pci_dmamem *bdm;
856 	int nsegs;
857 
858 	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
859 	bdm->bdm_size = size;
860 
861 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
862 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
863 		goto bdmfree;
864 
865 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
866 	    &nsegs, BUS_DMA_WAITOK) != 0)
867 		goto destroy;
868 
869 	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
870 	    (void **) &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
871 		goto free;
872 
873 	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
874 	    NULL, BUS_DMA_WAITOK) != 0)
875 		goto unmap;
876 
877 	bzero(bdm->bdm_kva, size);
878 
879 	return (bdm);
880 
881 unmap:
882 	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
883 free:
884 	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
885 destroy:
886 	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
887 bdmfree:
888 	kmem_free(bdm, sizeof(*bdm));
889 
890 	return (NULL);
891 }
892 
893 void
894 bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
895 {
896 	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
897 	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
898 	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
899 	kmem_free(bdm, sizeof(*bdm));
900 }
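
/*
 * Usage sketch (illustrative only) for the helpers above: allocate a
 * buffer, hand the device address to the firmware and touch it from
 * the host through the kernel mapping:
 *
 *	struct bwfm_pci_dmamem *bdm;
 *
 *	bdm = bwfm_pci_dmamem_alloc(sc, len, 8);
 *	if (bdm == NULL)
 *		return ENOMEM;
 *	write_to_device(BWFM_PCI_DMA_DVA(bdm));	(hypothetical helper)
 *	memset(BWFM_PCI_DMA_KVA(bdm), 0, BWFM_PCI_DMA_LEN(bdm));
 *	bwfm_pci_dmamem_free(sc, bdm);
 */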
901 
902 /*
903  * We need a simple mapping from packet IDs to mbufs: when a transfer
904  * completes, we only know the ID, so we have to look up the memory
905  * for it ourselves.  Allocation simply scans for an empty slot.
906  */
907 int
908 bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
909 {
910 	int i, idx;
911 
912 	idx = pkts->last + 1;
913 	for (i = 0; i < pkts->npkt; i++) {
914 		if (idx == pkts->npkt)
915 			idx = 0;
916 		if (pkts->pkts[idx].bb_m == NULL)
917 			return 0;
918 		idx++;
919 	}
920 	return ENOBUFS;
921 }
922 
923 int
924 bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
925     struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
926 {
927 	int i, idx;
928 
929 	idx = pkts->last + 1;
930 	for (i = 0; i < pkts->npkt; i++) {
931 		if (idx == pkts->npkt)
932 			idx = 0;
933 		if (pkts->pkts[idx].bb_m == NULL) {
934 			if (bus_dmamap_load_mbuf(sc->sc_dmat,
935 			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
936 				if (m_defrag(m, M_DONTWAIT) == NULL)
937 					return EFBIG;
938 				if (bus_dmamap_load_mbuf(sc->sc_dmat,
939 				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
940 					return EFBIG;
941 			}
942 			pkts->last = idx;
943 			pkts->pkts[idx].bb_m = m;
944 			*pktid = idx;
945 			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
946 			return 0;
947 		}
948 		idx++;
949 	}
950 	return ENOBUFS;
951 }
952 
953 struct mbuf *
954 bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
955     uint32_t pktid)
956 {
957 	struct mbuf *m;
958 
959 	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
960 		return NULL;
961 	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
962 	m = pkts->pkts[pktid].bb_m;
963 	pkts->pkts[pktid].bb_m = NULL;
964 	return m;
965 }
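
/*
 * Lifecycle sketch (illustrative only): both the TX and RX paths
 * first probe for a free slot, then bind an mbuf to a packet ID and
 * DMA address, and recover the mbuf once the firmware reports the
 * ID back in a completion message:
 *
 *	uint32_t pktid;
 *	paddr_t paddr;
 *
 *	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts))
 *		return ENOBUFS;
 *	if (bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr))
 *		return EFBIG;
 *	... pass pktid/paddr to the firmware, wait for completion ...
 *	m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts, pktid);
 */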
966 
967 void
968 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
969 {
970 	bwfm_pci_fill_rx_buf_ring(sc);
971 	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
972 	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
973 	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
974 	    MSGBUF_TYPE_EVENT_BUF_POST);
975 }
976 
977 void
978 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
979     uint32_t msgtype)
980 {
981 	struct msgbuf_rx_ioctl_resp_or_event *req;
982 	struct mbuf *m;
983 	uint32_t pktid;
984 	paddr_t paddr;
985 	int s, slots;
986 
987 	s = splnet();
988 	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
989 		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
990 			break;
991 		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
992 		if (req == NULL)
993 			break;
994 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
995 		if (m == NULL) {
996 			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
997 			break;
998 		}
999 		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
1000 		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
1001 			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1002 			m_freem(m);
1003 			break;
1004 		}
1005 		memset(req, 0, sizeof(*req));
1006 		req->msg.msgtype = msgtype;
1007 		req->msg.request_id = htole32(pktid);
1008 		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
1009 		req->host_buf_addr.high_addr = htole32(paddr >> 32);
1010 		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1011 		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1012 	}
1013 	if_rxr_put(rxring, slots);
1014 	splx(s);
1015 }
1016 
1017 void
1018 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
1019 {
1020 	struct msgbuf_rx_bufpost *req;
1021 	struct mbuf *m;
1022 	uint32_t pktid;
1023 	paddr_t paddr;
1024 	int s, slots;
1025 
1026 	s = splnet();
1027 	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
1028 	    slots > 0; slots--) {
1029 		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1030 			break;
1031 		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
1032 		if (req == NULL)
1033 			break;
1034 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
1035 		if (m == NULL) {
1036 			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1037 			break;
1038 		}
1039 		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
1040 		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
1041 			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1042 			m_freem(m);
1043 			break;
1044 		}
1045 		memset(req, 0, sizeof(*req));
1046 		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
1047 		req->msg.request_id = htole32(pktid);
1048 		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
1049 		req->data_buf_addr.high_addr = htole32(paddr >> 32);
1050 		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1051 		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
1052 	}
1053 	if_rxr_put(&sc->sc_rxbuf_ring, slots);
1054 	splx(s);
1055 }
1056 
1057 int
1058 bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1059     int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
1060     int idx, uint32_t idx_off, uint32_t *ring_mem)
1061 {
1062 	ring->w_idx_addr = w_idx + idx * idx_off;
1063 	ring->r_idx_addr = r_idx + idx * idx_off;
1064 	ring->nitem = nitem;
1065 	ring->itemsz = itemsz;
1066 	bwfm_pci_ring_write_rptr(sc, ring);
1067 	bwfm_pci_ring_write_wptr(sc, ring);
1068 
1069 	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
1070 	if (ring->ring == NULL)
1071 		return ENOMEM;
1072 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1073 	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
1074 	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1075 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1076 	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
1077 	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1078 	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1079 	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
1080 	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1081 	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
1082 	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
1083 	return 0;
1084 }
1085 
1086 int
1087 bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1088     int nitem, size_t itemsz)
1089 {
1090 	ring->w_ptr = 0;
1091 	ring->r_ptr = 0;
1092 	ring->nitem = nitem;
1093 	ring->itemsz = itemsz;
1094 	bwfm_pci_ring_write_rptr(sc, ring);
1095 	bwfm_pci_ring_write_wptr(sc, ring);
1096 
1097 	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
1098 	if (ring->ring == NULL)
1099 		return ENOMEM;
1100 	return 0;
1101 }
1102 
1103 /* Ring helpers */
1104 void
1105 bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1106     struct bwfm_pci_msgring *ring)
1107 {
1108 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1109 	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
1110 }
1111 
1112 void
1113 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
1114     struct bwfm_pci_msgring *ring)
1115 {
1116 	if (sc->sc_dma_idx_sz == 0) {
1117 		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
1118 		    sc->sc_tcm_ioh, ring->r_idx_addr);
1119 	} else {
1120 		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1121 		    + ring->r_idx_addr);
1122 	}
1123 }
1124 
1125 static u_int
1126 if_rxr_get(struct if_rxring *rxr, unsigned int max)
1127 {
1128 	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));
1129 
1130 	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
1131 			"rxr->rxr_inuse: %u\n"
1132 			"taken: %u\n"
1133 			"rxr->rxr_total: %u\n",
1134 			rxr->rxr_inuse, taken, rxr->rxr_total);
1135 	rxr->rxr_inuse += taken;
1136 
1137 	return taken;
1138 }
1139 
1140 static void
1141 if_rxr_put(struct if_rxring *rxr, unsigned int n)
1142 {
1143 	KASSERTMSG(rxr->rxr_inuse >= n,
1144 			"rxr->rxr_inuse: %u\n"
1145 			"n: %u\n"
1146 			"rxr->rxr_total: %u\n",
1147 			rxr->rxr_inuse, n, rxr->rxr_total);
1148 
1149 	rxr->rxr_inuse -= n;
1150 }
1151 
1152 static void
1153 if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
1154 {
1155 	(void) lwm;
1156 
1157 	rxr->rxr_total = hwm;
1158 	rxr->rxr_inuse = 0;
1159 }
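
/*
 * Accounting sketch (illustrative only): the fill functions take as
 * many slots as the ring still allows, post a buffer per slot, and
 * return whatever they could not use; completion handlers later give
 * one slot back per received buffer via if_rxr_put():
 *
 *	slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
 *	for (; slots > 0; slots--) {
 *		if (... posting one RX buffer fails ...)
 *			break;
 *	}
 *	if_rxr_put(&sc->sc_rxbuf_ring, slots);
 */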
1160 
1161 void
1162 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
1163     struct bwfm_pci_msgring *ring)
1164 {
1165 	if (sc->sc_dma_idx_sz == 0) {
1166 		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
1167 		    sc->sc_tcm_ioh, ring->w_idx_addr);
1168 	} else {
1169 		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1170 		    + ring->w_idx_addr);
1171 	}
1172 }
1173 
1174 void
1175 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
1176     struct bwfm_pci_msgring *ring)
1177 {
1178 	if (sc->sc_dma_idx_sz == 0) {
1179 		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1180 		    ring->r_idx_addr, ring->r_ptr);
1181 	} else {
1182 		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1183 		    + ring->r_idx_addr) = ring->r_ptr;
1184 	}
1185 }
1186 
1187 void
1188 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
1189     struct bwfm_pci_msgring *ring)
1190 {
1191 	if (sc->sc_dma_idx_sz == 0) {
1192 		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1193 		    ring->w_idx_addr, ring->w_ptr);
1194 	} else {
1195 		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
1196 		    + ring->w_idx_addr) = ring->w_ptr;
1197 	}
1198 }
1199 
1200 /*
1201  * Retrieve a free descriptor to put new stuff in, but don't commit
1202  * to it yet so we can rollback later if any error occurs.
1203  */
1204 void *
1205 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1206     struct bwfm_pci_msgring *ring)
1207 {
1208 	int available;
1209 	char *ret;
1210 
1211 	bwfm_pci_ring_update_rptr(sc, ring);
1212 
1213 	if (ring->r_ptr > ring->w_ptr)
1214 		available = ring->r_ptr - ring->w_ptr;
1215 	else
1216 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1217 
1218 	if (available <= 1)	/* keep one slot free to tell full from empty */
1219 		return NULL;
1220 
1221 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1222 	ring->w_ptr += 1;
1223 	if (ring->w_ptr == ring->nitem)
1224 		ring->w_ptr = 0;
1225 	return ret;
1226 }
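
/*
 * Protocol sketch (illustrative only): a producer reserves a
 * descriptor, fills it in, and either commits it to the firmware or
 * rolls the write pointer back if anything goes wrong:
 *
 *	struct msgbuf_rx_bufpost *req;
 *
 *	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
 *	if (req == NULL)
 *		return;
 *	if (... filling the request fails ...) {
 *		bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
 *		return;
 *	}
 *	bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
 */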
1227 
1228 void *
1229 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
1230     struct bwfm_pci_msgring *ring, int count, int *avail)
1231 {
1232 	int available;
1233 	char *ret;
1234 
1235 	bwfm_pci_ring_update_rptr(sc, ring);
1236 
1237 	if (ring->r_ptr > ring->w_ptr)
1238 		available = ring->r_ptr - ring->w_ptr;
1239 	else
1240 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1241 
1242 	if (available <= 1)
1243 		return NULL;
1244 
1245 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1246 	*avail = min(count, available - 1);
1247 	if (*avail + ring->w_ptr > ring->nitem)
1248 		*avail = ring->nitem - ring->w_ptr;
1249 	ring->w_ptr += *avail;
1250 	if (ring->w_ptr == ring->nitem)
1251 		ring->w_ptr = 0;
1252 	return ret;
1253 }
1254 
1255 /*
1256  * Read number of descriptors available (submitted by the firmware)
1257  * and retrieve pointer to first descriptor.
1258  */
1259 void *
1260 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
1261     struct bwfm_pci_msgring *ring, int *avail)
1262 {
1263 	bwfm_pci_ring_update_wptr(sc, ring);
1264 
1265 	if (ring->w_ptr >= ring->r_ptr)
1266 		*avail = ring->w_ptr - ring->r_ptr;
1267 	else
1268 		*avail = ring->nitem - ring->r_ptr;
1269 
1270 	if (*avail == 0)
1271 		return NULL;
1272 
1273 	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
1274 }
1275 
1276 /*
1277  * Let firmware know we read N descriptors.
1278  */
1279 void
1280 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
1281     struct bwfm_pci_msgring *ring, int nitem)
1282 {
1283 	ring->r_ptr += nitem;
1284 	if (ring->r_ptr == ring->nitem)
1285 		ring->r_ptr = 0;
1286 	bwfm_pci_ring_write_rptr(sc, ring);
1287 }
1288 
1289 /*
1290  * Let firmware know that we submitted some descriptors.
1291  */
1292 void
1293 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
1294     struct bwfm_pci_msgring *ring)
1295 {
1296 	bwfm_pci_ring_write_wptr(sc, ring);
1297 	bwfm_pci_ring_bell(sc, ring);
1298 }
1299 
1300 /*
1301  * Rollback N descriptors in case we don't actually want
1302  * to commit to it.
1303  */
1304 void
1305 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1306     struct bwfm_pci_msgring *ring, int nitem)
1307 {
1308 	if (ring->w_ptr == 0)
1309 		ring->w_ptr = ring->nitem - nitem;
1310 	else
1311 		ring->w_ptr -= nitem;
1312 }
1313 
1314 /*
1315  * Foreach written descriptor on the ring, pass the descriptor to
1316  * a message handler and let the firmware know we handled it.
1317  */
1318 void
1319 bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
1320 {
1321 	char *buf;
1322 	int avail, processed;
1323 
1324 again:
1325 	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
1326 	if (buf == NULL)
1327 		return;
1328 
1329 	processed = 0;
1330 	while (avail) {
1331 		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
1332 		buf += ring->itemsz;
1333 		processed++;
1334 		if (processed == 48) {
1335 			bwfm_pci_ring_read_commit(sc, ring, processed);
1336 			processed = 0;
1337 		}
1338 		avail--;
1339 	}
1340 	if (processed)
1341 		bwfm_pci_ring_read_commit(sc, ring, processed);
1342 	if (ring->r_ptr == 0)
1343 		goto again;
1344 }
1345 
1346 void
1347 bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
1348 {
1349 	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
1350 	struct msgbuf_ioctl_resp_hdr *resp;
1351 	struct msgbuf_tx_status *tx;
1352 	struct msgbuf_rx_complete *rx;
1353 	struct msgbuf_rx_event *event;
1354 	struct msgbuf_common_hdr *msg;
1355 	struct msgbuf_flowring_create_resp *fcr;
1356 	struct msgbuf_flowring_delete_resp *fdr;
1357 	struct bwfm_pci_msgring *ring;
1358 	struct mbuf *m;
1359 	int flowid;
1360 
1361 	msg = (struct msgbuf_common_hdr *)buf;
1362 	switch (msg->msgtype)
1363 	{
1364 	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1365 		fcr = (struct msgbuf_flowring_create_resp *)buf;
1366 		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1367 		if (flowid < 2)
1368 			break;
1369 		flowid -= 2;
1370 		if (flowid >= sc->sc_max_flowrings)
1371 			break;
1372 		ring = &sc->sc_flowrings[flowid];
1373 		if (ring->status != RING_OPENING)
1374 			break;
1375 		if (fcr->compl_hdr.status) {
1376 			printf("%s: failed to open flowring %d\n",
1377 			    DEVNAME(sc), flowid);
1378 			ring->status = RING_CLOSED;
1379 			if (ring->m) {
1380 				m_freem(ring->m);
1381 				ring->m = NULL;
1382 			}
1383 			ifp->if_flags &= ~IFF_OACTIVE;
1384 			ifp->if_start(ifp);
1385 			break;
1386 		}
1387 		ring->status = RING_OPEN;
1388 		if (ring->m != NULL) {
1389 			m = ring->m;
1390 			ring->m = NULL;
1391 			if (bwfm_pci_txdata(&sc->sc_sc, m))
1392 				m_freem(m);
1393 		}
1394 		ifp->if_flags &= ~IFF_OACTIVE;
1395 		ifp->if_start(ifp);
1396 		break;
1397 	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1398 		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1399 		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1400 		if (flowid < 2)
1401 			break;
1402 		flowid -= 2;
1403 		if (flowid >= sc->sc_max_flowrings)
1404 			break;
1405 		ring = &sc->sc_flowrings[flowid];
1406 		if (ring->status != RING_CLOSING)
1407 			break;
1408 		if (fdr->compl_hdr.status) {
1409 			printf("%s: failed to delete flowring %d\n",
1410 			    DEVNAME(sc), flowid);
1411 			break;
1412 		}
1413 		bwfm_pci_dmamem_free(sc, ring->ring);
1414 		ring->status = RING_CLOSED;
1415 		break;
1416 	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1417 		break;
1418 	case MSGBUF_TYPE_IOCTL_CMPLT:
1419 		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1420 		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
1421 		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
1422 		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
1423 		if_rxr_put(&sc->sc_ioctl_ring, 1);
1424 		bwfm_pci_fill_rx_rings(sc);
1425 		wakeup(&sc->sc_ioctl_buf);
1426 		break;
1427 	case MSGBUF_TYPE_WL_EVENT:
1428 		event = (struct msgbuf_rx_event *)buf;
1429 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1430 		    letoh32(event->msg.request_id));
1431 		if (m == NULL)
1432 			break;
1433 		m_adj(m, sc->sc_rx_dataoffset);
1434 		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1435 		bwfm_rx(&sc->sc_sc, m);
1436 		if_rxr_put(&sc->sc_event_ring, 1);
1437 		bwfm_pci_fill_rx_rings(sc);
1438 		break;
1439 	case MSGBUF_TYPE_TX_STATUS:
1440 		tx = (struct msgbuf_tx_status *)buf;
1441 		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1442 		    letoh32(tx->msg.request_id));
1443 		if (m == NULL)
1444 			break;
1445 		m_freem(m);
1446 		if (sc->sc_tx_pkts_full) {
1447 			sc->sc_tx_pkts_full = 0;
1448 			ifp->if_flags &= ~IFF_OACTIVE;
1449 			ifp->if_start(ifp);
1450 		}
1451 		break;
1452 	case MSGBUF_TYPE_RX_CMPLT:
1453 		rx = (struct msgbuf_rx_complete *)buf;
1454 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1455 		    letoh32(rx->msg.request_id));
1456 		if (m == NULL)
1457 			break;
1458 		if (letoh16(rx->data_offset))
1459 			m_adj(m, letoh16(rx->data_offset));
1460 		else if (sc->sc_rx_dataoffset)
1461 			m_adj(m, sc->sc_rx_dataoffset);
1462 		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1463 		bwfm_rx(&sc->sc_sc, m);
1464 		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1465 		bwfm_pci_fill_rx_rings(sc);
1466 		break;
1467 	default:
1468 		printf("%s: unhandled msgtype 0x%08x\n", __func__, msg->msgtype);
1469 		break;
1470 	}
1471 }
1472 
1473 /* Bus core helpers */
1474 void
1475 bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1476 {
1477 	struct bwfm_softc *bwfm = (void *)sc;
1478 	struct bwfm_core *core;
1479 
1480 	core = bwfm_chip_get_core(bwfm, id);
1481 	if (core == NULL) {
1482 		printf("%s: could not find core to select\n", DEVNAME(sc));
1483 		return;
1484 	}
1485 
1486 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1487 	    BWFM_PCI_BAR0_WINDOW, core->co_base);
1488 	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1489 	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
1490 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1491 		    BWFM_PCI_BAR0_WINDOW, core->co_base);
1492 }
1493 
1494 uint32_t
1495 bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1496 {
1497 	struct bwfm_pci_softc *sc = (void *)bwfm;
1498 	uint32_t page, offset;
1499 
1500 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1501 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1502 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1503 	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1504 }
1505 
1506 void
1507 bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1508 {
1509 	struct bwfm_pci_softc *sc = (void *)bwfm;
1510 	uint32_t page, offset;
1511 
1512 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1513 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1514 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1515 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1516 }
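
/*
 * Worked example (illustrative only): assuming BWFM_PCI_BAR0_REG_SIZE
 * is 4096, a backplane address such as 0x18003004 is split into the
 * page 0x18003000, which is written to the BAR0 window register, and
 * the offset 0x004, which is accessed through BAR0:
 *
 *	page   = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
 *	offset = reg &  (BWFM_PCI_BAR0_REG_SIZE - 1);
 */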
1517 
1518 int
1519 bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
1520 {
1521 	return 0;
1522 }
1523 
1524 int
1525 bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
1526 {
1527 	struct bwfm_pci_softc *sc = (void *)bwfm;
1528 	struct bwfm_core *core;
1529 	uint32_t reg;
1530 	int i;
1531 
1532 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
1533 	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
1534 	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
1535 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
1536 	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);
1537 
1538 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
1539 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1540 	    BWFM_CHIP_REG_WATCHDOG, 4);
1541 	delay(100 * 1000);
1542 
1543 	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
1544 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1545 	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);
1546 
1547 	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
1548 	if (core->co_rev <= 13) {
1549 		uint16_t cfg_offset[] = {
1550 		    BWFM_PCI_CFGREG_STATUS_CMD,
1551 		    BWFM_PCI_CFGREG_PM_CSR,
1552 		    BWFM_PCI_CFGREG_MSI_CAP,
1553 		    BWFM_PCI_CFGREG_MSI_ADDR_L,
1554 		    BWFM_PCI_CFGREG_MSI_ADDR_H,
1555 		    BWFM_PCI_CFGREG_MSI_DATA,
1556 		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
1557 		    BWFM_PCI_CFGREG_RBAR_CTRL,
1558 		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
1559 		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
1560 		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
1561 		};
1562 
1563 		for (i = 0; i < nitems(cfg_offset); i++) {
1564 			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1565 			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
1566 			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1567 			    BWFM_PCI_PCIE2REG_CONFIGDATA);
1568 			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
1569 			    DEVNAME(sc), cfg_offset[i], reg));
1570 			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1571 			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
1572 		}
1573 	}
1574 
1575 	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1576 	    BWFM_PCI_PCIE2REG_MAILBOXINT);
1577 	if (reg != 0xffffffff)
1578 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1579 		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);
1580 
1581 	return 0;
1582 }
1583 
1584 void
1585 bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
1586 {
1587 	struct bwfm_pci_softc *sc = (void *)bwfm;
1588 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
1589 }
1590 
1591 static int bwfm_pci_prio2fifo[8] = {
1592 	1, /* best effort */
1593 	0, /* IPTOS_PREC_IMMEDIATE */
1594 	0, /* IPTOS_PREC_PRIORITY */
1595 	1, /* IPTOS_PREC_FLASH */
1596 	2, /* IPTOS_PREC_FLASHOVERRIDE */
1597 	2, /* IPTOS_PREC_CRITIC_ECP */
1598 	3, /* IPTOS_PREC_INTERNETCONTROL */
1599 	3, /* IPTOS_PREC_NETCONTROL */
1600 };
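
/*
 * Flowring selection sketch (illustrative only), matching the lookup
 * and create functions below; note the table above is indexed with
 * the WME access category here (prio = ac).  In STA mode the FIFO
 * number doubles as the ring ID; in AP mode the last byte of the
 * destination address is mixed in so that different stations hash to
 * different rings:
 *
 *	fifo = bwfm_pci_prio2fifo[prio];
 *	flowid = fifo;				(STA)
 *	flowid = da[5] * 2 + fifo;		(HOSTAP)
 *	flowid %= sc->sc_max_flowrings;
 */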
1601 
1602 int
1603 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1604 {
1605 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1606 	uint8_t *da = mtod(m, uint8_t *);
1607 	struct ether_header *eh;
1608 	int flowid, prio, fifo;
1609 	int i, found, ac;
1610 
1611 	/* No QoS for EAPOL frames. */
1612 	eh = mtod(m, struct ether_header *);
1613 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1614 	    M_WME_GETAC(m) : WME_AC_BE;
1615 
1616 	prio = ac;
1617 	fifo = bwfm_pci_prio2fifo[prio];
1618 
1619 	switch (ic->ic_opmode)
1620 	{
1621 	case IEEE80211_M_STA:
1622 		flowid = fifo;
1623 		break;
1624 #ifndef IEEE80211_STA_ONLY
1625 	case IEEE80211_M_HOSTAP:
1626 		if (ETHER_IS_MULTICAST(da))
1627 			da = __UNCONST(etherbroadcastaddr);
1628 		flowid = da[5] * 2 + fifo;
1629 		break;
1630 #endif
1631 	default:
1632 		printf("%s: state not supported\n", DEVNAME(sc));
1633 		return ENOBUFS;
1634 	}
1635 
1636 	found = 0;
1637 	flowid = flowid % sc->sc_max_flowrings;
1638 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1639 		if (ic->ic_opmode == IEEE80211_M_STA &&
1640 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1641 		    sc->sc_flowrings[flowid].fifo == fifo) {
1642 			found = 1;
1643 			break;
1644 		}
1645 #ifndef IEEE80211_STA_ONLY
1646 		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1647 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1648 		    sc->sc_flowrings[flowid].fifo == fifo &&
1649 		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
1650 			found = 1;
1651 			break;
1652 		}
1653 #endif
1654 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1655 	}
1656 
1657 	if (found)
1658 		return flowid;
1659 
1660 	return -1;
1661 }
1662 
1663 void
1664 bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
1665 {
1666 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1667 	struct bwfm_cmd_flowring_create * cmd;
1668 	uint8_t *da = mtod(m, uint8_t *);
1669 	struct ether_header *eh;
1670 	struct bwfm_pci_msgring *ring;
1671 	int flowid, prio, fifo;
1672 	int i, found, ac;
1673 
1674 	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
1675 	if (__predict_false(cmd == NULL))
1676 		return;
1677 
1678 	/* No QoS for EAPOL frames. */
1679 	eh = mtod(m, struct ether_header *);
1680 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1681 	    M_WME_GETAC(m) : WME_AC_BE;
1682 
1683 	prio = ac;
1684 	fifo = bwfm_pci_prio2fifo[prio];
1685 
1686 	switch (ic->ic_opmode)
1687 	{
1688 	case IEEE80211_M_STA:
1689 		flowid = fifo;
1690 		break;
1691 #ifndef IEEE80211_STA_ONLY
1692 	case IEEE80211_M_HOSTAP:
1693 		if (ETHER_IS_MULTICAST(da))
1694 			da = __UNCONST(etherbroadcastaddr);
1695 		flowid = da[5] * 2 + fifo;
1696 		break;
1697 #endif
1698 	default:
1699 		printf("%s: state not supported\n", DEVNAME(sc));
1700 		return;
1701 	}
1702 
1703 	found = 0;
1704 	flowid = flowid % sc->sc_max_flowrings;
1705 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1706 		ring = &sc->sc_flowrings[flowid];
1707 		if (ring->status == RING_CLOSED) {
1708 			ring->status = RING_OPENING;
1709 			found = 1;
1710 			break;
1711 		}
1712 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1713 	}
1714 
1715 	/*
1716 	 * We cannot recover from this so far; only a stop/init cycle
1717 	 * can revive it, if it ever happens at all.
1718 	 */
1719 	if (!found) {
1720 		printf("%s: no flowring available\n", DEVNAME(sc));
1721 		return;
1722 	}
1723 
1724 	cmd->sc = sc;
1725 	cmd->m = m;
1726 	cmd->prio = prio;
1727 	cmd->flowid = flowid;
1728 	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
1729 }
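
/*
 * Flowring creation is asynchronous: bwfm_pci_flowring_create() above
 * marks a free ring RING_OPENING and defers the heavy lifting to the
 * workqueue; the callback below allocates the ring and submits
 * MSGBUF_TYPE_FLOW_RING_CREATE; the firmware's CREATE_CMPLT response,
 * handled in bwfm_pci_msg_rx(), finally moves the ring to RING_OPEN
 * (or back to RING_CLOSED on error) and restarts transmission.
 */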
1730 
1731 void
1732 bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
1733 {
1734 	struct bwfm_cmd_flowring_create *cmd = container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
1735 	struct bwfm_pci_softc *sc = cmd->sc;
1736 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1737 	struct msgbuf_tx_flowring_create_req *req;
1738 	struct bwfm_pci_msgring *ring;
1739 	uint8_t *da, *sa;
1740 
1741 	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1742 	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1743 
1744 	ring = &sc->sc_flowrings[cmd->flowid];
1745 	if (ring->status != RING_OPENING) {
1746 		printf("%s: flowring not opening\n", DEVNAME(sc));
		/* Return the unused work item to its pool. */
		pool_put(&sc->sc_flowring_pool, cmd);
1747 		return;
1748 	}
1749 
1750 	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
1751 		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
1752 		return;
1753 	}
1754 
1755 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1756 	if (req == NULL) {
1757 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
1758 		return;
1759 	}
1760 
1761 	ring->status = RING_OPENING;
1762 	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1763 	ring->m = cmd->m;
1764 	memcpy(ring->mac, da, ETHER_ADDR_LEN);
1765 #ifndef IEEE80211_STA_ONLY
1766 	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1767 		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1768 #endif
1769 
1770 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1771 	req->msg.ifidx = 0;
1772 	req->msg.request_id = 0;
1773 	req->tid = bwfm_pci_prio2fifo[cmd->prio];
1774 	req->flow_ring_id = letoh16(cmd->flowid + 2);
1775 	memcpy(req->da, da, ETHER_ADDR_LEN);
1776 	memcpy(req->sa, sa, ETHER_ADDR_LEN);
1777 	req->flow_ring_addr.high_addr =
1778 	    letoh32((uint64_t)BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1779 	req->flow_ring_addr.low_addr =
1780 	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1781 	req->max_items = letoh16(512);
1782 	req->len_item = letoh16(48);
1783 
1784 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1785 	pool_put(&sc->sc_flowring_pool, cmd);
1786 }
1787 
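/*
 * Post a MSGBUF_TYPE_FLOW_RING_DELETE request for an open flowring and
 * mark it RING_CLOSING until the firmware's response has been processed.
 */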
1788 void
1789 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1790 {
1791 	struct msgbuf_tx_flowring_delete_req *req;
1792 	struct bwfm_pci_msgring *ring;
1793 
1794 	ring = &sc->sc_flowrings[flowid];
1795 	if (ring->status != RING_OPEN) {
1796 		printf("%s: flowring not open\n", DEVNAME(sc));
1797 		return;
1798 	}
1799 
1800 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1801 	if (req == NULL) {
1802 		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1803 		return;
1804 	}
1805 
1806 	ring->status = RING_CLOSING;
1807 
1808 	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1809 	req->msg.ifidx = 0;
1810 	req->msg.request_id = 0;
1811 	req->flow_ring_id = letoh16(flowid + 2);
1812 	req->reason = 0;
1813 
1814 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1815 }
1816 
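/*
 * Bring the device to a stop by requesting deletion of every flowring
 * that is currently open.
 */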
1817 void
1818 bwfm_pci_stop(struct bwfm_softc *bwfm)
1819 {
1820 	struct bwfm_pci_softc *sc = (void *)bwfm;
1821 	struct bwfm_pci_msgring *ring;
1822 	int i;
1823 
1824 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1825 		ring = &sc->sc_flowrings[i];
1826 		if (ring->status == RING_OPEN)
1827 			bwfm_pci_flowring_delete(sc, i);
1828 	}
1829 }
1830 
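/*
 * Tell the upper layer whether we can transmit: ENOBUFS while any
 * flowring is still being opened or when we have run out of TX packet
 * ids, 0 otherwise.
 */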
1831 int
1832 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1833 {
1834 	struct bwfm_pci_softc *sc = (void *)bwfm;
1835 	struct bwfm_pci_msgring *ring;
1836 	int i;
1837 
1838 	/* If we are transitioning, we cannot send. */
1839 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1840 		ring = &sc->sc_flowrings[i];
1841 		if (ring->status == RING_OPENING)
1842 			return ENOBUFS;
1843 	}
1844 
1845 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1846 		sc->sc_tx_pkts_full = 1;
1847 		return ENOBUFS;
1848 	}
1849 
1850 	return 0;
1851 }
1852 
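/*
 * Transmit an mbuf on the flowring matching its destination and WME
 * access category.  If no such ring exists yet, its creation is kicked
 * off asynchronously and the mbuf is held until the ring is open.
 */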
1853 int
1854 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
1855 {
1856 	struct bwfm_pci_softc *sc = (void *)bwfm;
1857 	struct bwfm_pci_msgring *ring;
1858 	struct msgbuf_tx_msghdr *tx;
1859 	uint32_t pktid;
1860 	paddr_t paddr;
1861 	struct ether_header *eh;
1862 	int flowid, ret, ac;
1863 
1864 	flowid = bwfm_pci_flowring_lookup(sc, m);
1865 	if (flowid < 0) {
1866 		/*
1867 		 * We cannot send the packet right now as there is
1868 		 * no flowring yet.  The flowring will be created
1869 		 * asynchronously.  While the ring is transitioning
1870 		 * the TX check will tell the upper layers that we
1871 		 * cannot send packets right now.  When the flowring
1872 		 * is created the queue will be restarted and this
1873 		 * mbuf will be transmitted.
1874 		 */
1875 		bwfm_pci_flowring_create(sc, m);
1876 		return 0;
1877 	}
1878 
1879 	ring = &sc->sc_flowrings[flowid];
1880 	if (ring->status == RING_OPENING ||
1881 	    ring->status == RING_CLOSING) {
1882 		printf("%s: tried to use a flow that was "
1883 		    "transitioning in status %d\n",
1884 		    DEVNAME(sc), ring->status);
1885 		return ENOBUFS;
1886 	}
1887 
1888 	tx = bwfm_pci_ring_write_reserve(sc, ring);
1889 	if (tx == NULL)
1890 		return ENOBUFS;
1891 
1892 	/* No QoS for EAPOL frames. */
1893 	eh = mtod(m, struct ether_header *);
1894 	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1895 	    M_WME_GETAC(m) : WME_AC_BE;
1896 
1897 	memset(tx, 0, sizeof(*tx));
1898 	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1899 	tx->msg.ifidx = 0;
1900 	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1901 	tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1902 	tx->seg_cnt = 1;
1903 	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);
1904 
1905 	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
1906 	if (ret) {
1907 		if (ret == ENOBUFS) {
1908 			printf("%s: no pktid available for TX\n",
1909 			    DEVNAME(sc));
1910 			sc->sc_tx_pkts_full = 1;
1911 		}
1912 		bwfm_pci_ring_write_cancel(sc, ring, 1);
1913 		return ret;
1914 	}
1915 	paddr += ETHER_HDR_LEN;
1916 
1917 	tx->msg.request_id = htole32(pktid);
1918 	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
1919 	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1920 	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
1921 
1922 	bwfm_pci_ring_write_commit(sc, ring);
1923 	return 0;
1924 }
1925 
1926 #ifdef BWFM_DEBUG
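/*
 * Drain new characters from the firmware's console ring in device
 * shared memory and echo them via DPRINTFN(3, ...).
 */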
1927 void
1928 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1929 {
1930 	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1931 	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1932 
1933 	if (newidx != sc->sc_console_readidx)
1934 		DPRINTFN(3, ("BWFM CONSOLE: "));
1935 	while (newidx != sc->sc_console_readidx) {
1936 		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1937 		    sc->sc_console_buf_addr + sc->sc_console_readidx);
1938 		sc->sc_console_readidx++;
1939 		if (sc->sc_console_readidx == sc->sc_console_buf_size)
1940 			sc->sc_console_readidx = 0;
1941 		if (ch == '\r')
1942 			continue;
1943 		DPRINTFN(3, ("%c", ch));
1944 	}
1945 }
1946 #endif
1947 
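/*
 * Interrupt handler: acknowledge and clear the mailbox interrupt
 * status, process the device-to-host completion rings and re-enable
 * interrupts before returning.
 */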
1948 int
1949 bwfm_pci_intr(void *v)
1950 {
1951 	struct bwfm_pci_softc *sc = (void *)v;
1952 	uint32_t status;
1953 
1954 	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1955 	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
1956 		return 0;
1957 
1958 	bwfm_pci_intr_disable(sc);
1959 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1960 	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
1961 
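	/* XXX Mailbox data from the device is not handled yet. */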
1962 	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1963 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
1964 		printf("%s: handle MB data\n", __func__);
1965 
1966 	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
1967 		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
1968 		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
1969 		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
1970 	}
1971 
1972 #ifdef BWFM_DEBUG
1973 	bwfm_pci_debug_console(sc);
1974 #endif
1975 
1976 	bwfm_pci_intr_enable(sc);
1977 	return 1;
1978 }
1979 
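/*
 * Unmask the mailbox interrupt sources we service: both function 0
 * mailboxes and the device-to-host doorbell.
 */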
1980 void
1981 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
1982 {
1983 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1984 	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
1985 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1986 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
1987 	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
1988 }
1989 
1990 void
1991 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
1992 {
1993 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1994 	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
1995 }
1996 
1997 /* Msgbuf protocol implementation */
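/*
 * Send an ioctl/dcmd request to the firmware through the control
 * submit ring, then sleep until the completion handler wakes us up
 * with the response (or give up after one second).  On success the
 * response payload is copied back into buf and *len is updated.
 */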
1998 int
1999 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2000     int cmd, char *buf, size_t *len)
2001 {
2002 	struct bwfm_pci_softc *sc = (void *)bwfm;
2003 	struct msgbuf_ioctl_req_hdr *req;
2004 	struct mbuf *m;
2005 	size_t buflen;
2006 	int s;
2007 
2008 	s = splnet();
2009 	sc->sc_ioctl_resp_pktid = -1;
2010 	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2011 	if (req == NULL) {
2012 		printf("%s: cannot reserve for write\n", DEVNAME(sc));
2013 		splx(s);
2014 		return 1;
2015 	}
2016 	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2017 	req->msg.ifidx = 0;
2018 	req->msg.flags = 0;
2019 	req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
2020 	req->cmd = htole32(cmd);
2021 	req->output_buf_len = htole16(*len);
2022 	req->trans_id = htole16(sc->sc_ioctl_reqid++);
2023 
2024 	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2025 	req->input_buf_len = htole16(buflen);
2026 	req->req_buf_addr.high_addr =
2027 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
2028 	req->req_buf_addr.low_addr =
2029 	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
2030 	if (buf)
2031 		memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
2032 	else
2033 		memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);
2034 
2035 	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2037 
	/*
	 * Stay at splnet across the sleep: dropping it before tsleep
	 * would let the completion interrupt deliver its wakeup first,
	 * risking a lost wakeup and a spurious timeout.
	 */
2038 	if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
2039 		printf("%s: timeout waiting for ioctl response\n",
2040 		    DEVNAME(sc));
		splx(s);
2041 		return 1;
2042 	}
2043 
2044 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
2045 	if (m == NULL) {
		splx(s);
2046 		return 1;
	}
2047 
2048 	*len = min(buflen, sc->sc_ioctl_resp_ret_len);
2049 	if (buf)
2050 		memcpy(buf, mtod(m, char *), *len);
2051 	m_freem(m);
2052 	splx(s);
2053 
2054 	return 0;
2055 }
2056 
2057 int
2058 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2059     int cmd, char *buf, size_t len)
2060 {
2061 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2062 }
2063